/* Output routines for GCC for Renesas / SuperH SH.
Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
- 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+ 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Free Software Foundation, Inc.
Contributed by Steve Chamberlain (sac@cygnus.com).
Improved by Jim Wilson (wilson@cygnus.com).
#include "flags.h"
#include "expr.h"
#include "optabs.h"
+#include "reload.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "cfglayout.h"
#include "intl.h"
#include "sched-int.h"
+#include "params.h"
#include "ggc.h"
#include "gimple.h"
#include "cfgloop.h"
and returned from sh_reorder2. */
static short cached_can_issue_more;
+/* Unique number for UNSPEC_BBR pattern. */
+static unsigned int unspec_bbr_uid = 1;
+
/* Provides the class number of the smallest class containing
reg number. */
static int noncall_uses_reg (rtx, rtx, rtx *);
static rtx gen_block_redirect (rtx, int, int);
static void sh_reorg (void);
-static void output_stack_adjust (int, rtx, int, HARD_REG_SET *);
+static void output_stack_adjust (int, rtx, int, HARD_REG_SET *, bool);
static rtx frame_insn (rtx);
static rtx push (int);
static void pop (int);
static void push_regs (HARD_REG_SET *, int);
static int calc_live_regs (HARD_REG_SET *);
static HOST_WIDE_INT rounded_frame_size (int);
+static bool sh_frame_pointer_required (void);
static rtx mark_constant_pool_use (rtx);
-const struct attribute_spec sh_attribute_table[];
static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_resbank_handler_attribute (tree *, tree,
tree, int, bool *);
static bool sh_ms_bitfield_layout_p (const_tree);
static void sh_init_builtins (void);
+static tree sh_builtin_decl (unsigned, bool);
static void sh_media_init_builtins (void);
+static tree sh_media_builtin_decl (unsigned, bool);
static rtx sh_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
static void sh_file_start (void);
static int sh_address_cost (rtx, bool);
static int sh_pr_n_sets (void);
static rtx sh_allocate_initial_value (rtx);
+static bool sh_legitimate_address_p (enum machine_mode, rtx, bool);
static rtx sh_legitimize_address (rtx, rtx, enum machine_mode);
static int shmedia_target_regs_stack_space (HARD_REG_SET *);
static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
struct save_schedule_s *, int);
static rtx sh_struct_value_rtx (tree, int);
+static rtx sh_function_value (const_tree, const_tree, bool);
+static rtx sh_libcall_value (enum machine_mode, const_rtx);
static bool sh_return_in_memory (const_tree, const_tree);
static rtx sh_builtin_saveregs (void);
static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
static tree sh_build_builtin_va_list (void);
static void sh_va_start (tree, rtx);
static tree sh_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
+static bool sh_promote_prototypes (const_tree);
+static enum machine_mode sh_promote_function_mode (const_tree type,
+ enum machine_mode,
+ int *punsignedp,
+ const_tree funtype,
+ int for_return);
static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
const_tree, bool);
static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
static int sh_dwarf_calling_convention (const_tree);
static void sh_encode_section_info (tree, rtx, int);
static int sh2a_function_vector_p (tree);
+static void sh_trampoline_init (rtx, tree, rtx);
+static rtx sh_trampoline_adjust_address (rtx);
+\f
+static const struct attribute_spec sh_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ { "interrupt_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
+ { "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
+ { "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
+ { "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
+ { "trapa_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
+ { "nosave_low_regs", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
+ { "resbank", 0, 0, true, false, false, sh_handle_resbank_handler_attribute },
+ { "function_vector", 1, 1, true, false, false, sh2a_handle_function_vector_handler_attribute },
+#ifdef SYMBIAN
+ /* Symbian support adds two new attributes:
+ dllexport - for exporting a function/variable that will live in a dll
+ dllimport - for importing a function/variable from a dll
+
+ Microsoft allows multiple declspecs in one __declspec, separating
+ them with spaces. We do NOT support this. Instead, use __declspec
+ multiple times. */
+ { "dllimport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
+ { "dllexport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
+#endif
+ { NULL, 0, 0, false, false, false, NULL }
+};
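+
+/* Illustrative uses of these attributes (a sketch, not part of this
+   patch; names like isr are hypothetical):
+
+     void isr (void) __attribute__ ((interrupt_handler));
+     void isr2 (void) __attribute__ ((interrupt_handler, trap_exit (4)));
+
+   where the trap_exit operand is the trap number used when returning.  */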
\f
/* Initialize the GCC target structure. */
#undef TARGET_ATTRIBUTE_TABLE
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sh_init_builtins
+#undef TARGET_BUILTIN_DECL
+#define TARGET_BUILTIN_DECL sh_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sh_expand_builtin
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
-#undef TARGET_PROMOTE_FUNCTION_ARGS
-#define TARGET_PROMOTE_FUNCTION_ARGS sh_promote_prototypes
-#undef TARGET_PROMOTE_FUNCTION_RETURN
-#define TARGET_PROMOTE_FUNCTION_RETURN sh_promote_prototypes
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE sh_promote_function_mode
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE sh_function_value
+#undef TARGET_LIBCALL_VALUE
+#define TARGET_LIBCALL_VALUE sh_libcall_value
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#undef TARGET_DWARF_CALLING_CONVENTION
#define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED sh_frame_pointer_required
+
/* Return regmode weight for insn. */
#define INSN_REGMODE_WEIGHT(INSN, MODE) regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING sh_symbian_strip_name_encoding
#undef TARGET_CXX_IMPORT_EXPORT_CLASS
-#define TARGET_CXX_IMPORT_EXPORT_CLASS symbian_import_export_class
+#define TARGET_CXX_IMPORT_EXPORT_CLASS sh_symbian_import_export_class
#endif /* SYMBIAN */
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sh_secondary_reload
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P sh_legitimate_address_p
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT sh_trampoline_init
+#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
+#define TARGET_TRAMPOLINE_ADJUST_ADDRESS sh_trampoline_adjust_address
+
/* Machine-specific symbol_ref flags. */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
}
}
\f
+/* Set default optimization options. */
+void
+sh_optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
+{
+ if (level)
+ {
+ if (!size)
+ sh_div_str = "inv:minlat";
+ }
+ if (size)
+ {
+ target_flags |= MASK_SMALLCODE;
+ sh_div_str = SH_DIV_STR_FOR_SIZE;
+ }
+ else
+ TARGET_CBRANCHDI4 = 1;
+ /* We can't meaningfully test TARGET_SHMEDIA here, because -m options
+ haven't been parsed yet, hence we'd read only the default.
+ sh_target_reg_class will return NO_REGS if this is not SHMEDIA, so
+ it's OK to always set flag_branch_target_load_optimize. */
+ if (level > 1)
+ {
+ flag_branch_target_load_optimize = 1;
+ if (!size)
+ target_flags |= MASK_SAVE_ALL_TARGET_REGS;
+ }
+ /* Likewise, we can't meaningfully test TARGET_SH2E / TARGET_IEEE
+ here, so leave it to OVERRIDE_OPTIONS to set
+ flag_finite_math_only. We set it to 2 here so we know if the user
+ explicitly requested this to be on or off. */
+ flag_finite_math_only = 2;
+ /* If flag_schedule_insns is 1, we set it to 2 here so we know if
+ the user explicitly requested this to be on or off. */
+ if (flag_schedule_insns > 0)
+ flag_schedule_insns = 2;
+
+ set_param_value ("simultaneous-prefetches", 2);
+}
+
+/* Implement OVERRIDE_OPTIONS macro. Validate and override various
+ options, and do some machine dependent initialization. */
+void
+sh_override_options (void)
+{
+ int regno;
+
+ SUBTARGET_OVERRIDE_OPTIONS;
+ if (flag_finite_math_only == 2)
+ flag_finite_math_only
+ = !flag_signaling_nans && TARGET_SH2E && ! TARGET_IEEE;
+ if (TARGET_SH2E && !flag_finite_math_only)
+ target_flags |= MASK_IEEE;
+ sh_cpu = PROCESSOR_SH1;
+ assembler_dialect = 0;
+ if (TARGET_SH2)
+ sh_cpu = PROCESSOR_SH2;
+ if (TARGET_SH2E)
+ sh_cpu = PROCESSOR_SH2E;
+ if (TARGET_SH2A)
+ sh_cpu = PROCESSOR_SH2A;
+ if (TARGET_SH3)
+ sh_cpu = PROCESSOR_SH3;
+ if (TARGET_SH3E)
+ sh_cpu = PROCESSOR_SH3E;
+ if (TARGET_SH4)
+ {
+ assembler_dialect = 1;
+ sh_cpu = PROCESSOR_SH4;
+ }
+ if (TARGET_SH4A_ARCH)
+ {
+ assembler_dialect = 1;
+ sh_cpu = PROCESSOR_SH4A;
+ }
+ if (TARGET_SH5)
+ {
+ sh_cpu = PROCESSOR_SH5;
+ target_flags |= MASK_ALIGN_DOUBLE;
+ if (TARGET_SHMEDIA_FPU)
+ target_flags |= MASK_FMOVD;
+ if (TARGET_SHMEDIA)
+ {
+ /* There are no delay slots on SHmedia. */
+ flag_delayed_branch = 0;
+ /* Relaxation isn't yet supported for SHmedia. */
+ target_flags &= ~MASK_RELAX;
+ /* After reload, if conversion does little good but can cause
+ ICEs:
+ - find_if_block doesn't do anything for SH because we don't
+ have conditional execution patterns. (We use conditional
+ move patterns, which are handled differently, and only
+ before reload).
+ - find_cond_trap doesn't do anything for the SH because we
+ don't have conditional traps.
+ - find_if_case_1 uses redirect_edge_and_branch_force in
+ the only path that does an optimization, and this causes
+ an ICE when branch targets are in registers.
+ - find_if_case_2 doesn't do anything for the SHmedia after
+ reload except when it can redirect a tablejump - and
+ that's rather rare. */
+ flag_if_conversion2 = 0;
+ if (! strcmp (sh_div_str, "call"))
+ sh_div_strategy = SH_DIV_CALL;
+ else if (! strcmp (sh_div_str, "call2"))
+ sh_div_strategy = SH_DIV_CALL2;
+ if (! strcmp (sh_div_str, "fp") && TARGET_FPU_ANY)
+ sh_div_strategy = SH_DIV_FP;
+ else if (! strcmp (sh_div_str, "inv"))
+ sh_div_strategy = SH_DIV_INV;
+ else if (! strcmp (sh_div_str, "inv:minlat"))
+ sh_div_strategy = SH_DIV_INV_MINLAT;
+ else if (! strcmp (sh_div_str, "inv20u"))
+ sh_div_strategy = SH_DIV_INV20U;
+ else if (! strcmp (sh_div_str, "inv20l"))
+ sh_div_strategy = SH_DIV_INV20L;
+ else if (! strcmp (sh_div_str, "inv:call2"))
+ sh_div_strategy = SH_DIV_INV_CALL2;
+ else if (! strcmp (sh_div_str, "inv:call"))
+ sh_div_strategy = SH_DIV_INV_CALL;
+ else if (! strcmp (sh_div_str, "inv:fp"))
+ {
+ if (TARGET_FPU_ANY)
+ sh_div_strategy = SH_DIV_INV_FP;
+ else
+ sh_div_strategy = SH_DIV_INV;
+ }
+ TARGET_CBRANCHDI4 = 0;
+ /* Assembler CFI isn't yet fully supported for SHmedia. */
+ flag_dwarf2_cfi_asm = 0;
+ }
+ }
+ else
+ {
+ /* Only the sh64-elf assembler supports .quad properly. */
+ targetm.asm_out.aligned_op.di = NULL;
+ targetm.asm_out.unaligned_op.di = NULL;
+ }
+ if (TARGET_SH1)
+ {
+ if (! strcmp (sh_div_str, "call-div1"))
+ sh_div_strategy = SH_DIV_CALL_DIV1;
+ else if (! strcmp (sh_div_str, "call-fp")
+ && (TARGET_FPU_DOUBLE
+ || (TARGET_HARD_SH4 && TARGET_SH2E)
+ || (TARGET_SHCOMPACT && TARGET_FPU_ANY)))
+ sh_div_strategy = SH_DIV_CALL_FP;
+ else if (! strcmp (sh_div_str, "call-table") && TARGET_SH2)
+ sh_div_strategy = SH_DIV_CALL_TABLE;
+ else
+ /* Pick one that makes most sense for the target in general.
+ It is not much good to use different functions depending
+ on -Os, since then we'll end up with two different functions
+ when some of the code is compiled for size, and some for
+ speed. */
+
+ /* SH4 tends to emphasize speed. */
+ if (TARGET_HARD_SH4)
+ sh_div_strategy = SH_DIV_CALL_TABLE;
+ /* These have their own way of doing things. */
+ else if (TARGET_SH2A)
+ sh_div_strategy = SH_DIV_INTRINSIC;
+ /* ??? Should we use the integer SHmedia function instead? */
+ else if (TARGET_SHCOMPACT && TARGET_FPU_ANY)
+ sh_div_strategy = SH_DIV_CALL_FP;
+ /* SH1 .. SH3 cores often go into small-footprint systems, so
+ default to the smallest implementation available. */
+ else if (TARGET_SH2) /* ??? EXPERIMENTAL */
+ sh_div_strategy = SH_DIV_CALL_TABLE;
+ else
+ sh_div_strategy = SH_DIV_CALL_DIV1;
+ }
+ if (!TARGET_SH1)
+ TARGET_PRETEND_CMOVE = 0;
+ if (sh_divsi3_libfunc[0])
+ ; /* User supplied - leave it alone. */
+ else if (TARGET_DIVIDE_CALL_FP)
+ sh_divsi3_libfunc = "__sdivsi3_i4";
+ else if (TARGET_DIVIDE_CALL_TABLE)
+ sh_divsi3_libfunc = "__sdivsi3_i4i";
+ else if (TARGET_SH5)
+ sh_divsi3_libfunc = "__sdivsi3_1";
+ else
+ sh_divsi3_libfunc = "__sdivsi3";
+ if (sh_branch_cost == -1)
+ sh_branch_cost
+ = TARGET_SH5 ? 1 : ! TARGET_SH2 || TARGET_HARD_SH4 ? 2 : 1;
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (! VALID_REGISTER_P (regno))
+ sh_register_names[regno][0] = '\0';
+
+ for (regno = 0; regno < ADDREGNAMES_SIZE; regno++)
+ if (! VALID_REGISTER_P (ADDREGNAMES_REGNO (regno)))
+ sh_additional_register_names[regno][0] = '\0';
+
+ flag_omit_frame_pointer = (PREFERRED_DEBUGGING_TYPE == DWARF2_DEBUG);
+
+ if ((flag_pic && ! TARGET_PREFERGOT)
+ || (TARGET_SHMEDIA && !TARGET_PT_FIXED))
+ flag_no_function_cse = 1;
+
+ if (targetm.small_register_classes_for_mode_p (VOIDmode))
+ {
+ /* Never run scheduling before reload, since that can
+ break global alloc, and generates slower code anyway due
+ to the pressure on R0. */
+ /* Enable sched1 for SH4 only if the user explicitly requests it.
+ When sched1 is enabled, the ready queue will be reordered by
+ the target hooks if pressure is high. We cannot do this for
+ PIC, SH3 and lower as they give spill failures for R0. */
+ if (!TARGET_HARD_SH4 || flag_pic)
+ flag_schedule_insns = 0;
+ /* ??? Current exception handling places basic block boundaries
+ after call_insns. This causes high pressure on R0 and gives
+ spill failures for R0 in reload. See PR 22553 and the thread
+ on gcc-patches
+ <http://gcc.gnu.org/ml/gcc-patches/2005-10/msg00816.html>. */
+ else if (flag_exceptions)
+ {
+ if (flag_schedule_insns == 1)
+ warning (0, "ignoring -fschedule-insns because of exception handling bug");
+ flag_schedule_insns = 0;
+ }
+ else if (flag_schedule_insns == 2)
+ flag_schedule_insns = 0;
+ }
+
+ if ((target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS) == 0)
+ target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
+
+ /* Unwind info is not correct around the CFG unless either a frame
+ pointer is present or M_A_O_A is set. Fixing this requires rewriting
+ unwind info generation to be aware of the CFG and propagating states
+ around edges. */
+ if ((flag_unwind_tables || flag_asynchronous_unwind_tables
+ || flag_exceptions || flag_non_call_exceptions)
+ && flag_omit_frame_pointer
+ && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
+ {
+ if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
+ warning (0, "unwind tables currently require either a frame pointer "
+ "or -maccumulate-outgoing-args for correctness");
+ target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
+ }
+
+ /* Unwinding with -freorder-blocks-and-partition does not work on this
+ architecture, because it requires far jumps to labels crossing between
+ hot/cold sections, which are rejected on this architecture. */
+ if (flag_reorder_blocks_and_partition)
+ {
+ if (flag_exceptions)
+ {
+ inform (input_location,
+ "-freorder-blocks-and-partition does not work with "
+ "exceptions on this architecture");
+ flag_reorder_blocks_and_partition = 0;
+ flag_reorder_blocks = 1;
+ }
+ else if (flag_unwind_tables)
+ {
+ inform (input_location,
+ "-freorder-blocks-and-partition does not support unwind "
+ "info on this architecture");
+ flag_reorder_blocks_and_partition = 0;
+ flag_reorder_blocks = 1;
+ }
+ }
+
+ if (align_loops == 0)
+ align_loops = 1 << (TARGET_SH5 ? 3 : 2);
+ if (align_jumps == 0)
+ align_jumps = 1 << CACHE_LOG;
+ else if (align_jumps < (TARGET_SHMEDIA ? 4 : 2))
+ align_jumps = TARGET_SHMEDIA ? 4 : 2;
+
+ /* Allocation boundary (in *bytes*) for the code of a function.
+ SH1: 32 bit alignment is faster, because instructions are always
+ fetched as a pair from a longword boundary.
+ SH2 .. SH5 : align to cache line start. */
+ if (align_functions == 0)
+ align_functions
+ = TARGET_SMALLCODE ? FUNCTION_BOUNDARY / 8 : (1 << CACHE_LOG);
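+ /* For illustration: on a core where CACHE_LOG is 5 (a 32-byte cache
+    line), functions start on 32-byte boundaries, while TARGET_SMALLCODE
+    falls back to the minimal FUNCTION_BOUNDARY / 8 byte alignment.  */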
+ /* The linker relaxation code breaks when a function contains
+ alignments that are larger than that at the start of a
+ compilation unit. */
+ if (TARGET_RELAX)
+ {
+ int min_align
+ = align_loops > align_jumps ? align_loops : align_jumps;
+
+ /* Also take possible .long constants / mova tables into account. */
+ if (min_align < 4)
+ min_align = 4;
+ if (align_functions < min_align)
+ align_functions = min_align;
+ }
+
+ if (sh_fixed_range_str)
+ sh_fix_range (sh_fixed_range_str);
+}
+\f
/* Print the operand address in x to the stream. */
void
break;
case 't':
- gcc_assert (GET_CODE (x) == MEM);
+ gcc_assert (MEM_P (x));
x = XEXP (x, 0);
switch (GET_CODE (x))
{
case 'M':
if (TARGET_SHMEDIA)
{
- if (GET_CODE (x) == MEM
+ if (MEM_P (x)
&& GET_CODE (XEXP (x, 0)) == PLUS
- && (GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
+ && (REG_P (XEXP (XEXP (x, 0), 1))
|| GET_CODE (XEXP (XEXP (x, 0), 1)) == SUBREG))
fputc ('x', stream);
}
else
{
- if (GET_CODE (x) == MEM)
+ if (MEM_P (x))
{
switch (GET_MODE (x))
{
break;
case 'm':
- gcc_assert (GET_CODE (x) == MEM);
+ gcc_assert (MEM_P (x));
x = XEXP (x, 0);
/* Fall through. */
case 'U':
break;
case 'd':
- gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == V2SFmode);
+ gcc_assert (REG_P (x) && GET_MODE (x) == V2SFmode);
fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
break;
}
goto default_output;
case 'u':
- if (GET_CODE (x) == CONST_INT)
+ if (CONST_INT_P (x))
{
fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
break;
== GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
&& subreg_lowpart_p (inner))
inner = SUBREG_REG (inner);
- if (GET_CODE (inner) == CONST_INT)
+ if (CONST_INT_P (inner))
{
x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
goto default_output;
if (GET_CODE (inner) == SUBREG
&& (GET_MODE_SIZE (GET_MODE (inner))
< GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
- && GET_CODE (SUBREG_REG (inner)) == REG)
+ && REG_P (SUBREG_REG (inner)))
{
offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
GET_MODE (SUBREG_REG (inner)),
GET_MODE (inner));
inner = SUBREG_REG (inner);
}
- if (GET_CODE (inner) != REG || GET_MODE_SIZE (inner_mode) > 8)
+ if (!REG_P (inner) || GET_MODE_SIZE (inner_mode) > 8)
abort ();
/* Floating point register pairs are always big endian;
general purpose registers are 64 bit wide. */
goto default_output;
case SUBREG:
gcc_assert (SUBREG_BYTE (x) == 0
- && GET_CODE (SUBREG_REG (x)) == REG);
+ && REG_P (SUBREG_REG (x)));
x = SUBREG_REG (x);
/* Fall through. */
else if (FP_REGISTER_P (REGNO (x))
&& mode == V4SFmode)
fprintf ((stream), "fv%s", reg_names[regno] + 2);
- else if (GET_CODE (x) == REG
+ else if (REG_P (x)
&& mode == V2SFmode)
fprintf ((stream), "fp%s", reg_names[regno] + 2);
else if (FP_REGISTER_P (REGNO (x))
expand_block_move (rtx *operands)
{
int align = INTVAL (operands[3]);
- int constp = (GET_CODE (operands[2]) == CONST_INT);
+ int constp = (CONST_INT_P (operands[2]));
int bytes = (constp ? INTVAL (operands[2]) : 0);
if (! constp)
rtx temp;
if (SYMBOLIC_CONST_P (operands[1]))
{
- if (GET_CODE (operands[0]) == MEM)
+ if (MEM_P (operands[0]))
operands[1] = force_reg (Pmode, operands[1]);
else if (TARGET_SHMEDIA
&& GET_CODE (operands[1]) == LABEL_REF
&& ! sh_register_operand (operands[1], mode))
operands[1] = copy_to_mode_reg (mode, operands[1]);
- if (GET_CODE (operands[0]) == MEM && ! memory_operand (operands[0], mode))
+ if (MEM_P (operands[0]) && ! memory_operand (operands[0], mode))
{
/* This is like change_address_1 (operands[0], mode, 0, 1),
except that we can't use that function because it is static. */
being used for the source. */
else if (TARGET_SH1
&& refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
- && GET_CODE (operands[0]) == MEM
+ && MEM_P (operands[0])
&& GET_CODE (XEXP (operands[0], 0)) == PLUS
- && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG)
+ && REG_P (XEXP (XEXP (operands[0], 0), 1)))
operands[1] = copy_to_mode_reg (mode, operands[1]);
}
comparison = GET_CODE (operands[0]);
else
scratch = operands[4];
- if (GET_CODE (operands[1]) == CONST_INT
- && GET_CODE (operands[2]) != CONST_INT)
+ if (CONST_INT_P (operands[1])
+ && !CONST_INT_P (operands[2]))
{
rtx tmp = operands[1];
operands[1] = operands[2];
operands[2] = tmp;
comparison = swap_condition (comparison);
}
- if (GET_CODE (operands[2]) == CONST_INT)
+ if (CONST_INT_P (operands[2]))
{
HOST_WIDE_INT val = INTVAL (operands[2]);
if ((val == -1 || val == -0x81)
allocated to a different hard register, thus we load the constant into
a register unless it is zero. */
if (!REG_P (operands[2])
- && (GET_CODE (operands[2]) != CONST_INT
+ && (!CONST_INT_P (operands[2])
|| (mode == SImode && operands[2] != CONST0_RTX (SImode)
&& ((comparison != EQ && comparison != NE)
|| (REG_P (op1) && REGNO (op1) != R0_REG)
break;
case GTU: case GT:
msw_taken = comparison;
- if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)
+ if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
break;
if (comparison != GTU || op2h != CONST0_RTX (SImode))
msw_skip = swap_condition (msw_taken);
lsw_taken = LTU;
break;
case LEU: case LE:
- if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)
+ if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
msw_taken = comparison;
else
{
rtx dst = operands[0];
rtx src = operands[1];
- if (GET_CODE (dst) == MEM
+ if (MEM_P (dst)
&& GET_CODE (XEXP (dst, 0)) == PRE_DEC)
return "mov.l %T1,%0\n\tmov.l %1,%0";
else
return "mov %1,%0\n\tmov %T1,%T0";
}
- else if (GET_CODE (src) == CONST_INT)
+ else if (CONST_INT_P (src))
{
if (INTVAL (src) < 0)
output_asm_insn ("mov #-1,%S0", operands);
return "mov %1,%R0";
}
- else if (GET_CODE (src) == MEM)
+ else if (MEM_P (src))
{
int ptrreg = -1;
int dreg = REGNO (dst);
supported, so we can't use the 'o' constraint.
Thus we must check for and handle r0+REG addresses here.
We punt for now, since this is likely very rare. */
- gcc_assert (GET_CODE (XEXP (inside, 1)) != REG);
+ gcc_assert (!REG_P (XEXP (inside, 1)));
break;
case LABEL_REF:
jump = "mov.l %O0,%1; jmp @%1";
}
/* If we have a scratch register available, use it. */
- if (GET_CODE ((prev = prev_nonnote_insn (insn))) == INSN
+ if (NONJUMP_INSN_P ((prev = prev_nonnote_insn (insn)))
&& INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
{
this_jmp.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
{
rtx next_insn = NEXT_INSN (insn);
- if (next_insn && GET_CODE (next_insn) == JUMP_INSN && condjump_p (next_insn))
+ if (next_insn && JUMP_P (next_insn) && condjump_p (next_insn))
{
rtx src = SET_SRC (PATTERN (next_insn));
if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
if (!reload_completed || !flag_pic)
return false;
- if (GET_CODE (insn) != INSN)
+ if (!NONJUMP_INSN_P (insn))
return false;
if (asm_noperands (insn) >= 0)
return false;
if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
{
if (GET_MODE (x) == DImode
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) == 1)
return 2;
return MAX_COST;
}
/* If shift by a non constant, then this will be expensive. */
- if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ if (!CONST_INT_P (XEXP (x, 1)))
return SH_DYNAMIC_SHIFT_COST;
/* Otherwise, return the true cost in instructions. Cope with out of range
int i;
/* Anding with a register is a single cycle and instruction. */
- if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ if (!CONST_INT_P (XEXP (x, 1)))
return 1;
i = INTVAL (XEXP (x, 1));
addsubcosts (rtx x)
{
/* Adding a register is a single cycle insn. */
- if (GET_CODE (XEXP (x, 1)) == REG
+ if (REG_P (XEXP (x, 1))
|| GET_CODE (XEXP (x, 1)) == SUBREG)
return 1;
/* Likewise for small constants. */
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
return 1;
{
/* There is a two instruction sequence for 31 bit left shifts,
but it requires r0. */
- if (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 0)
+ if (REG_P (operands[0]) && REGNO (operands[0]) == 0)
{
emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
if (TARGET_SH3)
{
- if (GET_CODE (operands[2]) != CONST_INT)
+ if (!CONST_INT_P (operands[2]))
{
rtx count = copy_to_mode_reg (SImode, operands[2]);
emit_insn (gen_negsi2 (count, count));
return 1;
}
}
- if (GET_CODE (operands[2]) != CONST_INT)
+ if (!CONST_INT_P (operands[2]))
return 0;
value = INTVAL (operands[2]) & 31;
if (left < 0 || left > 31)
return 0;
- if (GET_CODE (mask_rtx) == CONST_INT)
+ if (CONST_INT_P (mask_rtx))
mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
else
mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
scan = emit_insn_after (gen_align_4 (), scan);
need_align = 0;
for (; start != barrier; start = NEXT_INSN (start))
- if (GET_CODE (start) == INSN
+ if (NONJUMP_INSN_P (start)
&& recog_memoized (start) == CODE_FOR_casesi_worker_2)
{
rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
static int
hi_const (rtx src)
{
- return (GET_CODE (src) == CONST_INT
+ return (CONST_INT_P (src)
&& INTVAL (src) >= -32768
&& INTVAL (src) <= 32767);
}
static int
broken_move (rtx insn)
{
- if (GET_CODE (insn) == INSN)
+ if (NONJUMP_INSN_P (insn))
{
rtx pat = PATTERN (insn);
if (GET_CODE (pat) == PARALLEL)
&& GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
&& (fp_zero_operand (SET_SRC (pat))
|| fp_one_operand (SET_SRC (pat)))
- /* ??? If this is a -m4 or -m4-single compilation, in general
- we don't know the current setting of fpscr, so disable fldi.
+ /* In general, we don't know the current setting of fpscr, so disable fldi.
There is an exception if this was a register-register move
before reload - and hence it was ascertained that we have
single precision setting - and in a post-reload optimization
we changed this to do a constant load. In that case
we don't have an r0 clobber, hence we must use fldi. */
- && (! TARGET_SH4 || TARGET_FMOVD
+ && (TARGET_FMOVD
|| (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
== SCRATCH))
- && GET_CODE (SET_DEST (pat)) == REG
+ && REG_P (SET_DEST (pat))
&& FP_REGISTER_P (REGNO (SET_DEST (pat))))
&& ! (TARGET_SH2A
&& GET_MODE (SET_DEST (pat)) == SImode
static int
mova_p (rtx insn)
{
- return (GET_CODE (insn) == INSN
+ return (NONJUMP_INSN_P (insn)
&& GET_CODE (PATTERN (insn)) == SET
&& GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
&& XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
{
worker = NEXT_INSN (worker);
gcc_assert (worker
- && GET_CODE (worker) != CODE_LABEL
- && GET_CODE (worker) != JUMP_INSN);
- } while (GET_CODE (worker) == NOTE
+ && !LABEL_P (worker)
+ && !JUMP_P (worker));
+ } while (NOTE_P (worker)
|| recog_memoized (worker) != CODE_FOR_casesi_worker_1);
wpat = PATTERN (worker);
wpat0 = XVECEXP (wpat, 0, 0);
int si_limit;
int hi_limit;
rtx orig = from;
+ rtx last_got = NULL_RTX;
+ rtx last_symoff = NULL_RTX;
/* For HImode: range is 510, add 4 because pc counts from address of
second instruction after this one, subtract 2 for the jump instruction
call, determine the alignment. N.B. When find_barrier recurses for
an out-of-reach mova, we might see labels at the start of previously
inserted constant tables. */
- if (GET_CODE (from) == CODE_LABEL
+ if (LABEL_P (from)
&& CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
{
if (optimize)
new_align = 1 << label_to_alignment (from);
- else if (GET_CODE (prev_nonnote_insn (from)) == BARRIER)
+ else if (BARRIER_P (prev_nonnote_insn (from)))
new_align = 1 << barrier_align (from);
else
new_align = 1;
for explicit alignments. If the table is long, we might be forced
to emit the new table in front of it; the length of the alignment
might be the last straw. */
- else if (GET_CODE (from) == INSN
+ else if (NONJUMP_INSN_P (from)
&& GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
&& XINT (PATTERN (from), 1) == UNSPECV_ALIGN)
new_align = INTVAL (XVECEXP (PATTERN (from), 0, 0));
at the end. That is better than putting it in front because
this way, we don't need extra alignment for adding a 4-byte-aligned
mov(a) label to a 2/4 or 8/4 byte aligned table. */
- else if (GET_CODE (from) == INSN
+ else if (NONJUMP_INSN_P (from)
&& GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
&& XINT (PATTERN (from), 1) == UNSPECV_CONST_END)
return from;
- if (GET_CODE (from) == BARRIER)
+ if (BARRIER_P (from))
{
rtx next;
dst = SET_DEST (pat);
mode = GET_MODE (dst);
+ /* A GOT pc-relative setting comes in a pair of instructions:
+ mova .L8,r0
+ mov.l .L8,r12
+ (plus an add r0,r12).
+ Remember if we see one without the other. */
+ if (GET_CODE (src) == UNSPEC && PIC_ADDR_P (XVECEXP (src, 0, 0)))
+ last_got = last_got ? NULL_RTX : from;
+ else if (PIC_ADDR_P (src))
+ last_got = last_got ? NULL_RTX : from;
+
/* We must explicitly check the mode, because sometimes the
front end will generate code to load unsigned constants into
HImode targets without properly sign extending them. */
{
switch (untangle_mova (&num_mova, &mova, from))
{
+ case 1:
+ if (flag_pic)
+ {
+ rtx src = SET_SRC (PATTERN (from));
+ if (GET_CODE (src) == CONST
+ && GET_CODE (XEXP (src, 0)) == UNSPEC
+ && XINT (XEXP (src, 0), 1) == UNSPEC_SYMOFF)
+ last_symoff = from;
+ }
+ break;
case 0: return find_barrier (0, 0, mova);
case 2:
{
if (found_si > count_si)
count_si = found_si;
}
- else if (GET_CODE (from) == JUMP_INSN
- && (GET_CODE (PATTERN (from)) == ADDR_VEC
- || GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC))
+ else if (JUMP_TABLE_DATA_P (from))
{
if ((num_mova > 1 && GET_MODE (prev_nonnote_insn (from)) == VOIDmode)
|| (num_mova
}
}
/* For the SH1, we generate alignments even after jumps-around-jumps. */
- else if (GET_CODE (from) == JUMP_INSN
+ else if (JUMP_P (from)
&& ! TARGET_SH2
&& ! TARGET_SMALLCODE)
new_align = 4;
+ /* There is a possibility that a bf is transformed into a bf/s by the
+ delay slot scheduler. */
+ if (JUMP_P (from) && !JUMP_TABLE_DATA_P (from)
+ && get_attr_type (from) == TYPE_CBRANCH
+ && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (from)))) != SEQUENCE)
+ inc += 2;
+
if (found_si)
{
count_si += inc;
so we'll make one. */
rtx label = gen_label_rtx ();
+ /* Don't emit a constant table in the middle of insns for
+ casesi_worker_2. This is a bit overkill but is enough
+ because casesi_worker_2 does not appear very frequently. */
+ if (last_symoff)
+ from = last_symoff;
+
/* If we exceeded the range, then we must back up over the last
instruction we looked at. Otherwise, we just need to undo the
NEXT_INSN at the end of the loop. */
else
from = PREV_INSN (from);
+ /* Don't emit a constant table in the middle of global pointer setting,
+ since that would move the addressing base GOT into another table.
+ We need the first mov instruction before the _GLOBAL_OFFSET_TABLE_
+ in the pool anyway, so just move up the whole constant pool. */
+ if (last_got)
+ from = PREV_INSN (last_got);
+
+ /* Don't insert the constant pool table at a position which
+ may be a landing pad. */
+ if (flag_exceptions
+ && CALL_P (from)
+ && find_reg_note (from, REG_EH_REGION, NULL_RTX))
+ from = PREV_INSN (from);
+
/* Walk back to be just before any jump or label.
Putting it before a label reduces the number of times the branch
around the constant pool table will be hit. Putting it before
a jump makes it more likely that the bra delay slot will be
filled. */
- while (GET_CODE (from) == JUMP_INSN || GET_CODE (from) == NOTE
- || GET_CODE (from) == CODE_LABEL)
+ while (NOTE_P (from) || JUMP_P (from)
+ || LABEL_P (from))
from = PREV_INSN (from);
from = emit_jump_insn_after (gen_jump (label), from);
int i;
rtx pattern, part, reg_part, reg;
- if (GET_CODE (insn) != INSN)
+ if (!NONJUMP_INSN_P (insn))
return 0;
pattern = PATTERN (insn);
if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
if (part == reg_part || GET_CODE (part) == CLOBBER)
continue;
if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
- && GET_CODE (SET_DEST (part)) == REG)
+ && REG_P (SET_DEST (part)))
? SET_SRC (part) : part)))
return 0;
}
{
pattern = single_set (insn);
if (pattern
- && GET_CODE (SET_DEST (pattern)) == REG
+ && REG_P (SET_DEST (pattern))
&& REGNO (reg) == REGNO (SET_DEST (pattern)))
*set = pattern;
return 0;
}
- if (GET_CODE (insn) != CALL_INSN)
+ if (!CALL_P (insn))
{
/* We don't use rtx_equal_p because we don't care if the mode is
different. */
pattern = single_set (insn);
if (pattern
- && GET_CODE (SET_DEST (pattern)) == REG
+ && REG_P (SET_DEST (pattern))
&& REGNO (reg) == REGNO (SET_DEST (pattern)))
{
rtx par, part;
{
/* We don't use rtx_equal_p, because we don't care if the
mode is different. */
- if (GET_CODE (SET_DEST (pattern)) != REG
+ if (!REG_P (SET_DEST (pattern))
|| REGNO (reg) != REGNO (SET_DEST (pattern)))
return 1;
}
if (GET_CODE (pattern) != CALL
- || GET_CODE (XEXP (pattern, 0)) != MEM
+ || !MEM_P (XEXP (pattern, 0))
|| ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
return 1;
{
rtx y = SUBREG_REG (x);
- if (GET_CODE (y) != REG)
+ if (!REG_P (y))
break;
if (REGNO (y) < 16)
return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
rtx dest;
/* First, check if we already have an instruction that satisfies our need. */
- if (prev && GET_CODE (prev) == INSN && ! INSN_DELETED_P (prev))
+ if (prev && NONJUMP_INSN_P (prev) && ! INSN_DELETED_P (prev))
{
if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
return prev;
else if (optimize && need_block >= 0)
{
rtx next = next_active_insn (next_active_insn (dest));
- if (next && GET_CODE (next) == JUMP_INSN
+ if (next && JUMP_P (next)
&& GET_CODE (PATTERN (next)) == SET
&& recog_memoized (next) == CODE_FOR_jump_compact)
{
branch; simplejump_p fails for indirect jumps even if they have
a JUMP_LABEL. */
rtx insn = emit_insn_before (gen_indirect_jump_scratch
- (reg, GEN_INT (INSN_UID (JUMP_LABEL (jump))))
- , jump);
+ (reg, GEN_INT (unspec_bbr_uid++)),
+ jump);
/* ??? We would like this to have the scope of the jump, but that
scope will change when a delay slot insn of an inner scope is added.
Hence, after delay slot scheduling, we'll have to expect
/* We can't use JUMP_LABEL here because it might be undefined
when not optimizing. */
return emit_insn_before (gen_block_branch_redirect
- (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))))
- , jump);
+ (GEN_INT (unspec_bbr_uid++)),
+ jump);
return prev;
}
if (bp->far_label)
(emit_insn_after
(gen_stuff_delay_slot
- (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
+ (GEN_INT (unspec_bbr_uid++),
GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
insn));
/* Prevent reorg from undoing our splits. */
{
rtx vec_lab, pat, prev, prevpat, x, braf_label;
- if (GET_CODE (insn) != JUMP_INSN
+ if (!JUMP_P (insn)
|| GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
continue;
pat = PATTERN (insn);
/* Search the matching casesi_jump_2. */
for (prev = vec_lab; ; prev = PREV_INSN (prev))
{
- if (GET_CODE (prev) != JUMP_INSN)
+ if (!JUMP_P (prev))
continue;
prevpat = PATTERN (prev);
if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
prev = prev_real_insn (prev);
for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
- credit >= 0 && prev && GET_CODE (prev) == INSN;
+ credit >= 0 && prev && NONJUMP_INSN_P (prev);
prev = prev_real_insn (prev))
{
jump_to_next = 0;
credit -= get_attr_length (prev);
}
if (prev
- && GET_CODE (prev) == JUMP_INSN
+ && JUMP_P (prev)
&& JUMP_LABEL (prev))
{
rtx x;
do
next = next_nonnote_insn (next);
- while (next && GET_CODE (next) == CODE_LABEL);
+ while (next && LABEL_P (next));
if (! next
|| ! INSN_P (next)
rtx pattern, reg, link, set, scan, dies, label;
int rescan = 0, foundinsn = 0;
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
{
pattern = PATTERN (insn);
pattern = SET_SRC (pattern);
if (GET_CODE (pattern) != CALL
- || GET_CODE (XEXP (pattern, 0)) != MEM)
+ || !MEM_P (XEXP (pattern, 0)))
continue;
reg = XEXP (XEXP (pattern, 0), 0);
continue;
}
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
continue;
/* Try scanning backward to find where the register is set. */
link = NULL;
for (scan = PREV_INSN (insn);
- scan && GET_CODE (scan) != CODE_LABEL;
+ scan && !LABEL_P (scan);
scan = PREV_INSN (scan))
{
if (! INSN_P (scan))
the call, and can result in situations where a single call
insn may have two targets depending on where we came from. */
- if (GET_CODE (scan) == CODE_LABEL && ! foundinsn)
+ if (LABEL_P (scan) && ! foundinsn)
break;
if (! INSN_P (scan))
safely, we would have to check that all the
instructions at the jump destination did not use REG. */
- if (GET_CODE (scan) == JUMP_INSN)
+ if (JUMP_P (scan))
break;
if (! reg_mentioned_p (reg, scan))
foundinsn = 1;
if (scan != insn
- && (GET_CODE (scan) == CALL_INSN || sfunc_uses_reg (scan)))
+ && (CALL_P (scan) || sfunc_uses_reg (scan)))
{
/* There is a function call to this register other
than the one we are checking. If we optimize
scan = NEXT_INSN (scan);
if (scan != insn
- && ((GET_CODE (scan) == CALL_INSN
+ && ((CALL_P (scan)
&& reg_mentioned_p (reg, scan))
|| ((reg2 = sfunc_uses_reg (scan))
&& REGNO (reg2) == REGNO (reg))))
num_mova = 0;
}
}
- else if (GET_CODE (insn) == JUMP_INSN
+ else if (JUMP_P (insn)
&& GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
&& num_mova
/* ??? loop invariant motion can also move a mova out of a
}
}
if (broken_move (insn)
- || (GET_CODE (insn) == INSN
+ || (NONJUMP_INSN_P (insn)
&& recog_memoized (insn) == CODE_FOR_casesi_worker_2))
{
rtx scan;
/* Now find all the moves between the points and modify them. */
for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
{
- if (GET_CODE (scan) == CODE_LABEL)
+ if (LABEL_P (scan))
last_float = 0;
- if (GET_CODE (scan) == INSN
+ if (NONJUMP_INSN_P (scan)
&& recog_memoized (scan) == CODE_FOR_casesi_worker_2)
need_aligned_label = 1;
if (broken_move (scan))
}
dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
}
- if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
+ if (REG_P (dst) && FP_ANY_REGISTER_P (REGNO (dst)))
{
/* This must be an insn that clobbers r0. */
rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
dest = NEXT_INSN (dest);
dest_uid = INSN_UID (dest);
}
- if (GET_CODE (dest) == JUMP_INSN && GET_CODE (PATTERN (dest)) == RETURN)
+ if (JUMP_P (dest) && GET_CODE (PATTERN (dest)) == RETURN)
return 0;
return dest_uid;
}
so transform it into a note. */
SET_INSN_DELETED (insn);
}
- else if (GET_CODE (insn) == JUMP_INSN
+ else if (JUMP_P (insn)
/* Don't mess with ADDR_DIFF_VEC */
&& (GET_CODE (PATTERN (insn)) == SET
|| GET_CODE (PATTERN (insn)) == RETURN))
0));
if (beyond
- && (GET_CODE (beyond) == JUMP_INSN
+ && (JUMP_P (beyond)
|| ((beyond = next_active_insn (beyond))
- && GET_CODE (beyond) == JUMP_INSN))
+ && JUMP_P (beyond)))
&& GET_CODE (PATTERN (beyond)) == SET
&& recog_memoized (beyond) == CODE_FOR_jump_compact
&& ((INSN_ADDRESSES
next = next_active_insn (insn);
- if ((GET_CODE (next) == JUMP_INSN
- || ((next = next_active_insn (next))
- && GET_CODE (next) == JUMP_INSN))
+ if (next
+ && (JUMP_P (next)
+ || ((next = next_active_insn (next))
+ && JUMP_P (next)))
&& GET_CODE (PATTERN (next)) == SET
&& recog_memoized (next) == CODE_FOR_jump_compact
&& ((INSN_ADDRESSES
static void
output_stack_adjust (int size, rtx reg, int epilogue_p,
- HARD_REG_SET *live_regs_mask)
+ HARD_REG_SET *live_regs_mask, bool frame_p)
{
- rtx (*emit_fn) (rtx) = epilogue_p ? &emit_insn : &frame_insn;
+ rtx (*emit_fn) (rtx) = frame_p ? &frame_insn : &emit_insn;
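+ /* frame_p selects frame_insn, which marks the adjustment as frame
+    related for DWARF2 CFI; epilogue and pretend-arg adjustments pass
+    false and use plain emit_insn.  */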
if (size)
{
HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
{
rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
pr_live = (pr_initial
- ? (GET_CODE (pr_initial) != REG
+ ? (!REG_P (pr_initial)
|| REGNO (pr_initial) != (PR_REG))
: df_regs_ever_live_p (PR_REG));
/* For Shcompact, if not optimizing, we end up with a memory reference
HOST_WIDE_INT size = get_frame_size ();
HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
+ if (ACCUMULATE_OUTGOING_ARGS)
+ size += crtl->outgoing_args_size;
+
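+ /* For illustration: with size 6, pushed 8 and an 8-byte stack
+    boundary, ((6 + 8 + 7) & -8) - 8 == 8, i.e. the frame is grown
+    so that size + pushed is a multiple of the alignment.  */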
return ((size + pushed + align - 1) & -align) - pushed;
}
&& (NPARM_REGS(SImode)
> crtl->args.info.arg_count[(int) SH_ARG_INT]))
pretend_args = 0;
+ /* The dwarf2 module doesn't expect frame-related insns here. */
output_stack_adjust (-pretend_args
- crtl->args.info.stack_regs * 8,
- stack_pointer_rtx, 0, NULL);
+ stack_pointer_rtx, 0, NULL, false);
if (TARGET_SHCOMPACT && flag_pic && crtl->args.info.call_cookie)
/* We're going to use the PIC register to load the address of the
/* If we're supposed to switch stacks at function entry, do so now. */
if (sp_switch_attr)
{
+ rtx lab, newsrc;
/* The argument specifies a variable holding the address of the
stack the interrupt function should switch to/from at entry/exit. */
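+      /* A typical use looks like this (illustrative, not from this patch;
+	 the names alt_stack and isr are hypothetical):
+
+	   char *alt_stack;
+	   void isr (void)
+	     __attribute__ ((interrupt_handler, sp_switch ("alt_stack")));  */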
+ tree arg = TREE_VALUE (TREE_VALUE (sp_switch_attr));
const char *s
- = ggc_strdup (TREE_STRING_POINTER (TREE_VALUE (sp_switch_attr)));
+ = ggc_strdup (TREE_STRING_POINTER (arg));
rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);
- emit_insn (gen_sp_switch_1 (sp_switch));
+ lab = add_constant (sp_switch, SImode, 0);
+ newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
+ newsrc = gen_const_mem (SImode, newsrc);
+
+ emit_insn (gen_sp_switch_1 (newsrc));
}
d = calc_live_regs (&live_regs_mask);
offset_base = d + d_rounding;
output_stack_adjust (-(save_size + d_rounding), stack_pointer_rtx,
- 0, NULL);
+ 0, NULL, true);
sh5_schedule_saves (&live_regs_mask, &schedule, offset_base);
tmp_pnt = schedule.temps;
target_flags = save_flags;
output_stack_adjust (-rounded_frame_size (d) + d_rounding,
- stack_pointer_rtx, 0, NULL);
+ stack_pointer_rtx, 0, NULL, true);
if (frame_pointer_needed)
frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));
if (frame_pointer_needed)
{
- /* We must avoid scheduling the epilogue with previous basic blocks
- when exception handling is enabled. See PR/18032. */
- if (flag_exceptions)
- emit_insn (gen_blockage ());
+ /* We must avoid scheduling the epilogue with previous basic blocks.
+ See PR/18032 and PR/40313. */
+ emit_insn (gen_blockage ());
output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
- &live_regs_mask);
+ &live_regs_mask, false);
/* We must avoid moving the stack pointer adjustment past code
which reads from the local frame, else an interrupt could
occur after the SP adjustment and clobber data in the local
frame. */
emit_insn (gen_blockage ());
- output_stack_adjust (frame_size, stack_pointer_rtx, e, &live_regs_mask);
+ output_stack_adjust (frame_size, stack_pointer_rtx, e,
+ &live_regs_mask, false);
}
if (SHMEDIA_REGS_STACK_ADJUST ())
pop (PR_REG);
}
- /* Banked registers are poped first to avoid being scheduled in the
+ /* Banked registers are popped first to avoid being scheduled in the
delay slot. RTE switches banks before the ds instruction. */
if (current_function_interrupt)
{
- for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
- if (TEST_HARD_REG_BIT (live_regs_mask, i))
- pop (LAST_BANKED_REG - i);
+ for (i = LAST_BANKED_REG; i >= FIRST_BANKED_REG; i--)
+ if (TEST_HARD_REG_BIT (live_regs_mask, i))
+ pop (i);
last_reg = FIRST_PSEUDO_REGISTER - LAST_BANKED_REG - 1;
}
output_stack_adjust (crtl->args.pretend_args_size
+ save_size + d_rounding
+ crtl->args.info.stack_regs * 8,
- stack_pointer_rtx, e, NULL);
+ stack_pointer_rtx, e, NULL, false);
if (crtl->calls_eh_return)
emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
pr_offset = rounded_frame_size (d);
emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));
- emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));
+
+ if (frame_pointer_needed)
+ emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));
+ else
+ emit_insn (GEN_ADD3 (tmp, tmp, stack_pointer_rtx));
tmp = gen_frame_mem (Pmode, tmp);
emit_insn (GEN_MOV (tmp, ra));
+ /* Tell the optimizers that this store isn't dead. */
+ emit_use (tmp);
}
/* Clear variables at function end. */
record = (*lang_hooks.types.make_type) (RECORD_TYPE);
- f_next_o = build_decl (FIELD_DECL, get_identifier ("__va_next_o"),
+ f_next_o = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__va_next_o"),
ptr_type_node);
- f_next_o_limit = build_decl (FIELD_DECL,
+ f_next_o_limit = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL,
get_identifier ("__va_next_o_limit"),
ptr_type_node);
- f_next_fp = build_decl (FIELD_DECL, get_identifier ("__va_next_fp"),
+ f_next_fp = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__va_next_fp"),
ptr_type_node);
- f_next_fp_limit = build_decl (FIELD_DECL,
+ f_next_fp_limit = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL,
get_identifier ("__va_next_fp_limit"),
ptr_type_node);
- f_next_stack = build_decl (FIELD_DECL, get_identifier ("__va_next_stack"),
+ f_next_stack = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__va_next_stack"),
ptr_type_node);
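+  /* The record built here corresponds roughly to this C sketch
+     (illustrative only):
+
+       struct {
+         void *__va_next_o, *__va_next_o_limit;
+         void *__va_next_fp, *__va_next_fp_limit;
+         void *__va_next_stack;
+       };  */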
DECL_FIELD_CONTEXT (f_next_o) = record;
}
addr = create_tmp_var (pptr_type_node, NULL);
- lab_false = create_artificial_label ();
- lab_over = create_artificial_label ();
+ lab_false = create_artificial_label (UNKNOWN_LOCATION);
+ lab_over = create_artificial_label (UNKNOWN_LOCATION);
valist = build1 (INDIRECT_REF, ptr_type_node, addr);
if (result)
{
gimplify_assign (result, tmp, pre_p);
-
+ result = build1 (NOP_EXPR, TREE_TYPE (result), result);
tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_over));
gimplify_and_add (tmp, pre_p);
}
DBX_REGISTER_NUMBER (regno))));
}
-bool
+static enum machine_mode
+sh_promote_function_mode (const_tree type, enum machine_mode mode,
+ int *punsignedp, const_tree funtype,
+ int for_return ATTRIBUTE_UNUSED)
+{
+ if (sh_promote_prototypes (funtype))
+ return promote_mode (type, mode, punsignedp);
+ else
+ return mode;
+}
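+/* (So, for example, a prototyped char or short argument or return value
+   is widened to SImode whenever sh_promote_prototypes holds for the
+   function's type.)  */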
+
+static bool
sh_promote_prototypes (const_tree type)
{
if (TARGET_HITACHI)
return gen_rtx_REG (Pmode, 2);
}
+/* Worker function for TARGET_FUNCTION_VALUE.
+
+ For the SH, this is like LIBCALL_VALUE, except that we must change the
+ mode like PROMOTE_MODE does.
+ ??? PROMOTE_MODE is ignored for non-scalar types. The set of types
+ tested here has to be kept in sync with the one in explow.c:promote_mode. */
+
+static rtx
+sh_function_value (const_tree valtype,
+ const_tree fn_decl_or_type,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ if (fn_decl_or_type
+ && !DECL_P (fn_decl_or_type))
+ fn_decl_or_type = NULL;
+
+ return gen_rtx_REG (
+ ((GET_MODE_CLASS (TYPE_MODE (valtype)) == MODE_INT
+ && GET_MODE_SIZE (TYPE_MODE (valtype)) < 4
+ && (TREE_CODE (valtype) == INTEGER_TYPE
+ || TREE_CODE (valtype) == ENUMERAL_TYPE
+ || TREE_CODE (valtype) == BOOLEAN_TYPE
+ || TREE_CODE (valtype) == REAL_TYPE
+ || TREE_CODE (valtype) == OFFSET_TYPE))
+ && sh_promote_prototypes (fn_decl_or_type)
+ ? (TARGET_SHMEDIA64 ? DImode : SImode) : TYPE_MODE (valtype)),
+ BASE_RETURN_VALUE_REG (TYPE_MODE (valtype)));
+}
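+/* (E.g. a function declared to return short thus gets its value in SImode,
+   DImode on SHmedia64, in the usual return register.)  */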
+
+/* Worker function for TARGET_LIBCALL_VALUE. */
+
+static rtx
+sh_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (mode, BASE_RETURN_VALUE_REG (mode));
+}
+
+/* Worker function for FUNCTION_VALUE_REGNO_P. */
+
+bool
+sh_function_value_regno_p (const unsigned int regno)
+{
+ return ((regno) == FIRST_RET_REG
+ || (TARGET_SH2E && (regno) == FIRST_FP_RET_REG)
+ || (TARGET_SHMEDIA_FPU && (regno) == FIRST_FP_RET_REG));
+}
+
/* Worker function for TARGET_RETURN_IN_MEMORY. */
static bool
R0-R14, MACH, MACL, GBR and PR. This is useful only on SH2A targets.
*/
-const struct attribute_spec sh_attribute_table[] =
-{
- /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
- { "interrupt_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
- { "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
- { "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
- { "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
- { "trapa_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
- { "nosave_low_regs", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
- { "resbank", 0, 0, true, false, false, sh_handle_resbank_handler_attribute },
- { "function_vector", 1, 1, true, false, false, sh2a_handle_function_vector_handler_attribute },
-#ifdef SYMBIAN
- /* Symbian support adds three new attributes:
- dllexport - for exporting a function/variable that will live in a dll
- dllimport - for importing a function/variable from a dll
-
- Microsoft allows multiple declspecs in one __declspec, separating
- them with spaces. We do NOT support this. Instead, use __declspec
- multiple times. */
- { "dllimport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
- { "dllexport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
-#endif
- { NULL, 0, 0, false, false, false, NULL }
-};
-
/* Handle a 'resbank' attribute. */
static tree
sh_handle_resbank_handler_attribute (tree * node, tree name,
return REAL_VALUES_EQUAL (r, dconst1);
}
-/* For -m4 and -m4-single-only, mode switching is used. If we are
+/* In general, mode switching is used. If we are
compiling without -mfmovd, movsf_ie isn't taken into account for
mode switching. We could check in machine_dependent_reorg for
cases where we know we are in single precision mode, but there is
int
fldi_ok (void)
{
- return ! TARGET_SH4 || TARGET_FMOVD || reload_completed;
+ return 1;
}
int
case. Disregard the case where this is a store to memory, since
we are checking a register used in the store address. */
set = single_set (insn);
- if (set && GET_CODE (SET_DEST (set)) != MEM
+ if (set && !MEM_P (SET_DEST (set))
&& reg_overlap_mentioned_p (reg, SET_DEST (set)))
return 1;
rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
rtx set = single_set (this_insn);
- if (GET_CODE (this_insn) == CALL_INSN)
+ if (CALL_P (this_insn))
code = CALL_INSN;
- else if (GET_CODE (this_insn) == JUMP_INSN)
+ else if (JUMP_P (this_insn))
{
if (INSN_ANNULLED_BRANCH_P (this_insn))
return 0;
return 0;
if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
{
- if (GET_CODE (SET_DEST (set)) != MEM)
+ if (!MEM_P (SET_DEST (set)))
retval = 1;
else
return 0;
if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
return 0;
if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
- return GET_CODE (SET_DEST (set)) != MEM;
+ return !MEM_P (SET_DEST (set));
if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
return 0;
t = build_index_type (integer_one_node);
t = build_array_type (integer_type_node, t);
- t = build_decl (VAR_DECL, get_identifier ("__fpscr_values"), t);
+ t = build_decl (BUILTINS_LOCATION,
+ VAR_DECL, get_identifier ("__fpscr_values"), t);
DECL_ARTIFICIAL (t) = 1;
DECL_IGNORED_P (t) = 1;
DECL_EXTERNAL (t) = 1;
if (! TEST_HARD_REG_BIT (regs_live, 1))
return gen_rtx_REG (Pmode, 1);
- /* Hard reg 1 is live; since this is a SMALL_REGISTER_CLASSES target,
+ /* Hard reg 1 is live; since this is a small register classes target,
there shouldn't be anything but a jump before the function end. */
gcc_assert (!TEST_HARD_REG_BIT (regs_live, 7));
return gen_rtx_REG (Pmode, 7);
{
/* Instructions with unfilled delay slots take up an extra two bytes for
the nop in the delay slot. */
- if (((GET_CODE (insn) == INSN
+ if (((NONJUMP_INSN_P (insn)
&& GET_CODE (PATTERN (insn)) != USE
&& GET_CODE (PATTERN (insn)) != CLOBBER)
- || GET_CODE (insn) == CALL_INSN
- || (GET_CODE (insn) == JUMP_INSN
- && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
- && GET_CODE (PATTERN (insn)) != ADDR_VEC))
+ || CALL_P (insn)
+ || (JUMP_P (insn) && !JUMP_TABLE_DATA_P (insn)))
&& GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE
&& get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
return 2;
/* SH2e has a bug that prevents the use of annulled branches, so if
the delay slot is not filled, we'll have to put a NOP in it. */
if (sh_cpu_attr == CPU_SH2E
- && GET_CODE (insn) == JUMP_INSN
- && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
- && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && JUMP_P (insn) && !JUMP_TABLE_DATA_P (insn)
&& get_attr_type (insn) == TYPE_CBRANCH
&& GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE)
return 2;
/* sh-dsp parallel processing insn take four bytes instead of two. */
- if (GET_CODE (insn) == INSN)
+ if (NONJUMP_INSN_P (insn))
{
int sum = 0;
rtx body = PATTERN (insn);
bool
sh_legitimate_index_p (enum machine_mode mode, rtx op)
{
- if (GET_CODE (op) == CONST_INT)
+ if (CONST_INT_P (op))
{
if (TARGET_SHMEDIA)
{
REG++
--REG */
-bool
+static bool
sh_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
if (MAYBE_BASE_REGISTER_RTX_P (x, strict))
|| XINT (x, 1) == UNSPEC_GOTPLT
|| XINT (x, 1) == UNSPEC_GOTTPOFF
|| XINT (x, 1) == UNSPEC_DTPOFF
+ || XINT (x, 1) == UNSPEC_TPOFF
|| XINT (x, 1) == UNSPEC_PLT
|| XINT (x, 1) == UNSPEC_SYMOFF
|| XINT (x, 1) == UNSPEC_PCREL_SYMOFF))
if (GET_CODE (x) == PLUS
&& (GET_MODE_SIZE (mode) == 4
|| GET_MODE_SIZE (mode) == 8)
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (x, 1))
&& BASE_REGISTER_RTX_P (XEXP (x, 0))
&& ! TARGET_SHMEDIA
&& ! ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && mode == DFmode)
return x;
}
+/* Attempt to replace *P, which is an address that needs reloading, with
+ a valid memory address for an operand of mode MODE.
+ As with sh_legitimize_address, for the SH we try to get a normal form
+ of the address. That will allow inheritance of the address reloads. */
+
+bool
+sh_legitimize_reload_address (rtx *p, enum machine_mode mode, int opnum,
+ int itype)
+{
+ enum reload_type type = (enum reload_type) itype;
+
+ if (GET_CODE (*p) == PLUS
+ && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
+ && CONST_INT_P (XEXP (*p, 1))
+ && MAYBE_BASE_REGISTER_RTX_P (XEXP (*p, 0), true)
+ && ! TARGET_SHMEDIA
+ && ! (TARGET_SH4 && mode == DFmode)
+ && ! (mode == PSImode && type == RELOAD_FOR_INPUT_ADDRESS)
+ && (ALLOW_INDEXED_ADDRESS
+ || XEXP (*p, 0) == stack_pointer_rtx
+ || XEXP (*p, 0) == hard_frame_pointer_rtx))
+ {
+ rtx index_rtx = XEXP (*p, 1);
+ HOST_WIDE_INT offset = INTVAL (index_rtx), offset_base;
+ rtx sum;
+
+ if (TARGET_SH2A && mode == DFmode && (offset & 0x7))
+ {
+ push_reload (*p, NULL_RTX, p, NULL,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
+ goto win;
+ }
+ if (TARGET_SH2E && mode == SFmode)
+ {
+ *p = copy_rtx (*p);
+ push_reload (*p, NULL_RTX, p, NULL,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
+ goto win;
+ }
+ /* Instead of offset_base 128..131 use 124..127, so that
+ a simple add suffices. */
+ if (offset > 127)
+ offset_base = ((offset + 4) & ~60) - 4;
+ else
+ offset_base = offset & ~60;
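+      /* For illustration: offset 132 yields offset_base
+	 ((132 + 4) & ~60) - 4 == 124 and residual 8, so the reload adds
+	 #124 (which fits a signed 8-bit add immediate) to the base reg
+	 and the memory access uses the short @(8,Rn) form.  */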
+ /* Sometimes the normal form does not suit DImode. We could avoid
+ that by using smaller ranges, but that would give less optimized
+ code when SImode is prevalent. */
+ if (GET_MODE_SIZE (mode) + offset - offset_base <= 64)
+ {
+ sum = gen_rtx_PLUS (Pmode, XEXP (*p, 0), GEN_INT (offset_base));
+ *p = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - offset_base));
+ push_reload (sum, NULL_RTX, &XEXP (*p, 0), NULL,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
+ goto win;
+ }
+ }
+ /* We must re-recognize what we created before. */
+ else if (GET_CODE (*p) == PLUS
+ && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
+ && GET_CODE (XEXP (*p, 0)) == PLUS
+ && CONST_INT_P (XEXP (XEXP (*p, 0), 1))
+ && MAYBE_BASE_REGISTER_RTX_P (XEXP (XEXP (*p, 0), 0), true)
+ && CONST_INT_P (XEXP (*p, 1))
+ && ! TARGET_SHMEDIA
+ && ! (TARGET_SH2E && mode == SFmode))
+ {
+ /* Because this address is so complex, we know it must have
+ been created by LEGITIMIZE_RELOAD_ADDRESS before; thus,
+ it is already unshared, and needs no further unsharing. */
+ push_reload (XEXP (*p, 0), NULL_RTX, &XEXP (*p, 0), NULL,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0, opnum, type);
+ goto win;
+ }
+
+ return false;
+
+ win:
+ return true;
+}
+
/* Mark the use of a constant in the literal table. If the constant
has multiple labels, make it unique. */
static rtx
lab = x;
for (insn = PREV_INSN (x); insn; insn = PREV_INSN (insn))
{
- if (GET_CODE (insn) != CODE_LABEL
+ if (!LABEL_P (insn)
|| LABEL_REFS (insn) != NEXT_INSN (insn))
break;
lab = insn;
/* Mark constants in a window. */
for (insn = NEXT_INSN (x); insn; insn = NEXT_INSN (insn))
{
- if (GET_CODE (insn) != INSN)
+ if (!NONJUMP_INSN_P (insn))
continue;
pattern = PATTERN (insn);
}
/* The only input for a call that is timing-critical is the
function's address. */
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
{
rtx call = PATTERN (insn);
call = XVECEXP (call, 0 ,0);
if (GET_CODE (call) == SET)
call = SET_SRC (call);
- if (GET_CODE (call) == CALL && GET_CODE (XEXP (call, 0)) == MEM
+ if (GET_CODE (call) == CALL && MEM_P (XEXP (call, 0))
/* sibcalli_thunk uses a symbol_ref in an unspec. */
&& (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
|| ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
return 1;
if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
{
- if (GET_CODE (SET_DEST (x)) == REG)
+ if (REG_P (SET_DEST (x)))
{
if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
return 1;
if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
{
rtx note = XEXP (x, 0);
- if (GET_CODE (note) == REG && GET_MODE (note) == mode)
+ if (REG_P (note) && GET_MODE (note) == mode)
reg_weight--;
}
}
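/* Editorial aside: the GET_CODE -> predicate conversions throughout this
   patch are mechanical.  The predicates are thin wrappers defined in
   rtl.h, essentially:  */

#define REG_P(X)	   (GET_CODE (X) == REG)
#define MEM_P(X)	   (GET_CODE (X) == MEM)
#define CONST_INT_P(X)	   (GET_CODE (X) == CONST_INT)
#define LABEL_P(X)	   (GET_CODE (X) == CODE_LABEL)
#define JUMP_P(X)	   (GET_CODE (X) == JUMP_INSN)
#define CALL_P(X)	   (GET_CODE (X) == CALL_INSN)
#define NONJUMP_INSN_P(X)  (GET_CODE (X) == INSN)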
FNADDR is an RTX for the address of the function's pure code.
CXT is an RTX for the static chain value for the function. */
-void
-sh_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
+static void
+sh_trampoline_init (rtx tramp_mem, tree fndecl, rtx cxt)
{
- rtx tramp_mem = gen_frame_mem (BLKmode, tramp);
+ rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
+ rtx tramp = force_reg (Pmode, XEXP (tramp_mem, 0));
if (TARGET_SHMEDIA64)
{
rtx ptabs = force_reg (DImode, GEN_INT (0x6bf10600));
rtx blink = force_reg (DImode, GEN_INT (0x4401fff0));
- tramp = force_reg (Pmode, tramp);
fnaddr = force_reg (SImode, fnaddr);
cxt = force_reg (SImode, cxt);
emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, quad0, 0),
}
}
+/* On SH5, trampolines are SHmedia code, so add 1 to the address: bit 0
+ of a target address selects the SHmedia instruction set. */
+
+static rtx
+sh_trampoline_adjust_address (rtx tramp)
+{
+ if (TARGET_SHMEDIA)
+ tramp = expand_simple_binop (Pmode, PLUS, tramp, const1_rtx,
+ gen_reg_rtx (Pmode), 0, OPTAB_LIB_WIDEN);
+ return tramp;
+}
+
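/* Editorial aside: the corresponding target-vector registrations are not
   shown in this excerpt; under the GCC 4.5 hook interface they would read
   roughly as follows.  */

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sh_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS sh_trampoline_adjust_address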
/* FIXME: This is overly conservative. A SHcompact function that
receives arguments ``by reference'' will have them stored in its
own stack frame, so it must not pass pointers or references to
const enum insn_code icode;
const char *const name;
int signature;
+ tree fndecl;
};
/* describe number and signedness of arguments; arg[0] == result
/* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int. */
/* mshards_q: returns signed short. */
/* nsb: takes long long arg, returns unsigned char. */
-static const struct builtin_description bdesc[] =
-{
- { CODE_FOR_absv2si2, "__builtin_absv2si2", SH_BLTIN_V2SI2 },
- { CODE_FOR_absv4hi2, "__builtin_absv4hi2", SH_BLTIN_V4HI2 },
- { CODE_FOR_addv2si3, "__builtin_addv2si3", SH_BLTIN_V2SI3 },
- { CODE_FOR_addv4hi3, "__builtin_addv4hi3", SH_BLTIN_V4HI3 },
- { CODE_FOR_ssaddv2si3,"__builtin_ssaddv2si3", SH_BLTIN_V2SI3 },
- { CODE_FOR_usaddv8qi3,"__builtin_usaddv8qi3", SH_BLTIN_V8QI3 },
- { CODE_FOR_ssaddv4hi3,"__builtin_ssaddv4hi3", SH_BLTIN_V4HI3 },
- { CODE_FOR_alloco_i, "__builtin_sh_media_ALLOCO", SH_BLTIN_PV },
- { CODE_FOR_negcmpeqv8qi,"__builtin_sh_media_MCMPEQ_B", SH_BLTIN_V8QI3 },
- { CODE_FOR_negcmpeqv2si,"__builtin_sh_media_MCMPEQ_L", SH_BLTIN_V2SI3 },
- { CODE_FOR_negcmpeqv4hi,"__builtin_sh_media_MCMPEQ_W", SH_BLTIN_V4HI3 },
- { CODE_FOR_negcmpgtuv8qi,"__builtin_sh_media_MCMPGT_UB", SH_BLTIN_V8QI3 },
- { CODE_FOR_negcmpgtv2si,"__builtin_sh_media_MCMPGT_L", SH_BLTIN_V2SI3 },
- { CODE_FOR_negcmpgtv4hi,"__builtin_sh_media_MCMPGT_W", SH_BLTIN_V4HI3 },
- { CODE_FOR_mcmv, "__builtin_sh_media_MCMV", SH_BLTIN_UUUU },
- { CODE_FOR_mcnvs_lw, "__builtin_sh_media_MCNVS_LW", SH_BLTIN_3 },
- { CODE_FOR_mcnvs_wb, "__builtin_sh_media_MCNVS_WB", SH_BLTIN_V4HI2V8QI },
- { CODE_FOR_mcnvs_wub, "__builtin_sh_media_MCNVS_WUB", SH_BLTIN_V4HI2V8QI },
- { CODE_FOR_mextr1, "__builtin_sh_media_MEXTR1", SH_BLTIN_V8QI3 },
- { CODE_FOR_mextr2, "__builtin_sh_media_MEXTR2", SH_BLTIN_V8QI3 },
- { CODE_FOR_mextr3, "__builtin_sh_media_MEXTR3", SH_BLTIN_V8QI3 },
- { CODE_FOR_mextr4, "__builtin_sh_media_MEXTR4", SH_BLTIN_V8QI3 },
- { CODE_FOR_mextr5, "__builtin_sh_media_MEXTR5", SH_BLTIN_V8QI3 },
- { CODE_FOR_mextr6, "__builtin_sh_media_MEXTR6", SH_BLTIN_V8QI3 },
- { CODE_FOR_mextr7, "__builtin_sh_media_MEXTR7", SH_BLTIN_V8QI3 },
- { CODE_FOR_mmacfx_wl, "__builtin_sh_media_MMACFX_WL", SH_BLTIN_MAC_HISI },
- { CODE_FOR_mmacnfx_wl,"__builtin_sh_media_MMACNFX_WL", SH_BLTIN_MAC_HISI },
- { CODE_FOR_mulv2si3, "__builtin_mulv2si3", SH_BLTIN_V2SI3, },
- { CODE_FOR_mulv4hi3, "__builtin_mulv4hi3", SH_BLTIN_V4HI3 },
- { CODE_FOR_mmulfx_l, "__builtin_sh_media_MMULFX_L", SH_BLTIN_V2SI3 },
- { CODE_FOR_mmulfx_w, "__builtin_sh_media_MMULFX_W", SH_BLTIN_V4HI3 },
- { CODE_FOR_mmulfxrp_w,"__builtin_sh_media_MMULFXRP_W", SH_BLTIN_V4HI3 },
- { CODE_FOR_mmulhi_wl, "__builtin_sh_media_MMULHI_WL", SH_BLTIN_V4HI2V2SI },
- { CODE_FOR_mmullo_wl, "__builtin_sh_media_MMULLO_WL", SH_BLTIN_V4HI2V2SI },
- { CODE_FOR_mmulsum_wq,"__builtin_sh_media_MMULSUM_WQ", SH_BLTIN_XXUU },
- { CODE_FOR_mperm_w, "__builtin_sh_media_MPERM_W", SH_BLTIN_SH_HI },
- { CODE_FOR_msad_ubq, "__builtin_sh_media_MSAD_UBQ", SH_BLTIN_XXUU },
- { CODE_FOR_mshalds_l, "__builtin_sh_media_MSHALDS_L", SH_BLTIN_SH_SI },
- { CODE_FOR_mshalds_w, "__builtin_sh_media_MSHALDS_W", SH_BLTIN_SH_HI },
- { CODE_FOR_ashrv2si3, "__builtin_ashrv2si3", SH_BLTIN_SH_SI },
- { CODE_FOR_ashrv4hi3, "__builtin_ashrv4hi3", SH_BLTIN_SH_HI },
- { CODE_FOR_mshards_q, "__builtin_sh_media_MSHARDS_Q", SH_BLTIN_SUS },
- { CODE_FOR_mshfhi_b, "__builtin_sh_media_MSHFHI_B", SH_BLTIN_V8QI3 },
- { CODE_FOR_mshfhi_l, "__builtin_sh_media_MSHFHI_L", SH_BLTIN_V2SI3 },
- { CODE_FOR_mshfhi_w, "__builtin_sh_media_MSHFHI_W", SH_BLTIN_V4HI3 },
- { CODE_FOR_mshflo_b, "__builtin_sh_media_MSHFLO_B", SH_BLTIN_V8QI3 },
- { CODE_FOR_mshflo_l, "__builtin_sh_media_MSHFLO_L", SH_BLTIN_V2SI3 },
- { CODE_FOR_mshflo_w, "__builtin_sh_media_MSHFLO_W", SH_BLTIN_V4HI3 },
- { CODE_FOR_ashlv2si3, "__builtin_ashlv2si3", SH_BLTIN_SH_SI },
- { CODE_FOR_ashlv4hi3, "__builtin_ashlv4hi3", SH_BLTIN_SH_HI },
- { CODE_FOR_lshrv2si3, "__builtin_lshrv2si3", SH_BLTIN_SH_SI },
- { CODE_FOR_lshrv4hi3, "__builtin_lshrv4hi3", SH_BLTIN_SH_HI },
- { CODE_FOR_subv2si3, "__builtin_subv2si3", SH_BLTIN_V2SI3 },
- { CODE_FOR_subv4hi3, "__builtin_subv4hi3", SH_BLTIN_V4HI3 },
- { CODE_FOR_sssubv2si3,"__builtin_sssubv2si3", SH_BLTIN_V2SI3 },
- { CODE_FOR_ussubv8qi3,"__builtin_ussubv8qi3", SH_BLTIN_V8QI3 },
- { CODE_FOR_sssubv4hi3,"__builtin_sssubv4hi3", SH_BLTIN_V4HI3 },
- { CODE_FOR_fcosa_s, "__builtin_sh_media_FCOSA_S", SH_BLTIN_SISF },
- { CODE_FOR_fsina_s, "__builtin_sh_media_FSINA_S", SH_BLTIN_SISF },
- { CODE_FOR_fipr, "__builtin_sh_media_FIPR_S", SH_BLTIN_3 },
- { CODE_FOR_ftrv, "__builtin_sh_media_FTRV_S", SH_BLTIN_3 },
- { CODE_FOR_mac_media, "__builtin_sh_media_FMAC_S", SH_BLTIN_3 },
- { CODE_FOR_sqrtdf2, "__builtin_sh_media_FSQRT_D", SH_BLTIN_2 },
- { CODE_FOR_sqrtsf2, "__builtin_sh_media_FSQRT_S", SH_BLTIN_2 },
- { CODE_FOR_fsrra_s, "__builtin_sh_media_FSRRA_S", SH_BLTIN_2 },
- { CODE_FOR_ldhi_l, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L },
- { CODE_FOR_ldhi_q, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q },
- { CODE_FOR_ldlo_l, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L },
- { CODE_FOR_ldlo_q, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q },
- { CODE_FOR_sthi_l, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L },
- { CODE_FOR_sthi_q, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q },
- { CODE_FOR_stlo_l, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L },
- { CODE_FOR_stlo_q, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q },
- { CODE_FOR_ldhi_l64, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L64 },
- { CODE_FOR_ldhi_q64, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q64 },
- { CODE_FOR_ldlo_l64, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L64 },
- { CODE_FOR_ldlo_q64, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q64 },
- { CODE_FOR_sthi_l64, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L64 },
- { CODE_FOR_sthi_q64, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q64 },
- { CODE_FOR_stlo_l64, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L64 },
- { CODE_FOR_stlo_q64, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q64 },
- { CODE_FOR_nsb, "__builtin_sh_media_NSB", SH_BLTIN_SU },
- { CODE_FOR_byterev, "__builtin_sh_media_BYTEREV", SH_BLTIN_2 },
- { CODE_FOR_prefetch, "__builtin_sh_media_PREFO", SH_BLTIN_PSSV },
+static struct builtin_description bdesc[] =
+{
+ { CODE_FOR_absv2si2, "__builtin_absv2si2", SH_BLTIN_V2SI2, 0 },
+ { CODE_FOR_absv4hi2, "__builtin_absv4hi2", SH_BLTIN_V4HI2, 0 },
+ { CODE_FOR_addv2si3, "__builtin_addv2si3", SH_BLTIN_V2SI3, 0 },
+ { CODE_FOR_addv4hi3, "__builtin_addv4hi3", SH_BLTIN_V4HI3, 0 },
+ { CODE_FOR_ssaddv2si3,"__builtin_ssaddv2si3", SH_BLTIN_V2SI3, 0 },
+ { CODE_FOR_usaddv8qi3,"__builtin_usaddv8qi3", SH_BLTIN_V8QI3, 0 },
+ { CODE_FOR_ssaddv4hi3,"__builtin_ssaddv4hi3", SH_BLTIN_V4HI3, 0 },
+ { CODE_FOR_alloco_i, "__builtin_sh_media_ALLOCO", SH_BLTIN_PV, 0 },
+ { CODE_FOR_negcmpeqv8qi,"__builtin_sh_media_MCMPEQ_B", SH_BLTIN_V8QI3, 0 },
+ { CODE_FOR_negcmpeqv2si,"__builtin_sh_media_MCMPEQ_L", SH_BLTIN_V2SI3, 0 },
+ { CODE_FOR_negcmpeqv4hi,"__builtin_sh_media_MCMPEQ_W", SH_BLTIN_V4HI3, 0 },
+ { CODE_FOR_negcmpgtuv8qi,"__builtin_sh_media_MCMPGT_UB", SH_BLTIN_V8QI3, 0 },
+ { CODE_FOR_negcmpgtv2si,"__builtin_sh_media_MCMPGT_L", SH_BLTIN_V2SI3, 0 },
+ { CODE_FOR_negcmpgtv4hi,"__builtin_sh_media_MCMPGT_W", SH_BLTIN_V4HI3, 0 },
+ { CODE_FOR_mcmv, "__builtin_sh_media_MCMV", SH_BLTIN_UUUU, 0 },
+ { CODE_FOR_mcnvs_lw, "__builtin_sh_media_MCNVS_LW", SH_BLTIN_3, 0 },
+ { CODE_FOR_mcnvs_wb, "__builtin_sh_media_MCNVS_WB", SH_BLTIN_V4HI2V8QI, 0 },
+ { CODE_FOR_mcnvs_wub, "__builtin_sh_media_MCNVS_WUB", SH_BLTIN_V4HI2V8QI, 0 },
+ { CODE_FOR_mextr1, "__builtin_sh_media_MEXTR1", SH_BLTIN_V8QI3, 0 },
+ { CODE_FOR_mextr2, "__builtin_sh_media_MEXTR2", SH_BLTIN_V8QI3, 0 },
+ { CODE_FOR_mextr3, "__builtin_sh_media_MEXTR3", SH_BLTIN_V8QI3, 0 },
+ { CODE_FOR_mextr4, "__builtin_sh_media_MEXTR4", SH_BLTIN_V8QI3, 0 },
+ { CODE_FOR_mextr5, "__builtin_sh_media_MEXTR5", SH_BLTIN_V8QI3, 0 },
+ { CODE_FOR_mextr6, "__builtin_sh_media_MEXTR6", SH_BLTIN_V8QI3, 0 },
+ { CODE_FOR_mextr7, "__builtin_sh_media_MEXTR7", SH_BLTIN_V8QI3, 0 },
+ { CODE_FOR_mmacfx_wl, "__builtin_sh_media_MMACFX_WL", SH_BLTIN_MAC_HISI, 0 },
+ { CODE_FOR_mmacnfx_wl,"__builtin_sh_media_MMACNFX_WL", SH_BLTIN_MAC_HISI, 0 },
+ { CODE_FOR_mulv2si3, "__builtin_mulv2si3", SH_BLTIN_V2SI3, 0 },
+ { CODE_FOR_mulv4hi3, "__builtin_mulv4hi3", SH_BLTIN_V4HI3, 0 },
+ { CODE_FOR_mmulfx_l, "__builtin_sh_media_MMULFX_L", SH_BLTIN_V2SI3, 0 },
+ { CODE_FOR_mmulfx_w, "__builtin_sh_media_MMULFX_W", SH_BLTIN_V4HI3, 0 },
+ { CODE_FOR_mmulfxrp_w,"__builtin_sh_media_MMULFXRP_W", SH_BLTIN_V4HI3, 0 },
+ { CODE_FOR_mmulhi_wl, "__builtin_sh_media_MMULHI_WL", SH_BLTIN_V4HI2V2SI, 0 },
+ { CODE_FOR_mmullo_wl, "__builtin_sh_media_MMULLO_WL", SH_BLTIN_V4HI2V2SI, 0 },
+ { CODE_FOR_mmulsum_wq,"__builtin_sh_media_MMULSUM_WQ", SH_BLTIN_XXUU, 0 },
+ { CODE_FOR_mperm_w, "__builtin_sh_media_MPERM_W", SH_BLTIN_SH_HI, 0 },
+ { CODE_FOR_msad_ubq, "__builtin_sh_media_MSAD_UBQ", SH_BLTIN_XXUU, 0 },
+ { CODE_FOR_mshalds_l, "__builtin_sh_media_MSHALDS_L", SH_BLTIN_SH_SI, 0 },
+ { CODE_FOR_mshalds_w, "__builtin_sh_media_MSHALDS_W", SH_BLTIN_SH_HI, 0 },
+ { CODE_FOR_ashrv2si3, "__builtin_ashrv2si3", SH_BLTIN_SH_SI, 0 },
+ { CODE_FOR_ashrv4hi3, "__builtin_ashrv4hi3", SH_BLTIN_SH_HI, 0 },
+ { CODE_FOR_mshards_q, "__builtin_sh_media_MSHARDS_Q", SH_BLTIN_SUS, 0 },
+ { CODE_FOR_mshfhi_b, "__builtin_sh_media_MSHFHI_B", SH_BLTIN_V8QI3, 0 },
+ { CODE_FOR_mshfhi_l, "__builtin_sh_media_MSHFHI_L", SH_BLTIN_V2SI3, 0 },
+ { CODE_FOR_mshfhi_w, "__builtin_sh_media_MSHFHI_W", SH_BLTIN_V4HI3, 0 },
+ { CODE_FOR_mshflo_b, "__builtin_sh_media_MSHFLO_B", SH_BLTIN_V8QI3, 0 },
+ { CODE_FOR_mshflo_l, "__builtin_sh_media_MSHFLO_L", SH_BLTIN_V2SI3, 0 },
+ { CODE_FOR_mshflo_w, "__builtin_sh_media_MSHFLO_W", SH_BLTIN_V4HI3, 0 },
+ { CODE_FOR_ashlv2si3, "__builtin_ashlv2si3", SH_BLTIN_SH_SI, 0 },
+ { CODE_FOR_ashlv4hi3, "__builtin_ashlv4hi3", SH_BLTIN_SH_HI, 0 },
+ { CODE_FOR_lshrv2si3, "__builtin_lshrv2si3", SH_BLTIN_SH_SI, 0 },
+ { CODE_FOR_lshrv4hi3, "__builtin_lshrv4hi3", SH_BLTIN_SH_HI, 0 },
+ { CODE_FOR_subv2si3, "__builtin_subv2si3", SH_BLTIN_V2SI3, 0 },
+ { CODE_FOR_subv4hi3, "__builtin_subv4hi3", SH_BLTIN_V4HI3, 0 },
+ { CODE_FOR_sssubv2si3,"__builtin_sssubv2si3", SH_BLTIN_V2SI3, 0 },
+ { CODE_FOR_ussubv8qi3,"__builtin_ussubv8qi3", SH_BLTIN_V8QI3, 0 },
+ { CODE_FOR_sssubv4hi3,"__builtin_sssubv4hi3", SH_BLTIN_V4HI3, 0 },
+ { CODE_FOR_fcosa_s, "__builtin_sh_media_FCOSA_S", SH_BLTIN_SISF, 0 },
+ { CODE_FOR_fsina_s, "__builtin_sh_media_FSINA_S", SH_BLTIN_SISF, 0 },
+ { CODE_FOR_fipr, "__builtin_sh_media_FIPR_S", SH_BLTIN_3, 0 },
+ { CODE_FOR_ftrv, "__builtin_sh_media_FTRV_S", SH_BLTIN_3, 0 },
+ { CODE_FOR_mac_media, "__builtin_sh_media_FMAC_S", SH_BLTIN_3, 0 },
+ { CODE_FOR_sqrtdf2, "__builtin_sh_media_FSQRT_D", SH_BLTIN_2, 0 },
+ { CODE_FOR_sqrtsf2, "__builtin_sh_media_FSQRT_S", SH_BLTIN_2, 0 },
+ { CODE_FOR_fsrra_s, "__builtin_sh_media_FSRRA_S", SH_BLTIN_2, 0 },
+ { CODE_FOR_ldhi_l, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L, 0 },
+ { CODE_FOR_ldhi_q, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q, 0 },
+ { CODE_FOR_ldlo_l, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L, 0 },
+ { CODE_FOR_ldlo_q, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q, 0 },
+ { CODE_FOR_sthi_l, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L, 0 },
+ { CODE_FOR_sthi_q, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q, 0 },
+ { CODE_FOR_stlo_l, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L, 0 },
+ { CODE_FOR_stlo_q, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q, 0 },
+ { CODE_FOR_ldhi_l64, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L64, 0 },
+ { CODE_FOR_ldhi_q64, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q64, 0 },
+ { CODE_FOR_ldlo_l64, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L64, 0 },
+ { CODE_FOR_ldlo_q64, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q64, 0 },
+ { CODE_FOR_sthi_l64, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L64, 0 },
+ { CODE_FOR_sthi_q64, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q64, 0 },
+ { CODE_FOR_stlo_l64, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L64, 0 },
+ { CODE_FOR_stlo_q64, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q64, 0 },
+ { CODE_FOR_nsb, "__builtin_sh_media_NSB", SH_BLTIN_SU, 0 },
+ { CODE_FOR_byterev, "__builtin_sh_media_BYTEREV", SH_BLTIN_2, 0 },
+ { CODE_FOR_prefetch, "__builtin_sh_media_PREFO", SH_BLTIN_PSSV, 0 },
};
static void
sh_media_init_builtins (void)
{
tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
- const struct builtin_description *d;
+ struct builtin_description *d;
memset (shared, 0, sizeof shared);
for (d = bdesc; d - bdesc < (int) ARRAY_SIZE (bdesc); d++)
if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
shared[signature] = type;
}
- add_builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
- NULL, NULL_TREE);
+ d->fndecl =
+ add_builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
+ NULL, NULL_TREE);
}
}
+/* Returns the SHmedia builtin decl for CODE. */
+
+static tree
+sh_media_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
+{
+ if (code >= ARRAY_SIZE (bdesc))
+ return error_mark_node;
+
+ return bdesc[code].fndecl;
+}
+
/* Implements target hook vector_mode_supported_p. */
bool
sh_vector_mode_supported_p (enum machine_mode mode)
return false;
}
+static bool
+sh_frame_pointer_required (void)
+{
+ /* If needed, override this in other tm.h files to cope with various OS
+ lossage requiring a frame pointer. */
+ if (SUBTARGET_FRAME_POINTER_REQUIRED)
+ return true;
+
+ if (crtl->profile)
+ return true;
+
+ return false;
+}
+
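/* Editorial aside: the matching registration (hunk not shown in this
   excerpt) would presumably be:  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sh_frame_pointer_required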
/* Implements target hook dwarf_calling_convention. Return an enum
of dwarf_calling_convention. */
int
sh_media_init_builtins ();
}
+/* Returns the SH builtin decl for CODE. */
+
+static tree
+sh_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
+{
+ if (TARGET_SHMEDIA)
+ return sh_media_builtin_decl (code, initialize_p);
+
+ return error_mark_node;
+}
+
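/* Editorial aside: sh_builtin_decl is the function behind the (GCC 4.5)
   TARGET_BUILTIN_DECL hook; the registration, and a hypothetical lookup
   through the target vector, would read roughly:  */

#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL sh_builtin_decl

/* tree fn = targetm.builtin_decl (fcode, true);
   if (fn == error_mark_node)
     ... FCODE does not name an SH builtin ...  */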
/* Expand an expression EXP that calls a built-in function,
with result going to TARGET if that's convenient
(and in mode MODE if that's convenient).
return 0;
}
+/* Return true if pseudos in machine mode MODE are likely to be
+ allocated to registers in small register classes. */
+
+static bool
+sh_small_register_classes_for_mode_p (enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+ return (! TARGET_SHMEDIA);
+}
+
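/* Editorial aside: the registration implied by the function above (hunk
   not shown in this excerpt) would presumably be:  */

#undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P
#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
  sh_small_register_classes_for_mode_p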
/* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
that label is used. */
address = XVECEXP (address, 0, 0);
}
if (GET_CODE (address) == LABEL_REF
- && GET_CODE (XEXP (address, 0)) == CODE_LABEL)
+ && LABEL_P (XEXP (address, 0)))
LABEL_NUSES (XEXP (address, 0)) += nuses;
}
final_start_function (insns, file, 1);
final (insns, file, 1);
final_end_function ();
- free_after_compilation (cfun);
reload_completed = 0;
epilogue_completed = 0;
rtx result = target;
HOST_WIDE_INT val;
- if (GET_CODE (op0) != REG || REGNO (op0) != T_REG
- || GET_CODE (op1) != CONST_INT)
+ if (!REG_P (op0) || REGNO (op0) != T_REG
+ || !CONST_INT_P (op1))
return 0;
- if (GET_CODE (result) != REG)
+ if (!REG_P (result))
result = gen_reg_rtx (SImode);
val = INTVAL (op1);
if ((code == EQ && val == 1) || (code == NE && val == 0))
emit_insn (gen_movt (result));
else if (TARGET_SH2A && ((code == EQ && val == 0)
|| (code == NE && val == 1)))
- emit_insn (gen_movrt (result));
+ emit_insn (gen_xorsi3_movrt (result)); /* SH2A movrt: result = !T. */
else if ((code == EQ && val == 0) || (code == NE && val == 1))
{
emit_clobber (result);
/* Search for the sfunc. It should really come right after INSN. */
while ((insn = NEXT_INSN (insn)))
{
- if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
+ if (LABEL_P (insn) || JUMP_P (insn))
break;
if (! INSN_P (insn))
continue;
rtx new_rtx = replace_n_hard_rtx (SUBREG_REG (x), replacements,
n_replacements, modify);
- if (GET_CODE (new_rtx) == CONST_INT)
+ if (CONST_INT_P (new_rtx))
{
x = simplify_subreg (GET_MODE (x), new_rtx,
GET_MODE (SUBREG_REG (x)),
return x;
}
- else if (GET_CODE (x) == REG)
+ else if (REG_P (x))
{
unsigned regno = REGNO (x);
unsigned nregs = (regno < FIRST_PSEUDO_REGISTER
rtx to = replacements[i*2+1];
unsigned from_regno, from_nregs, to_regno, new_regno;
- if (GET_CODE (from) != REG)
+ if (!REG_P (from))
continue;
from_regno = REGNO (from);
from_nregs = (from_regno < FIRST_PSEUDO_REGISTER
{
if (regno < from_regno
|| regno + nregs > from_regno + nregs
- || GET_CODE (to) != REG
+ || !REG_P (to)
|| result)
return NULL_RTX;
to_regno = REGNO (to);
rtx new_rtx = replace_n_hard_rtx (XEXP (x, 0), replacements,
n_replacements, modify);
- if (GET_CODE (new_rtx) == CONST_INT)
+ if (CONST_INT_P (new_rtx))
{
x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
new_rtx, GET_MODE (XEXP (x, 0)));
if (GET_CODE (x) != TRUNCATE)
return 0;
reg = XEXP (x, 0);
- if (GET_MODE_SIZE (GET_MODE (reg)) > 8 && GET_CODE (reg) == REG)
+ if (GET_MODE_SIZE (GET_MODE (reg)) > 8 && REG_P (reg))
{
enum machine_mode reg_mode = GET_MODE (reg);
XEXP (x, 0) = simplify_subreg (DImode, reg, reg_mode,
static int
sh_contains_memref_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
- return (GET_CODE (*loc) == MEM);
+ return (MEM_P (*loc));
}
/* Return nonzero iff INSN contains a MEM. */
abort ();
}
if (rclass == FPUL_REGS
- && ((GET_CODE (x) == REG
+ && ((REG_P (x)
&& (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
|| REGNO (x) == T_REG))
|| GET_CODE (x) == PLUS))
return NO_REGS;
}
if (rclass == FPSCR_REGS
- && ((GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
- || (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == PLUS)))
+ && ((REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
+ || (MEM_P (x) && GET_CODE (XEXP (x, 0)) == PLUS)))
return GENERAL_REGS;
if (REGCLASS_HAS_FP_REG (rclass)
&& TARGET_SHMEDIA
} /* end of input-only processing. */
if (((REGCLASS_HAS_FP_REG (rclass)
- && (GET_CODE (x) == REG
+ && (REG_P (x)
&& (GENERAL_OR_AP_REGISTER_P (REGNO (x))
|| (FP_REGISTER_P (REGNO (x)) && mode == SImode
&& TARGET_FMOVD))))
|| (REGCLASS_HAS_GENERAL_REG (rclass)
- && GET_CODE (x) == REG
+ && REG_P (x)
&& FP_REGISTER_P (REGNO (x))))
&& ! TARGET_SHMEDIA
&& (mode == SFmode || mode == SImode))
if ((rclass == FPUL_REGS
|| (REGCLASS_HAS_FP_REG (rclass)
&& ! TARGET_SHMEDIA && mode == SImode))
- && (GET_CODE (x) == MEM
- || (GET_CODE (x) == REG
+ && (MEM_P (x)
+ || (REG_P (x)
&& (REGNO (x) >= FIRST_PSEUDO_REGISTER
|| REGNO (x) == T_REG
|| system_reg_operand (x, VOIDmode)))))
if ((rclass == TARGET_REGS
|| (TARGET_SHMEDIA && rclass == SIBCALL_REGS))
&& !satisfies_constraint_Csy (x)
- && (GET_CODE (x) != REG || ! GENERAL_REGISTER_P (REGNO (x))))
+ && (!REG_P (x) || ! GENERAL_REGISTER_P (REGNO (x))))
return GENERAL_REGS;
if ((rclass == MAC_REGS || rclass == PR_REGS)
- && GET_CODE (x) == REG && ! GENERAL_REGISTER_P (REGNO (x))
+ && REG_P (x) && ! GENERAL_REGISTER_P (REGNO (x))
&& rclass != REGNO_REG_CLASS (REGNO (x)))
return GENERAL_REGS;
- if (rclass != GENERAL_REGS && GET_CODE (x) == REG
+ if (rclass != GENERAL_REGS && REG_P (x)
&& TARGET_REGISTER_P (REGNO (x)))
return GENERAL_REGS;
return NO_REGS;