/* Output routines for GCC for Renesas / SuperH SH.
Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
- 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+ 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
Contributed by Steve Chamberlain (sac@cygnus.com).
Improved by Jim Wilson (wilson@cygnus.com).
#include "insn-attr.h"
#include "toplev.h"
#include "recog.h"
-#include "c-pragma.h"
#include "integrate.h"
#include "dwarf2.h"
#include "tm_p.h"
#include "cfglayout.h"
#include "intl.h"
#include "sched-int.h"
+#include "params.h"
#include "ggc.h"
#include "gimple.h"
#include "cfgloop.h"
and returned from sh_reorder2. */
static short cached_can_issue_more;
-/* Saved operands from the last compare to use when we generate an scc
- or bcc insn. */
-
-rtx sh_compare_op0;
-rtx sh_compare_op1;
-
/* Provides the class number of the smallest class containing
reg number. */
static int calc_live_regs (HARD_REG_SET *);
static HOST_WIDE_INT rounded_frame_size (int);
static rtx mark_constant_pool_use (rtx);
-const struct attribute_spec sh_attribute_table[];
static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_resbank_handler_attribute (tree *, tree,
tree, int, bool *);
static bool sh_function_ok_for_sibcall (tree, tree);
static bool sh_cannot_modify_jumps_p (void);
-static int sh_target_reg_class (void);
+static enum reg_class sh_target_reg_class (void);
static bool sh_optimize_target_register_callee_saved (bool);
static bool sh_ms_bitfield_layout_p (const_tree);
static int sh_address_cost (rtx, bool);
static int sh_pr_n_sets (void);
static rtx sh_allocate_initial_value (rtx);
+static bool sh_legitimate_address_p (enum machine_mode, rtx, bool);
+static rtx sh_legitimize_address (rtx, rtx, enum machine_mode);
static int shmedia_target_regs_stack_space (HARD_REG_SET *);
static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
static tree sh_build_builtin_va_list (void);
static void sh_va_start (tree, rtx);
static tree sh_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
+static enum machine_mode sh_promote_function_mode (const_tree type,
+ enum machine_mode,
+ int *punsignedp,
+ const_tree funtype,
+ int for_return);
static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
const_tree, bool);
static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
static int sh_dwarf_calling_convention (const_tree);
static void sh_encode_section_info (tree, rtx, int);
static int sh2a_function_vector_p (tree);
+\f
+/* Machine attributes recognized by the SH back end.  Each entry gives
+   the attribute name, its argument-count bounds, where it may appear
+   (decl/type/function type), and the handler that validates it.  The
+   table is terminated by a NULL-name sentinel.  */
+static const struct attribute_spec sh_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ { "interrupt_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
+ { "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
+ { "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
+ { "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
+ { "trapa_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
+ { "nosave_low_regs", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
+ { "resbank", 0, 0, true, false, false, sh_handle_resbank_handler_attribute },
+ { "function_vector", 1, 1, true, false, false, sh2a_handle_function_vector_handler_attribute },
+#ifdef SYMBIAN
+ /* Symbian support adds two new attributes:
+    dllexport - for exporting a function/variable that will live in a dll
+    dllimport - for importing a function/variable from a dll
+    Microsoft allows multiple declspecs in one __declspec, separating
+    them with spaces.  We do NOT support this.  Instead, use __declspec
+    multiple times.  */
+ { "dllimport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
+ { "dllexport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
+#endif
+ { NULL, 0, 0, false, false, false, NULL }
+};
\f
/* Initialize the GCC target structure. */
#undef TARGET_ATTRIBUTE_TABLE
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sh_md_init
+#undef TARGET_LEGITIMIZE_ADDRESS
+#define TARGET_LEGITIMIZE_ADDRESS sh_legitimize_address
+
#undef TARGET_CANNOT_MODIFY_JUMPS_P
#define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
#undef TARGET_BRANCH_TARGET_REGISTER_CLASS
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sh_reorg
+#undef TARGET_DWARF_REGISTER_SPAN
+#define TARGET_DWARF_REGISTER_SPAN sh_dwarf_register_span
+
#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
-#undef TARGET_PROMOTE_FUNCTION_ARGS
-#define TARGET_PROMOTE_FUNCTION_ARGS sh_promote_prototypes
-#undef TARGET_PROMOTE_FUNCTION_RETURN
-#define TARGET_PROMOTE_FUNCTION_RETURN sh_promote_prototypes
+#undef TARGET_PROMOTE_FUNCTION_MODE
+#define TARGET_PROMOTE_FUNCTION_MODE sh_promote_function_mode
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD sh_secondary_reload
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P sh_legitimate_address_p
+
/* Machine-specific symbol_ref flags. */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
}
}
\f
+/* Set default optimization options.  LEVEL is the numeric -O level and
+   SIZE is nonzero for -Os.  Several flags are set to the sentinel
+   value 2 here so that sh_override_options can tell later whether the
+   user set them explicitly.  */
+void
+sh_optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
+{
+  if (level)
+    {
+      /* 2 is a sentinel meaning "not explicitly chosen by the user";
+	 it is resolved to a real value in sh_override_options.  */
+      flag_omit_frame_pointer = 2;
+      if (!size)
+	sh_div_str = "inv:minlat";
+    }
+  if (size)
+    {
+      target_flags |= MASK_SMALLCODE;
+      sh_div_str = SH_DIV_STR_FOR_SIZE ;
+    }
+  else
+    TARGET_CBRANCHDI4 = 1;
+  /* We can't meaningfully test TARGET_SHMEDIA here, because -m options
+     haven't been parsed yet, hence we'd read only the default.
+     sh_target_reg_class will return NO_REGS if this is not SHMEDIA, so
+     it's OK to always set flag_branch_target_load_optimize.  */
+  if (level > 1)
+    {
+      flag_branch_target_load_optimize = 1;
+      if (!size)
+	target_flags |= MASK_SAVE_ALL_TARGET_REGS;
+    }
+  /* Likewise, we can't meaningfully test TARGET_SH2E / TARGET_IEEE
+     here, so leave it to OVERRIDE_OPTIONS to set
+     flag_finite_math_only.  We set it to 2 here so we know if the user
+     explicitly requested this to be on or off.  */
+  flag_finite_math_only = 2;
+  /* If flag_schedule_insns is 1, we set it to 2 here so we know if
+     the user explicitly requested this to be on or off.  */
+  if (flag_schedule_insns > 0)
+    flag_schedule_insns = 2;
+
+  /* NOTE(review): presumably this matches the hardware's prefetch
+     capability -- confirm against the relevant SH manual.  */
+  set_param_value ("simultaneous-prefetches", 2);
+}
+
+/* Implement OVERRIDE_OPTIONS macro.  Validate and override various
+   options, and do some machine dependent initialization.  Runs after
+   all command-line options have been parsed, so this is where the
+   sentinel values planted by sh_optimization_options are resolved.  */
+void
+sh_override_options (void)
+{
+  int regno;
+
+  SUBTARGET_OVERRIDE_OPTIONS;
+  /* flag_finite_math_only == 2 is the "not set by the user" sentinel
+     from sh_optimization_options; compute the real default here.  */
+  if (flag_finite_math_only == 2)
+    flag_finite_math_only
+      = !flag_signaling_nans && TARGET_SH2E && ! TARGET_IEEE;
+  if (TARGET_SH2E && !flag_finite_math_only)
+    target_flags |= MASK_IEEE;
+  /* Derive sh_cpu from the target flags; later tests override earlier
+     ones, so the most capable enabled CPU wins.  */
+  sh_cpu = PROCESSOR_SH1;
+  assembler_dialect = 0;
+  if (TARGET_SH2)
+    sh_cpu = PROCESSOR_SH2;
+  if (TARGET_SH2E)
+    sh_cpu = PROCESSOR_SH2E;
+  if (TARGET_SH2A)
+    sh_cpu = PROCESSOR_SH2A;
+  if (TARGET_SH3)
+    sh_cpu = PROCESSOR_SH3;
+  if (TARGET_SH3E)
+    sh_cpu = PROCESSOR_SH3E;
+  if (TARGET_SH4)
+    {
+      assembler_dialect = 1;
+      sh_cpu = PROCESSOR_SH4;
+    }
+  if (TARGET_SH4A_ARCH)
+    {
+      assembler_dialect = 1;
+      sh_cpu = PROCESSOR_SH4A;
+    }
+  if (TARGET_SH5)
+    {
+      sh_cpu = PROCESSOR_SH5;
+      target_flags |= MASK_ALIGN_DOUBLE;
+      if (TARGET_SHMEDIA_FPU)
+	target_flags |= MASK_FMOVD;
+      if (TARGET_SHMEDIA)
+	{
+	  /* There are no delay slots on SHmedia.  */
+	  flag_delayed_branch = 0;
+	  /* Relaxation isn't yet supported for SHmedia */
+	  target_flags &= ~MASK_RELAX;
+	  /* After reload, if conversion does little good but can cause
+	     ICEs:
+	     - find_if_block doesn't do anything for SH because we don't
+	       have conditional execution patterns.  (We use conditional
+	       move patterns, which are handled differently, and only
+	       before reload).
+	     - find_cond_trap doesn't do anything for the SH because we
+	       don't have conditional traps.
+	     - find_if_case_1 uses redirect_edge_and_branch_force in
+	       the only path that does an optimization, and this causes
+	       an ICE when branch targets are in registers.
+	     - find_if_case_2 doesn't do anything for the SHmedia after
+	       reload except when it can redirect a tablejump - and
+	       that's rather rare.  */
+	  flag_if_conversion2 = 0;
+	  if (! strcmp (sh_div_str, "call"))
+	    sh_div_strategy = SH_DIV_CALL;
+	  else if (! strcmp (sh_div_str, "call2"))
+	    sh_div_strategy = SH_DIV_CALL2;
+	  /* NOTE(review): this restarts the else-if chain with a plain
+	     "if".  Behavior is unchanged, since the strcmp'd strings
+	     are mutually exclusive, but it looks accidental.  */
+	  if (! strcmp (sh_div_str, "fp") && TARGET_FPU_ANY)
+	    sh_div_strategy = SH_DIV_FP;
+	  else if (! strcmp (sh_div_str, "inv"))
+	    sh_div_strategy = SH_DIV_INV;
+	  else if (! strcmp (sh_div_str, "inv:minlat"))
+	    sh_div_strategy = SH_DIV_INV_MINLAT;
+	  else if (! strcmp (sh_div_str, "inv20u"))
+	    sh_div_strategy = SH_DIV_INV20U;
+	  else if (! strcmp (sh_div_str, "inv20l"))
+	    sh_div_strategy = SH_DIV_INV20L;
+	  else if (! strcmp (sh_div_str, "inv:call2"))
+	    sh_div_strategy = SH_DIV_INV_CALL2;
+	  else if (! strcmp (sh_div_str, "inv:call"))
+	    sh_div_strategy = SH_DIV_INV_CALL;
+	  else if (! strcmp (sh_div_str, "inv:fp"))
+	    {
+	      if (TARGET_FPU_ANY)
+		sh_div_strategy = SH_DIV_INV_FP;
+	      else
+		sh_div_strategy = SH_DIV_INV;
+	    }
+	  TARGET_CBRANCHDI4 = 0;
+	  /* Assembler CFI isn't yet fully supported for SHmedia.  */
+	  flag_dwarf2_cfi_asm = 0;
+	}
+    }
+  else
+    {
+      /* Only the sh64-elf assembler fully supports .quad properly.  */
+      targetm.asm_out.aligned_op.di = NULL;
+      targetm.asm_out.unaligned_op.di = NULL;
+    }
+  if (TARGET_SH1)
+    {
+      if (! strcmp (sh_div_str, "call-div1"))
+	sh_div_strategy = SH_DIV_CALL_DIV1;
+      else if (! strcmp (sh_div_str, "call-fp")
+	       && (TARGET_FPU_DOUBLE
+		   || (TARGET_HARD_SH4 && TARGET_SH2E)
+		   || (TARGET_SHCOMPACT && TARGET_FPU_ANY)))
+	sh_div_strategy = SH_DIV_CALL_FP;
+      else if (! strcmp (sh_div_str, "call-table") && TARGET_SH2)
+	sh_div_strategy = SH_DIV_CALL_TABLE;
+      else
+	/* Pick one that makes most sense for the target in general.
+	   It is not much good to use different functions depending
+	   on -Os, since then we'll end up with two different functions
+	   when some of the code is compiled for size, and some for
+	   speed.  */
+
+	/* SH4 tends to emphasize speed.  */
+	if (TARGET_HARD_SH4)
+	  sh_div_strategy = SH_DIV_CALL_TABLE;
+	/* These have their own way of doing things.  */
+	else if (TARGET_SH2A)
+	  sh_div_strategy = SH_DIV_INTRINSIC;
+	/* ??? Should we use the integer SHmedia function instead?  */
+	else if (TARGET_SHCOMPACT && TARGET_FPU_ANY)
+	  sh_div_strategy = SH_DIV_CALL_FP;
+	/* SH1 .. SH3 cores often go into small-footprint systems, so
+	   default to the smallest implementation available.  */
+	else if (TARGET_SH2)	/* ??? EXPERIMENTAL */
+	  sh_div_strategy = SH_DIV_CALL_TABLE;
+	else
+	  sh_div_strategy = SH_DIV_CALL_DIV1;
+    }
+  if (!TARGET_SH1)
+    TARGET_PRETEND_CMOVE = 0;
+  /* An empty string means no library function was given on the command
+     line, so pick a default below.  */
+  if (sh_divsi3_libfunc[0])
+    ; /* User supplied - leave it alone.  */
+  else if (TARGET_DIVIDE_CALL_FP)
+    sh_divsi3_libfunc = "__sdivsi3_i4";
+  else if (TARGET_DIVIDE_CALL_TABLE)
+    sh_divsi3_libfunc = "__sdivsi3_i4i";
+  else if (TARGET_SH5)
+    sh_divsi3_libfunc = "__sdivsi3_1";
+  else
+    sh_divsi3_libfunc = "__sdivsi3";
+  /* -1 means sh_branch_cost was not set on the command line.  */
+  if (sh_branch_cost == -1)
+    sh_branch_cost
+      = TARGET_SH5 ? 1 : ! TARGET_SH2 || TARGET_HARD_SH4 ? 2 : 1;
+
+  /* Blank out the names of registers that are not valid for the
+     selected subtarget.  */
+  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+    if (! VALID_REGISTER_P (regno))
+      sh_register_names[regno][0] = '\0';
+
+  for (regno = 0; regno < ADDREGNAMES_SIZE; regno++)
+    if (! VALID_REGISTER_P (ADDREGNAMES_REGNO (regno)))
+      sh_additional_register_names[regno][0] = '\0';
+
+  /* Resolve the "unset" sentinel planted by sh_optimization_options.  */
+  if (flag_omit_frame_pointer == 2)
+    {
+      /* The debugging information is sufficient,
+	 but gdb doesn't implement this yet */
+      if (0)
+	flag_omit_frame_pointer
+	  = (PREFERRED_DEBUGGING_TYPE == DWARF2_DEBUG);
+      else
+	flag_omit_frame_pointer = 0;
+    }
+
+  if ((flag_pic && ! TARGET_PREFERGOT)
+      || (TARGET_SHMEDIA && !TARGET_PT_FIXED))
+    flag_no_function_cse = 1;
+
+  if (SMALL_REGISTER_CLASSES)
+    {
+      /* Never run scheduling before reload, since that can
+	 break global alloc, and generates slower code anyway due
+	 to the pressure on R0.  */
+      /* Enable sched1 for SH4 if the user explicitly requests.
+	 When sched1 is enabled, the ready queue will be reordered by
+	 the target hooks if pressure is high.  We can not do this for
+	 PIC, SH3 and lower as they give spill failures for R0.  */
+      if (!TARGET_HARD_SH4 || flag_pic)
+	flag_schedule_insns = 0;
+      /* ??? Current exception handling places basic block boundaries
+	 after call_insns.  It causes the high pressure on R0 and gives
+	 spill failures for R0 in reload.  See PR 22553 and the thread
+	 on gcc-patches
+	 <http://gcc.gnu.org/ml/gcc-patches/2005-10/msg00816.html>.  */
+      else if (flag_exceptions)
+	{
+	  if (flag_schedule_insns == 1)
+	    warning (0, "ignoring -fschedule-insns because of exception handling bug");
+	  flag_schedule_insns = 0;
+	}
+      else if (flag_schedule_insns == 2)
+	flag_schedule_insns = 0;
+    }
+
+  /* Unwinding with -freorder-blocks-and-partition does not work on this
+     architecture, because it requires far jumps to label crossing between
+     hot/cold sections which are rejected on this architecture.  */
+  if (flag_reorder_blocks_and_partition)
+    {
+      if (flag_exceptions)
+	{
+	  inform (input_location,
+		  "-freorder-blocks-and-partition does not work with "
+		  "exceptions on this architecture");
+	  flag_reorder_blocks_and_partition = 0;
+	  flag_reorder_blocks = 1;
+	}
+      else if (flag_unwind_tables)
+	{
+	  inform (input_location,
+		  "-freorder-blocks-and-partition does not support unwind "
+		  "info on this architecture");
+	  flag_reorder_blocks_and_partition = 0;
+	  flag_reorder_blocks = 1;
+	}
+    }
+
+  if (align_loops == 0)
+    align_loops = 1 << (TARGET_SH5 ? 3 : 2);
+  if (align_jumps == 0)
+    align_jumps = 1 << CACHE_LOG;
+  else if (align_jumps < (TARGET_SHMEDIA ? 4 : 2))
+    align_jumps = TARGET_SHMEDIA ? 4 : 2;
+
+  /* Allocation boundary (in *bytes*) for the code of a function.
+     SH1: 32 bit alignment is faster, because instructions are always
+     fetched as a pair from a longword boundary.
+     SH2 .. SH5 : align to cache line start.  */
+  if (align_functions == 0)
+    align_functions
+      = TARGET_SMALLCODE ? FUNCTION_BOUNDARY/8 : (1 << CACHE_LOG);
+  /* The linker relaxation code breaks when a function contains
+     alignments that are larger than that at the start of a
+     compilation unit.  */
+  if (TARGET_RELAX)
+    {
+      int min_align
+	= align_loops > align_jumps ? align_loops : align_jumps;
+
+      /* Also take possible .long constants / mova tables into account.  */
+      if (min_align < 4)
+	min_align = 4;
+      if (align_functions < min_align)
+	align_functions = min_align;
+    }
+
+  if (sh_fixed_range_str)
+    sh_fix_range (sh_fixed_range_str);
+}
+\f
/* Print the operand address in x to the stream. */
void
break;
case 't':
- gcc_assert (GET_CODE (x) == MEM);
+ gcc_assert (MEM_P (x));
x = XEXP (x, 0);
switch (GET_CODE (x))
{
case 'M':
if (TARGET_SHMEDIA)
{
- if (GET_CODE (x) == MEM
+ if (MEM_P (x)
&& GET_CODE (XEXP (x, 0)) == PLUS
- && (GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
+ && (REG_P (XEXP (XEXP (x, 0), 1))
|| GET_CODE (XEXP (XEXP (x, 0), 1)) == SUBREG))
fputc ('x', stream);
}
else
{
- if (GET_CODE (x) == MEM)
+ if (MEM_P (x))
{
switch (GET_MODE (x))
{
break;
case 'm':
- gcc_assert (GET_CODE (x) == MEM);
+ gcc_assert (MEM_P (x));
x = XEXP (x, 0);
/* Fall through. */
case 'U':
break;
case 'd':
- gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == V2SFmode);
+ gcc_assert (REG_P (x) && GET_MODE (x) == V2SFmode);
fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
break;
}
goto default_output;
case 'u':
- if (GET_CODE (x) == CONST_INT)
+ if (CONST_INT_P (x))
{
fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
break;
== GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
&& subreg_lowpart_p (inner))
inner = SUBREG_REG (inner);
- if (GET_CODE (inner) == CONST_INT)
+ if (CONST_INT_P (inner))
{
x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
goto default_output;
if (GET_CODE (inner) == SUBREG
&& (GET_MODE_SIZE (GET_MODE (inner))
< GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
- && GET_CODE (SUBREG_REG (inner)) == REG)
+ && REG_P (SUBREG_REG (inner)))
{
offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
GET_MODE (SUBREG_REG (inner)),
GET_MODE (inner));
inner = SUBREG_REG (inner);
}
- if (GET_CODE (inner) != REG || GET_MODE_SIZE (inner_mode) > 8)
+ if (!REG_P (inner) || GET_MODE_SIZE (inner_mode) > 8)
abort ();
/* Floating point register pairs are always big endian;
general purpose registers are 64 bit wide. */
goto default_output;
case SUBREG:
gcc_assert (SUBREG_BYTE (x) == 0
- && GET_CODE (SUBREG_REG (x)) == REG);
+ && REG_P (SUBREG_REG (x)));
x = SUBREG_REG (x);
/* Fall through. */
else if (FP_REGISTER_P (REGNO (x))
&& mode == V4SFmode)
fprintf ((stream), "fv%s", reg_names[regno] + 2);
- else if (GET_CODE (x) == REG
+ else if (REG_P (x)
&& mode == V2SFmode)
fprintf ((stream), "fp%s", reg_names[regno] + 2);
else if (FP_REGISTER_P (REGNO (x))
expand_block_move (rtx *operands)
{
int align = INTVAL (operands[3]);
- int constp = (GET_CODE (operands[2]) == CONST_INT);
+ int constp = (CONST_INT_P (operands[2]));
int bytes = (constp ? INTVAL (operands[2]) : 0);
if (! constp)
if ((mode == SImode || mode == DImode)
&& flag_pic
&& ! ((mode == Pmode || mode == ptr_mode)
- && tls_symbolic_operand (operands[1], Pmode) != 0))
+ && tls_symbolic_operand (operands[1], Pmode) != TLS_MODEL_NONE))
{
rtx temp;
if (SYMBOLIC_CONST_P (operands[1]))
{
- if (GET_CODE (operands[0]) == MEM)
+ if (MEM_P (operands[0]))
operands[1] = force_reg (Pmode, operands[1]);
else if (TARGET_SHMEDIA
&& GET_CODE (operands[1]) == LABEL_REF
&& ! sh_register_operand (operands[1], mode))
operands[1] = copy_to_mode_reg (mode, operands[1]);
- if (GET_CODE (operands[0]) == MEM && ! memory_operand (operands[0], mode))
+ if (MEM_P (operands[0]) && ! memory_operand (operands[0], mode))
{
/* This is like change_address_1 (operands[0], mode, 0, 1) ,
except that we can't use that function because it is static. */
being used for the source. */
else if (TARGET_SH1
&& refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
- && GET_CODE (operands[0]) == MEM
+ && MEM_P (operands[0])
&& GET_CODE (XEXP (operands[0], 0)) == PLUS
- && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG)
+ && REG_P (XEXP (XEXP (operands[0], 0), 1)))
operands[1] = copy_to_mode_reg (mode, operands[1]);
}
op1 = operands[1];
if (GET_CODE (op1) == CONST
&& GET_CODE (XEXP (op1, 0)) == PLUS
- && tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode))
+ && (tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode)
+ != TLS_MODEL_NONE))
{
opc = XEXP (XEXP (op1, 0), 1);
op1 = XEXP (XEXP (op1, 0), 0);
else
opc = NULL_RTX;
- if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
+ if ((tls_kind = tls_symbolic_operand (op1, Pmode)) != TLS_MODEL_NONE)
{
rtx tga_op1, tga_ret, tmp, tmp2;
rtx op1;
rtx scratch = NULL_RTX;
- if (comparison == CODE_FOR_nothing)
+ if (comparison == LAST_AND_UNUSED_RTX_CODE)
comparison = GET_CODE (operands[0]);
else
scratch = operands[4];
- if (GET_CODE (operands[1]) == CONST_INT
- && GET_CODE (operands[2]) != CONST_INT)
+ if (CONST_INT_P (operands[1])
+ && !CONST_INT_P (operands[2]))
{
rtx tmp = operands[1];
operands[2] = tmp;
comparison = swap_condition (comparison);
}
- if (GET_CODE (operands[2]) == CONST_INT)
+ if (CONST_INT_P (operands[2]))
{
HOST_WIDE_INT val = INTVAL (operands[2]);
if ((val == -1 || val == -0x81)
allocated to a different hard register, thus we load the constant into
a register unless it is zero. */
if (!REG_P (operands[2])
- && (GET_CODE (operands[2]) != CONST_INT
+ && (!CONST_INT_P (operands[2])
|| (mode == SImode && operands[2] != CONST0_RTX (SImode)
&& ((comparison != EQ && comparison != NE)
|| (REG_P (op1) && REGNO (op1) != R0_REG)
operands[1], operands[2])));
jump = emit_jump_insn (branch_expander (operands[3]));
if (probability >= 0)
- REG_NOTES (jump)
- = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (probability),
- REG_NOTES (jump));
+ add_reg_note (jump, REG_BR_PROB, GEN_INT (probability));
}
op2h = gen_highpart_mode (SImode, DImode, operands[2]);
op1l = gen_lowpart (SImode, operands[1]);
op2l = gen_lowpart (SImode, operands[2]);
- msw_taken = msw_skip = lsw_taken = CODE_FOR_nothing;
+ msw_taken = msw_skip = lsw_taken = LAST_AND_UNUSED_RTX_CODE;
prob = split_branch_probability;
rev_prob = REG_BR_PROB_BASE - prob;
switch (comparison)
break;
case GTU: case GT:
msw_taken = comparison;
- if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)
+ if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
break;
if (comparison != GTU || op2h != CONST0_RTX (SImode))
msw_skip = swap_condition (msw_taken);
lsw_taken = LTU;
break;
case LEU: case LE:
- if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)
+ if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
msw_taken = comparison;
else
{
break;
default: return false;
}
- num_branches = ((msw_taken != CODE_FOR_nothing)
- + (msw_skip != CODE_FOR_nothing)
- + (lsw_taken != CODE_FOR_nothing));
+ num_branches = ((msw_taken != LAST_AND_UNUSED_RTX_CODE)
+ + (msw_skip != LAST_AND_UNUSED_RTX_CODE)
+ + (lsw_taken != LAST_AND_UNUSED_RTX_CODE));
if (comparison != EQ && comparison != NE && num_branches > 1)
{
if (!CONSTANT_P (operands[2])
operands[2] = op2h;
operands[4] = NULL_RTX;
if (reload_completed
- && ! arith_reg_or_0_operand (op2h, SImode) && true_regnum (op1h)
- && (msw_taken != CODE_FOR_nothing || msw_skip != CODE_FOR_nothing))
+ && ! arith_reg_or_0_operand (op2h, SImode)
+ && (true_regnum (op1h) || (comparison != EQ && comparison != NE))
+ && (msw_taken != LAST_AND_UNUSED_RTX_CODE
+ || msw_skip != LAST_AND_UNUSED_RTX_CODE))
{
emit_move_insn (scratch, operands[2]);
operands[2] = scratch;
}
- if (msw_taken != CODE_FOR_nothing)
+ if (msw_taken != LAST_AND_UNUSED_RTX_CODE)
expand_cbranchsi4 (operands, msw_taken, msw_taken_prob);
- if (msw_skip != CODE_FOR_nothing)
+ if (msw_skip != LAST_AND_UNUSED_RTX_CODE)
{
rtx taken_label = operands[3];
/* Operands were possibly modified, but msw_skip doesn't expect this.
Always use the original ones. */
- if (msw_taken != CODE_FOR_nothing)
+ if (msw_taken != LAST_AND_UNUSED_RTX_CODE)
{
operands[1] = op1h;
operands[2] = op2h;
}
operands[1] = op1l;
operands[2] = op2l;
- if (lsw_taken != CODE_FOR_nothing)
+ if (lsw_taken != LAST_AND_UNUSED_RTX_CODE)
{
if (reload_completed
- && ! arith_reg_or_0_operand (op2l, SImode) && true_regnum (op1l))
- operands[4] = scratch;
+ && ! arith_reg_or_0_operand (op2l, SImode)
+ && (true_regnum (op1l) || (lsw_taken != EQ && lsw_taken != NE)))
+ {
+ emit_move_insn (scratch, operands[2]);
+ operands[2] = scratch;
+ }
expand_cbranchsi4 (operands, lsw_taken, lsw_taken_prob);
}
- if (msw_skip != CODE_FOR_nothing)
+ if (msw_skip != LAST_AND_UNUSED_RTX_CODE)
emit_label (skip_label);
return true;
}
+/* Emit INSN, possibly in a PARALLEL with an USE of fpscr for SH4.
+   MODE is the mode of the comparison operands: for float modes on
+   SH4/SH2A the insn is wrapped with an USE of the FPSCR and emitted
+   through the single- or double-precision emitter as appropriate;
+   everything else is emitted unchanged.  */
+
+static void
+sh_emit_set_t_insn (rtx insn, enum machine_mode mode)
+{
+  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
+    {
+      insn = gen_rtx_PARALLEL (VOIDmode,
+			       gen_rtvec (2, insn,
+					  gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
+      /* Pick the precision-specific emit helper by mode.  */
+      (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
+    }
+  else
+    emit_insn (insn);
+}
+
/* Prepare the operands for an scc instruction; make sure that the
- compare has been done. */
-rtx
-prepare_scc_operands (enum rtx_code code)
+ compare has been done and the result is in T_REG. */
+void
+sh_emit_scc_to_t (enum rtx_code code, rtx op0, rtx op1)
{
rtx t_reg = gen_rtx_REG (SImode, T_REG);
enum rtx_code oldcode = code;
}
if (code != oldcode)
{
- rtx tmp = sh_compare_op0;
- sh_compare_op0 = sh_compare_op1;
- sh_compare_op1 = tmp;
+ rtx tmp = op0;
+ op0 = op1;
+ op1 = tmp;
}
- mode = GET_MODE (sh_compare_op0);
+ mode = GET_MODE (op0);
if (mode == VOIDmode)
- mode = GET_MODE (sh_compare_op1);
+ mode = GET_MODE (op1);
- sh_compare_op0 = force_reg (mode, sh_compare_op0);
+ op0 = force_reg (mode, op0);
if ((code != EQ && code != NE
- && (sh_compare_op1 != const0_rtx
+ && (op1 != const0_rtx
|| code == GTU || code == GEU || code == LTU || code == LEU))
- || (mode == DImode && sh_compare_op1 != const0_rtx)
+ || (mode == DImode && op1 != const0_rtx)
|| (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
- sh_compare_op1 = force_reg (mode, sh_compare_op1);
+ op1 = force_reg (mode, op1);
- if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
- (mode == SFmode ? emit_sf_insn : emit_df_insn)
- (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
- gen_rtx_SET (VOIDmode, t_reg,
- gen_rtx_fmt_ee (code, SImode,
- sh_compare_op0, sh_compare_op1)),
- gen_rtx_USE (VOIDmode, get_fpscr_rtx ()))));
- else
- emit_insn (gen_rtx_SET (VOIDmode, t_reg,
- gen_rtx_fmt_ee (code, SImode,
- sh_compare_op0, sh_compare_op1)));
+ sh_emit_set_t_insn (gen_rtx_SET (VOIDmode, t_reg,
+ gen_rtx_fmt_ee (code, SImode, op0, op1)),
+ mode);
+}
- return t_reg;
+/* Emit an SHmedia store-flag sequence computing CODE applied to
+   OP0/OP1 into a fresh SImode register (sign-extended to DImode when
+   MODE is DImode) and return an rtx comparing that result against
+   zero.  Returns NULL_RTX for the unordered-mixing codes, which have
+   no cheap sequence here.  */
+rtx
+sh_emit_cheap_store_flag (enum machine_mode mode, enum rtx_code code,
+			  rtx op0, rtx op1)
+{
+  rtx target = gen_reg_rtx (SImode);
+  rtx tmp;
+
+  gcc_assert (TARGET_SHMEDIA);
+  switch (code)
+    {
+    case EQ:
+    case GT:
+    case LT:
+    case UNORDERED:
+    case GTU:
+    case LTU:
+      /* Directly computable: set TARGET, then test it nonzero.  */
+      tmp = gen_rtx_fmt_ee (code, SImode, op0, op1);
+      emit_insn (gen_cstore4_media (target, tmp, op0, op1));
+      code = NE;
+      break;
+
+    case NE:
+    case GE:
+    case LE:
+    case ORDERED:
+    case GEU:
+    case LEU:
+      /* Compute the reverse condition and test the result for zero.  */
+      tmp = gen_rtx_fmt_ee (reverse_condition (code), mode, op0, op1);
+      emit_insn (gen_cstore4_media (target, tmp, op0, op1));
+      code = EQ;
+      break;
+
+    case UNEQ:
+    case UNGE:
+    case UNGT:
+    case UNLE:
+    case UNLT:
+    case LTGT:
+      /* No cheap sequence for these; caller must fall back.  */
+      return NULL_RTX;
+
+    default:
+      gcc_unreachable ();
+    }
+
+  if (mode == DImode)
+    {
+      rtx t2 = gen_reg_rtx (DImode);
+      emit_insn (gen_extendsidi2 (t2, target));
+      target = t2;
+    }
+
+  return gen_rtx_fmt_ee (code, VOIDmode, target, const0_rtx);
}
/* Called from the md file, set up the operands of a compare instruction. */
+/* For sh_emit_compare_and_branch: operands[0] is the comparison code,
+   operands[1]/operands[2] are the compare operands (mode MODE), and
+   operands[3] is the branch target label.  */
void
-from_compare (rtx *operands, int code)
+sh_emit_compare_and_branch (rtx *operands, enum machine_mode mode)
{
-  enum machine_mode mode = GET_MODE (sh_compare_op0);
-  rtx insn;
-  if (mode == VOIDmode)
-    mode = GET_MODE (sh_compare_op1);
-  if (code != EQ
-      || mode == DImode
-      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
+  enum rtx_code code = GET_CODE (operands[0]);
+  enum rtx_code branch_code;
+  rtx op0 = operands[1];
+  rtx op1 = operands[2];
+  rtx insn, tem;
+  bool need_ccmpeq = false;
+
+  /* Float compares always need both operands in registers.  */
+  if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT)
+    {
+      op0 = force_reg (mode, op0);
+      op1 = force_reg (mode, op1);
+    }
+  else
    {
-      /* Force args into regs, since we can't use constants here.  */
-      sh_compare_op0 = force_reg (mode, sh_compare_op0);
-      if (sh_compare_op1 != const0_rtx
-	  || code == GTU || code == GEU
-	  || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
-	sh_compare_op1 = force_reg (mode, sh_compare_op1);
+      if (code != EQ || mode == DImode)
+	{
+	  /* Force args into regs, since we can't use constants here.  */
+	  op0 = force_reg (mode, op0);
+	  if (op1 != const0_rtx || code == GTU || code == GEU)
+	    op1 = force_reg (mode, op1);
+	}
    }
-  if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT && code == GE)
+
+  /* Canonicalize float compares down to the codes the hardware has
+     patterns for, swapping operands where that gives an equivalent
+     supported code.  */
+  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
-      from_compare (operands, GT);
-      insn = gen_ieee_ccmpeqsf_t (sh_compare_op0, sh_compare_op1);
+      if (code == LT
+	  || (code == LE && TARGET_IEEE && TARGET_SH2E)
+	  || (code == GE && !(TARGET_IEEE && TARGET_SH2E)))
+	{
+	  tem = op0, op0 = op1, op1 = tem;
+	  code = swap_condition (code);
+	}
+
+      /* GE becomes fcmp/gt+fcmp/eq, for SH2E and TARGET_IEEE only.  */
+      if (code == GE)
+	{
+	  gcc_assert (TARGET_IEEE && TARGET_SH2E);
+	  need_ccmpeq = true;
+	  code = GT;
+	}
+
+      /* Now we can have EQ, NE, GT, LE.  NE and LE are then transformed
+	 to EQ/GT respectively.  */
+      gcc_assert (code == EQ || code == GT || code == NE || code == LE);
    }
+
+  /* Codes without a direct T-bit-setting pattern are reversed here;
+     the branch sense is inverted correspondingly below.  */
+  switch (code)
+    {
+    case EQ:
+    case GT:
+    case GE:
+    case GTU:
+    case GEU:
+      branch_code = code;
+      break;
+    case NE:
+    case LT:
+    case LE:
+    case LTU:
+    case LEU:
+      branch_code = reverse_condition (code);
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  insn = gen_rtx_SET (VOIDmode,
+		      gen_rtx_REG (SImode, T_REG),
+		      gen_rtx_fmt_ee (branch_code, SImode, op0, op1));
+
+  sh_emit_set_t_insn (insn, mode);
+  if (need_ccmpeq)
+    sh_emit_set_t_insn (gen_ieee_ccmpeqsf_t (op0, op1), mode);
+
+  /* Branch on T set if the code was emitted directly, on T clear if it
+     was reversed above.  */
+  if (branch_code == code)
+    emit_jump_insn (gen_branch_true (operands[3]));
else
-    insn = gen_rtx_SET (VOIDmode,
-			gen_rtx_REG (SImode, T_REG),
-			gen_rtx_fmt_ee (code, SImode,
-					sh_compare_op0, sh_compare_op1));
-  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
+    emit_jump_insn (gen_branch_false (operands[3]));
+}
+
+/* Emit code storing into operands[0] the 0/1 result of comparing
+   operands[2] and operands[3] (mode MODE) under the comparison code
+   operands[1].  The T bit is used as the intermediate result.  */
+void
+sh_emit_compare_and_set (rtx *operands, enum machine_mode mode)
+{
+  enum rtx_code code = GET_CODE (operands[1]);
+  rtx op0 = operands[2];
+  rtx op1 = operands[3];
+  rtx lab = NULL_RTX;
+  bool invert = false;
+  rtx tem;
+
+  op0 = force_reg (mode, op0);
+  if ((code != EQ && code != NE
+       && (op1 != const0_rtx
+	   || code == GTU || code == GEU || code == LTU || code == LEU))
+      || (mode == DImode && op1 != const0_rtx)
+      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
+    op1 = force_reg (mode, op1);
+
+  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
-      insn = gen_rtx_PARALLEL (VOIDmode,
-			       gen_rtvec (2, insn,
-					  gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
-      (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
+      /* LT/LE are handled by swapping the operands and the code.  */
+      if (code == LT || code == LE)
+	{
+	  code = swap_condition (code);
+	  tem = op0, op0 = op1, op1 = tem;
+	}
+      if (code == GE)
+	{
+	  if (TARGET_IEEE)
+	    {
+	      /* IEEE GE is EQ || GT: test EQ first, branch over the GT
+		 test if it already holds.  */
+	      lab = gen_label_rtx ();
+	      sh_emit_scc_to_t (EQ, op0, op1);
+	      emit_jump_insn (gen_branch_true (lab));
+	      code = GT;
+	   }
+	  else
+	    {
+	      /* Non-IEEE: GE is simply !LT.  */
+	      code = LT;
+	      invert = true;
+	    }
+	}
    }
+
+  /* NE has no direct pattern; compute EQ and invert.  */
+  if (code == NE)
+    {
+      code = EQ;
+      invert = true;
+    }
+
+  sh_emit_scc_to_t (code, op0, op1);
+  if (lab)
+    emit_label (lab);
+  if (invert)
+    emit_insn (gen_movnegt (operands[0]));
else
-    emit_insn (insn);
+    emit_move_insn (operands[0], gen_rtx_REG (SImode, T_REG));
}
\f
/* Functions to output assembly code. */
rtx dst = operands[0];
rtx src = operands[1];
- if (GET_CODE (dst) == MEM
+ if (MEM_P (dst)
&& GET_CODE (XEXP (dst, 0)) == PRE_DEC)
return "mov.l %T1,%0\n\tmov.l %1,%0";
else
return "mov %1,%0\n\tmov %T1,%T0";
}
- else if (GET_CODE (src) == CONST_INT)
+ else if (CONST_INT_P (src))
{
if (INTVAL (src) < 0)
output_asm_insn ("mov #-1,%S0", operands);
return "mov %1,%R0";
}
- else if (GET_CODE (src) == MEM)
+ else if (MEM_P (src))
{
int ptrreg = -1;
int dreg = REGNO (dst);
supported, so we can't use the 'o' constraint.
Thus we must check for and handle r0+REG addresses here.
We punt for now, since this is likely very rare. */
- gcc_assert (GET_CODE (XEXP (inside, 1)) != REG);
+ gcc_assert (!REG_P (XEXP (inside, 1)));
break;
case LABEL_REF:
jump = "mov.l %O0,%1; jmp @%1";
}
/* If we have a scratch register available, use it. */
- if (GET_CODE ((prev = prev_nonnote_insn (insn))) == INSN
+ if (NONJUMP_INSN_P ((prev = prev_nonnote_insn (insn)))
&& INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
{
this_jmp.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
{
rtx next_insn = NEXT_INSN (insn);
- if (next_insn && GET_CODE (next_insn) == JUMP_INSN && condjump_p (next_insn))
+ if (next_insn && JUMP_P (next_insn) && condjump_p (next_insn))
{
rtx src = SET_SRC (PATTERN (next_insn));
if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
if (!reload_completed || !flag_pic)
return false;
- if (GET_CODE (insn) != INSN)
+ if (!NONJUMP_INSN_P (insn))
return false;
if (asm_noperands (insn) >= 0)
return false;
shift_insns_rtx (rtx insn)
{
rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
- int shift_count = INTVAL (XEXP (set_src, 1));
+ int shift_count = INTVAL (XEXP (set_src, 1)) & 31;
enum rtx_code shift_code = GET_CODE (set_src);
switch (shift_code)
if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
{
if (GET_MODE (x) == DImode
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) == 1)
return 2;
return MAX_COST;
}
/* If shift by a non constant, then this will be expensive. */
- if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ if (!CONST_INT_P (XEXP (x, 1)))
return SH_DYNAMIC_SHIFT_COST;
- value = INTVAL (XEXP (x, 1));
+ /* Otherwise, return the true cost in instructions. Cope with out of range
+ shift counts more or less arbitrarily. */
+ value = INTVAL (XEXP (x, 1)) & 31;
- /* Otherwise, return the true cost in instructions. */
if (GET_CODE (x) == ASHIFTRT)
{
int cost = ashiftrt_insns[value];
int i;
/* Anding with a register is a single cycle and instruction. */
- if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ if (!CONST_INT_P (XEXP (x, 1)))
return 1;
i = INTVAL (XEXP (x, 1));
addsubcosts (rtx x)
{
/* Adding a register is a single cycle insn. */
- if (GET_CODE (XEXP (x, 1)) == REG
+ if (REG_P (XEXP (x, 1))
|| GET_CODE (XEXP (x, 1)) == SUBREG)
return 1;
/* Likewise for small constants. */
- if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ if (CONST_INT_P (XEXP (x, 1))
&& CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
return 1;
int max, i;
/* Truncate the shift count in case it is out of bounds. */
- value = value & 0x1f;
+ value = value & 31;
if (value == 31)
{
{
/* There is a two instruction sequence for 31 bit left shifts,
but it requires r0. */
- if (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 0)
+ if (REG_P (operands[0]) && REGNO (operands[0]) == 0)
{
emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
if (TARGET_SH3)
{
- if (GET_CODE (operands[2]) != CONST_INT)
+ if (!CONST_INT_P (operands[2]))
{
rtx count = copy_to_mode_reg (SImode, operands[2]);
emit_insn (gen_negsi2 (count, count));
return 1;
}
}
- if (GET_CODE (operands[2]) != CONST_INT)
+ if (!CONST_INT_P (operands[2]))
return 0;
value = INTVAL (operands[2]) & 31;
int
sh_dynamicalize_shift_p (rtx count)
{
- return shift_insns[INTVAL (count)] > 1 + SH_DYNAMIC_SHIFT_COST;
+ return shift_insns[INTVAL (count) & 31] > 1 + SH_DYNAMIC_SHIFT_COST;
}
/* Try to find a good way to implement the combiner pattern
if (left < 0 || left > 31)
return 0;
- if (GET_CODE (mask_rtx) == CONST_INT)
+ if (CONST_INT_P (mask_rtx))
mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
else
mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
shl_and_scr_length (rtx insn)
{
rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
- int len = shift_insns[INTVAL (XEXP (set_src, 1))];
+ int len = shift_insns[INTVAL (XEXP (set_src, 1)) & 31];
rtx op = XEXP (set_src, 0);
- len += shift_insns[INTVAL (XEXP (op, 1))] + 1;
+ len += shift_insns[INTVAL (XEXP (op, 1)) & 31] + 1;
op = XEXP (XEXP (op, 0), 0);
- return len + shift_insns[INTVAL (XEXP (op, 1))];
+ return len + shift_insns[INTVAL (XEXP (op, 1)) & 31];
}
/* Generate rtl for instructions for which shl_and_kind advised a particular
scan = emit_insn_after (gen_align_4 (), scan);
need_align = 0;
for (; start != barrier; start = NEXT_INSN (start))
- if (GET_CODE (start) == INSN
+ if (NONJUMP_INSN_P (start)
&& recog_memoized (start) == CODE_FOR_casesi_worker_2)
{
rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
static int
hi_const (rtx src)
{
- return (GET_CODE (src) == CONST_INT
+ return (CONST_INT_P (src)
&& INTVAL (src) >= -32768
&& INTVAL (src) <= 32767);
}
static int
broken_move (rtx insn)
{
- if (GET_CODE (insn) == INSN)
+ if (NONJUMP_INSN_P (insn))
{
rtx pat = PATTERN (insn);
if (GET_CODE (pat) == PARALLEL)
&& GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
&& (fp_zero_operand (SET_SRC (pat))
|| fp_one_operand (SET_SRC (pat)))
- /* ??? If this is a -m4 or -m4-single compilation, in general
- we don't know the current setting of fpscr, so disable fldi.
+ /* In general we don't know the current setting of fpscr, so disable fldi.
There is an exception if this was a register-register move
before reload - and hence it was ascertained that we have
single precision setting - and in a post-reload optimization
we changed this to do a constant load. In that case
we don't have an r0 clobber, hence we must use fldi. */
- && (! TARGET_SH4 || TARGET_FMOVD
+ && (TARGET_FMOVD
|| (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
== SCRATCH))
- && GET_CODE (SET_DEST (pat)) == REG
+ && REG_P (SET_DEST (pat))
&& FP_REGISTER_P (REGNO (SET_DEST (pat))))
&& ! (TARGET_SH2A
&& GET_MODE (SET_DEST (pat)) == SImode
static int
mova_p (rtx insn)
{
- return (GET_CODE (insn) == INSN
+ return (NONJUMP_INSN_P (insn)
&& GET_CODE (PATTERN (insn)) == SET
&& GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
&& XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
{
worker = NEXT_INSN (worker);
gcc_assert (worker
- && GET_CODE (worker) != CODE_LABEL
- && GET_CODE (worker) != JUMP_INSN);
- } while (GET_CODE (worker) == NOTE
+ && !LABEL_P (worker)
+ && !JUMP_P (worker));
+ } while (NOTE_P (worker)
|| recog_memoized (worker) != CODE_FOR_casesi_worker_1);
wpat = PATTERN (worker);
wpat0 = XVECEXP (wpat, 0, 0);
if (optimize)
{
+ /* If NEW_MOVA has no address yet, it will be handled later. */
+ if (INSN_ADDRESSES_SIZE() <= (unsigned) INSN_UID (new_mova))
+ return -1;
+
n_addr = INSN_ADDRESSES (INSN_UID (new_mova));
n_target = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (new_mova), 0)));
if (n_addr > n_target || n_addr + 1022 < n_target)
call, determine the alignment. N.B. When find_barrier recurses for
an out-of-reach mova, we might see labels at the start of previously
inserted constant tables. */
- if (GET_CODE (from) == CODE_LABEL
+ if (LABEL_P (from)
&& CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
{
if (optimize)
new_align = 1 << label_to_alignment (from);
- else if (GET_CODE (prev_nonnote_insn (from)) == BARRIER)
+ else if (BARRIER_P (prev_nonnote_insn (from)))
new_align = 1 << barrier_align (from);
else
new_align = 1;
for explicit alignments. If the table is long, we might be forced
to emit the new table in front of it; the length of the alignment
might be the last straw. */
- else if (GET_CODE (from) == INSN
+ else if (NONJUMP_INSN_P (from)
&& GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
&& XINT (PATTERN (from), 1) == UNSPECV_ALIGN)
new_align = INTVAL (XVECEXP (PATTERN (from), 0, 0));
at the end. That is better than putting it in front because
this way, we don't need extra alignment for adding a 4-byte-aligned
mov(a) label to a 2/4 or 8/4 byte aligned table. */
- else if (GET_CODE (from) == INSN
+ else if (NONJUMP_INSN_P (from)
&& GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
&& XINT (PATTERN (from), 1) == UNSPECV_CONST_END)
return from;
- if (GET_CODE (from) == BARRIER)
+ if (BARRIER_P (from))
{
rtx next;
if (found_si > count_si)
count_si = found_si;
}
- else if (GET_CODE (from) == JUMP_INSN
- && (GET_CODE (PATTERN (from)) == ADDR_VEC
- || GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC))
+ else if (JUMP_TABLE_DATA_P (from))
{
if ((num_mova > 1 && GET_MODE (prev_nonnote_insn (from)) == VOIDmode)
|| (num_mova
}
}
/* For the SH1, we generate alignments even after jumps-around-jumps. */
- else if (GET_CODE (from) == JUMP_INSN
+ else if (JUMP_P (from)
&& ! TARGET_SH2
&& ! TARGET_SMALLCODE)
new_align = 4;
around the constant pool table will be hit. Putting it before
a jump makes it more likely that the bra delay slot will be
filled. */
- while (GET_CODE (from) == JUMP_INSN || GET_CODE (from) == NOTE
- || GET_CODE (from) == CODE_LABEL)
+ while (NOTE_P (from) || JUMP_P (from)
+ || LABEL_P (from))
from = PREV_INSN (from);
from = emit_jump_insn_after (gen_jump (label), from);
int i;
rtx pattern, part, reg_part, reg;
- if (GET_CODE (insn) != INSN)
+ if (!NONJUMP_INSN_P (insn))
return 0;
pattern = PATTERN (insn);
if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
if (part == reg_part || GET_CODE (part) == CLOBBER)
continue;
if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
- && GET_CODE (SET_DEST (part)) == REG)
+ && REG_P (SET_DEST (part)))
? SET_SRC (part) : part)))
return 0;
}
{
pattern = single_set (insn);
if (pattern
- && GET_CODE (SET_DEST (pattern)) == REG
+ && REG_P (SET_DEST (pattern))
&& REGNO (reg) == REGNO (SET_DEST (pattern)))
*set = pattern;
return 0;
}
- if (GET_CODE (insn) != CALL_INSN)
+ if (!CALL_P (insn))
{
/* We don't use rtx_equal_p because we don't care if the mode is
different. */
pattern = single_set (insn);
if (pattern
- && GET_CODE (SET_DEST (pattern)) == REG
+ && REG_P (SET_DEST (pattern))
&& REGNO (reg) == REGNO (SET_DEST (pattern)))
{
rtx par, part;
{
/* We don't use rtx_equal_p, because we don't care if the
mode is different. */
- if (GET_CODE (SET_DEST (pattern)) != REG
+ if (!REG_P (SET_DEST (pattern))
|| REGNO (reg) != REGNO (SET_DEST (pattern)))
return 1;
}
if (GET_CODE (pattern) != CALL
- || GET_CODE (XEXP (pattern, 0)) != MEM
+ || !MEM_P (XEXP (pattern, 0))
|| ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
return 1;
{
rtx y = SUBREG_REG (x);
- if (GET_CODE (y) != REG)
+ if (!REG_P (y))
break;
if (REGNO (y) < 16)
return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
rtx dest;
/* First, check if we already have an instruction that satisfies our need. */
- if (prev && GET_CODE (prev) == INSN && ! INSN_DELETED_P (prev))
+ if (prev && NONJUMP_INSN_P (prev) && ! INSN_DELETED_P (prev))
{
if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
return prev;
else if (optimize && need_block >= 0)
{
rtx next = next_active_insn (next_active_insn (dest));
- if (next && GET_CODE (next) == JUMP_INSN
+ if (next && JUMP_P (next)
&& GET_CODE (PATTERN (next)) == SET
&& recog_memoized (next) == CODE_FOR_jump_compact)
{
{
rtx vec_lab, pat, prev, prevpat, x, braf_label;
- if (GET_CODE (insn) != JUMP_INSN
+ if (!JUMP_P (insn)
|| GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
continue;
pat = PATTERN (insn);
/* Search the matching casesi_jump_2. */
for (prev = vec_lab; ; prev = PREV_INSN (prev))
{
- if (GET_CODE (prev) != JUMP_INSN)
+ if (!JUMP_P (prev))
continue;
prevpat = PATTERN (prev);
if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
prev = prev_real_insn (prev);
for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
- credit >= 0 && prev && GET_CODE (prev) == INSN;
+ credit >= 0 && prev && NONJUMP_INSN_P (prev);
prev = prev_real_insn (prev))
{
jump_to_next = 0;
credit -= get_attr_length (prev);
}
if (prev
- && GET_CODE (prev) == JUMP_INSN
+ && JUMP_P (prev)
&& JUMP_LABEL (prev))
{
rtx x;
do
next = next_nonnote_insn (next);
- while (next && GET_CODE (next) == CODE_LABEL);
+ while (next && LABEL_P (next));
if (! next
|| ! INSN_P (next)
rtx pattern, reg, link, set, scan, dies, label;
int rescan = 0, foundinsn = 0;
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
{
pattern = PATTERN (insn);
pattern = SET_SRC (pattern);
if (GET_CODE (pattern) != CALL
- || GET_CODE (XEXP (pattern, 0)) != MEM)
+ || !MEM_P (XEXP (pattern, 0)))
continue;
reg = XEXP (XEXP (pattern, 0), 0);
continue;
}
- if (GET_CODE (reg) != REG)
+ if (!REG_P (reg))
continue;
/* Try scanning backward to find where the register is set. */
link = NULL;
for (scan = PREV_INSN (insn);
- scan && GET_CODE (scan) != CODE_LABEL;
+ scan && !LABEL_P (scan);
scan = PREV_INSN (scan))
{
if (! INSN_P (scan))
the call, and can result in situations where a single call
insn may have two targets depending on where we came from. */
- if (GET_CODE (scan) == CODE_LABEL && ! foundinsn)
+ if (LABEL_P (scan) && ! foundinsn)
break;
if (! INSN_P (scan))
safely, we would have to check that all the
instructions at the jump destination did not use REG. */
- if (GET_CODE (scan) == JUMP_INSN)
+ if (JUMP_P (scan))
break;
if (! reg_mentioned_p (reg, scan))
foundinsn = 1;
if (scan != insn
- && (GET_CODE (scan) == CALL_INSN || sfunc_uses_reg (scan)))
+ && (CALL_P (scan) || sfunc_uses_reg (scan)))
{
/* There is a function call to this register other
than the one we are checking. If we optimize
scan = NEXT_INSN (scan);
if (scan != insn
- && ((GET_CODE (scan) == CALL_INSN
+ && ((CALL_P (scan)
&& reg_mentioned_p (reg, scan))
|| ((reg2 = sfunc_uses_reg (scan))
&& REGNO (reg2) == REGNO (reg))))
num_mova = 0;
}
}
- else if (GET_CODE (insn) == JUMP_INSN
+ else if (JUMP_P (insn)
&& GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
&& num_mova
/* ??? loop invariant motion can also move a mova out of a
}
}
if (broken_move (insn)
- || (GET_CODE (insn) == INSN
+ || (NONJUMP_INSN_P (insn)
&& recog_memoized (insn) == CODE_FOR_casesi_worker_2))
{
rtx scan;
/* Now find all the moves between the points and modify them. */
for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
{
- if (GET_CODE (scan) == CODE_LABEL)
+ if (LABEL_P (scan))
last_float = 0;
- if (GET_CODE (scan) == INSN
+ if (NONJUMP_INSN_P (scan)
&& recog_memoized (scan) == CODE_FOR_casesi_worker_2)
need_aligned_label = 1;
if (broken_move (scan))
}
dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
}
- if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
+ if (REG_P (dst) && FP_ANY_REGISTER_P (REGNO (dst)))
{
/* This must be an insn that clobbers r0. */
rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
/* If we are not optimizing, then there may not be
a note. */
if (note)
- PUT_MODE (note, REG_INC);
+ PUT_REG_NOTE_KIND (note, REG_INC);
*last_float_addr = r0_inc_rtx;
}
dest = NEXT_INSN (dest);
dest_uid = INSN_UID (dest);
}
- if (GET_CODE (dest) == JUMP_INSN && GET_CODE (PATTERN (dest)) == RETURN)
+ if (JUMP_P (dest) && GET_CODE (PATTERN (dest)) == RETURN)
return 0;
return dest_uid;
}
so transform it into a note. */
SET_INSN_DELETED (insn);
}
- else if (GET_CODE (insn) == JUMP_INSN
+ else if (JUMP_P (insn)
/* Don't mess with ADDR_DIFF_VEC */
&& (GET_CODE (PATTERN (insn)) == SET
|| GET_CODE (PATTERN (insn)) == RETURN))
0));
if (beyond
- && (GET_CODE (beyond) == JUMP_INSN
+ && (JUMP_P (beyond)
|| ((beyond = next_active_insn (beyond))
- && GET_CODE (beyond) == JUMP_INSN))
+ && JUMP_P (beyond)))
&& GET_CODE (PATTERN (beyond)) == SET
&& recog_memoized (beyond) == CODE_FOR_jump_compact
&& ((INSN_ADDRESSES
next = next_active_insn (insn);
- if ((GET_CODE (next) == JUMP_INSN
+ if ((JUMP_P (next)
|| ((next = next_active_insn (next))
- && GET_CODE (next) == JUMP_INSN))
+ && JUMP_P (next)))
&& GET_CODE (PATTERN (next)) == SET
&& recog_memoized (next) == CODE_FOR_jump_compact
&& ((INSN_ADDRESSES
insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
}
if (! epilogue_p)
- REG_NOTES (insn)
- = (gen_rtx_EXPR_LIST
- (REG_FRAME_RELATED_EXPR,
- gen_rtx_SET (VOIDmode, reg,
- gen_rtx_PLUS (SImode, reg, GEN_INT (size))),
- REG_NOTES (insn)));
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, reg,
+ gen_rtx_PLUS (SImode, reg,
+ GEN_INT (size))));
}
}
}
x = gen_push (gen_rtx_REG (SImode, rn));
x = frame_insn (x);
- REG_NOTES (x)
- = gen_rtx_EXPR_LIST (REG_INC,
- gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
+ add_reg_note (x, REG_INC, gen_rtx_REG (SImode, STACK_POINTER_REGNUM));
return x;
}
x = gen_pop (gen_rtx_REG (SImode, rn));
x = emit_insn (x);
- REG_NOTES (x)
- = gen_rtx_EXPR_LIST (REG_INC,
- gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
+ add_reg_note (x, REG_INC, gen_rtx_REG (SImode, STACK_POINTER_REGNUM));
}
/* Generate code to push the regs specified in the mask. */
{
rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
pr_live = (pr_initial
- ? (GET_CODE (pr_initial) != REG
+ ? (!REG_P (pr_initial)
|| REGNO (pr_initial) != (PR_REG))
: df_regs_ever_live_p (PR_REG));
/* For Shcompact, if not optimizing, we end up with a memory reference
&& crtl->args.info.call_cookie
&& reg == PIC_OFFSET_TABLE_REGNUM)
|| (df_regs_ever_live_p (reg)
- && (!call_really_used_regs[reg]
+ && ((!call_really_used_regs[reg]
+ && !(reg != PIC_OFFSET_TABLE_REGNUM
+ && fixed_regs[reg] && call_used_regs[reg]))
|| (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
|| (crtl->calls_eh_return
&& (reg == EH_RETURN_DATA_REGNO (0)
/* If we're supposed to switch stacks at function entry, do so now. */
if (sp_switch_attr)
{
+ rtx lab, newsrc;
/* The argument specifies a variable holding the address of the
stack the interrupt function should switch to/from at entry/exit. */
+ tree arg = TREE_VALUE ( TREE_VALUE (sp_switch_attr));
const char *s
- = ggc_strdup (TREE_STRING_POINTER (TREE_VALUE (sp_switch_attr)));
+ = ggc_strdup (TREE_STRING_POINTER (arg));
rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);
- emit_insn (gen_sp_switch_1 (sp_switch));
+ lab = add_constant (sp_switch, SImode, 0);
+ newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
+ newsrc = gen_const_mem (SImode, newsrc);
+
+ emit_insn (gen_sp_switch_1 (newsrc));
}
d = calc_live_regs (&live_regs_mask);
tmp_pnt = schedule.temps;
for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
{
- enum machine_mode mode = entry->mode;
+ enum machine_mode mode = (enum machine_mode) entry->mode;
unsigned int reg = entry->reg;
rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
rtx orig_reg_rtx;
stack_pointer_rtx,
GEN_INT (offset)));
- GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_pre_dec);
-
- gcc_assert (r0);
- mem_rtx = NULL_RTX;
-
- try_pre_dec:
- do
- if (HAVE_PRE_DECREMENT
- && (offset_in_r0 - offset == GET_MODE_SIZE (mode)
- || mem_rtx == NULL_RTX
- || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
- {
- pre_dec = gen_frame_mem (mode, gen_rtx_PRE_DEC (Pmode, r0));
+ if (!memory_address_p (mode, XEXP (mem_rtx, 0)))
+ {
+ gcc_assert (r0);
+ mem_rtx = NULL_RTX;
+ }
- GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (pre_dec, 0),
- pre_dec_ok);
+ if (HAVE_PRE_DECREMENT
+ && (offset_in_r0 - offset == GET_MODE_SIZE (mode)
+ || mem_rtx == NULL_RTX
+ || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
+ {
+ pre_dec = gen_frame_mem (mode, gen_rtx_PRE_DEC (Pmode, r0));
+ if (!memory_address_p (mode, XEXP (pre_dec, 0)))
pre_dec = NULL_RTX;
-
- break;
-
- pre_dec_ok:
- mem_rtx = NULL_RTX;
- offset += GET_MODE_SIZE (mode);
- }
- while (0);
+ else
+ {
+ mem_rtx = NULL_RTX;
+ offset += GET_MODE_SIZE (mode);
+ }
+ }
if (mem_rtx != NULL_RTX)
goto addr_ok;
a direct save from the to-be-saved register. */
if (REGNO (reg_rtx) != reg)
{
- rtx set, note_rtx;
+ rtx set;
set = gen_rtx_SET (VOIDmode, mem_rtx, orig_reg_rtx);
- note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
- REG_NOTES (insn));
- REG_NOTES (insn) = note_rtx;
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
}
if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
{
rtx reg_rtx = gen_rtx_REG (mode, reg);
- rtx set, note_rtx;
+ rtx set;
rtx mem_rtx = gen_frame_mem (mode,
gen_rtx_PLUS (Pmode,
stack_pointer_rtx,
GEN_INT (offset)));
set = gen_rtx_SET (VOIDmode, mem_rtx, reg_rtx);
- note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
- REG_NOTES (insn));
- REG_NOTES (insn) = note_rtx;
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
}
}
}
if (frame_pointer_needed)
{
- /* We must avoid scheduling the epilogue with previous basic blocks
- when exception handling is enabled. See PR/18032. */
- if (flag_exceptions)
- emit_insn (gen_blockage ());
+ /* We must avoid scheduling the epilogue with previous basic blocks.
+ See PR/18032 and PR/40313. */
+ emit_insn (gen_blockage ());
output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
&live_regs_mask);
tmp_pnt = schedule.temps;
for (; entry->mode != VOIDmode; entry--)
{
- enum machine_mode mode = entry->mode;
+ enum machine_mode mode = (enum machine_mode) entry->mode;
int reg = entry->reg;
rtx reg_rtx, mem_rtx, post_inc = NULL_RTX, insn;
stack_pointer_rtx,
GEN_INT (offset)));
- GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_post_inc);
-
- mem_rtx = NULL_RTX;
-
- try_post_inc:
- do
- if (HAVE_POST_INCREMENT
- && (offset == offset_in_r0
- || (offset + GET_MODE_SIZE (mode) != d + d_rounding
- && mem_rtx == NULL_RTX)
- || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
- {
- post_inc = gen_frame_mem (mode, gen_rtx_POST_INC (Pmode, r0));
+ if (!memory_address_p (mode, XEXP (mem_rtx, 0)))
+ mem_rtx = NULL_RTX;
- GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (post_inc, 0),
- post_inc_ok);
+ if (HAVE_POST_INCREMENT
+ && (offset == offset_in_r0
+ || (offset + GET_MODE_SIZE (mode) != d + d_rounding
+ && mem_rtx == NULL_RTX)
+ || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
+ {
+ post_inc = gen_frame_mem (mode, gen_rtx_POST_INC (Pmode, r0));
+ if (!memory_address_p (mode, XEXP (post_inc, 0)))
post_inc = NULL_RTX;
-
- break;
-
- post_inc_ok:
+ else
mem_rtx = NULL_RTX;
- }
- while (0);
+ }
if (mem_rtx != NULL_RTX)
goto addr_ok;
tmp = gen_frame_mem (Pmode, tmp);
emit_insn (GEN_MOV (tmp, ra));
+ /* Tell the optimizers this store isn't dead. */
+ emit_use (tmp);
}
/* Clear variables at function end. */
record = (*lang_hooks.types.make_type) (RECORD_TYPE);
- f_next_o = build_decl (FIELD_DECL, get_identifier ("__va_next_o"),
+ f_next_o = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__va_next_o"),
ptr_type_node);
- f_next_o_limit = build_decl (FIELD_DECL,
+ f_next_o_limit = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL,
get_identifier ("__va_next_o_limit"),
ptr_type_node);
- f_next_fp = build_decl (FIELD_DECL, get_identifier ("__va_next_fp"),
+ f_next_fp = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__va_next_fp"),
ptr_type_node);
- f_next_fp_limit = build_decl (FIELD_DECL,
+ f_next_fp_limit = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL,
get_identifier ("__va_next_fp_limit"),
ptr_type_node);
- f_next_stack = build_decl (FIELD_DECL, get_identifier ("__va_next_stack"),
+ f_next_stack = build_decl (BUILTINS_LOCATION,
+ FIELD_DECL, get_identifier ("__va_next_stack"),
ptr_type_node);
DECL_FIELD_CONTEXT (f_next_o) = record;
}
addr = create_tmp_var (pptr_type_node, NULL);
- lab_false = create_artificial_label ();
- lab_over = create_artificial_label ();
+ lab_false = create_artificial_label (UNKNOWN_LOCATION);
+ lab_over = create_artificial_label (UNKNOWN_LOCATION);
valist = build1 (INDIRECT_REF, ptr_type_node, addr);
if (result)
{
gimplify_assign (result, tmp, pre_p);
-
+ result = build1 (NOP_EXPR, TREE_TYPE (result), result);
tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_over));
gimplify_and_add (tmp, pre_p);
}
return result;
}
+/* 64 bit floating point memory transfers are paired single precision loads
+   or stores. So DWARF information needs fixing in little endian (unless
+ PR=SZ=1 in FPSCR). */
+rtx
+sh_dwarf_register_span (rtx reg)
+{
+ unsigned regno = REGNO (reg);
+
+ if (WORDS_BIG_ENDIAN || GET_MODE (reg) != DFmode)
+ return NULL_RTX;
+
+ return
+ gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_REG (SFmode,
+ DBX_REGISTER_NUMBER (regno+1)),
+ gen_rtx_REG (SFmode,
+ DBX_REGISTER_NUMBER (regno))));
+}
+
+static enum machine_mode
+sh_promote_function_mode (const_tree type, enum machine_mode mode,
+ int *punsignedp, const_tree funtype,
+ int for_return ATTRIBUTE_UNUSED)
+{
+ if (sh_promote_prototypes (funtype))
+ return promote_mode (type, mode, punsignedp);
+ else
+ return mode;
+}
+
bool
sh_promote_prototypes (const_tree type)
{
|| is_attribute_p ("nosave_low_regs", TREE_PURPOSE (attrs))
|| is_attribute_p ("resbank", TREE_PURPOSE (attrs)))
warning (OPT_Wattributes,
- "%qs attribute only applies to interrupt functions",
- IDENTIFIER_POINTER (TREE_PURPOSE (attrs)));
+ "%qE attribute only applies to interrupt functions",
+ TREE_PURPOSE (attrs));
else
{
*tail = tree_cons (TREE_PURPOSE (attrs), NULL_TREE,
R0-R14, MACH, MACL, GBR and PR. This is useful only on SH2A targets.
*/
-const struct attribute_spec sh_attribute_table[] =
-{
- /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
- { "interrupt_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
- { "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
- { "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
- { "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
- { "trapa_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
- { "nosave_low_regs", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
- { "resbank", 0, 0, true, false, false, sh_handle_resbank_handler_attribute },
- { "function_vector", 1, 1, true, false, false, sh2a_handle_function_vector_handler_attribute },
-#ifdef SYMBIAN
- /* Symbian support adds three new attributes:
- dllexport - for exporting a function/variable that will live in a dll
- dllimport - for importing a function/variable from a dll
-
- Microsoft allows multiple declspecs in one __declspec, separating
- them with spaces. We do NOT support this. Instead, use __declspec
- multiple times. */
- { "dllimport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
- { "dllexport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
-#endif
- { NULL, 0, 0, false, false, false, NULL }
-};
-
/* Handle a 'resbank' attribute. */
static tree
sh_handle_resbank_handler_attribute (tree * node, tree name,
{
if (!TARGET_SH2A)
{
- warning (OPT_Wattributes, "%qs attribute is supported only for SH2A",
- IDENTIFIER_POINTER (name));
+ warning (OPT_Wattributes, "%qE attribute is supported only for SH2A",
+ name);
*no_add_attrs = true;
}
if (TREE_CODE (*node) != FUNCTION_DECL)
{
- warning (OPT_Wattributes, "%qs attribute only applies to functions",
- IDENTIFIER_POINTER (name));
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
*no_add_attrs = true;
}
{
if (TREE_CODE (*node) != FUNCTION_DECL)
{
- warning (OPT_Wattributes, "%qs attribute only applies to functions",
- IDENTIFIER_POINTER (name));
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
*no_add_attrs = true;
}
else if (TARGET_SHCOMPACT)
{
if (!TARGET_SH2A)
{
- warning (OPT_Wattributes, "%qs attribute only applies to SH2A",
- IDENTIFIER_POINTER (name));
+ warning (OPT_Wattributes, "%qE attribute only applies to SH2A",
+ name);
*no_add_attrs = true;
}
else if (TREE_CODE (*node) != FUNCTION_DECL)
{
- warning (OPT_Wattributes, "%qs attribute only applies to functions",
- IDENTIFIER_POINTER (name));
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
*no_add_attrs = true;
}
else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
{
/* The argument must be a constant integer. */
warning (OPT_Wattributes,
- "`%s' attribute argument not an integer constant",
- IDENTIFIER_POINTER (name));
+ "%qE attribute argument not an integer constant",
+ name);
*no_add_attrs = true;
}
else if (TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
{
/* The argument value must be between 0 to 255. */
warning (OPT_Wattributes,
- "`%s' attribute argument should be between 0 to 255",
- IDENTIFIER_POINTER (name));
+ "%qE attribute argument should be between 0 to 255",
+ name);
*no_add_attrs = true;
}
return NULL_TREE;
{
if (TREE_CODE (*node) != FUNCTION_DECL)
{
- warning (OPT_Wattributes, "%qs attribute only applies to functions",
- IDENTIFIER_POINTER (name));
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
*no_add_attrs = true;
}
else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
{
/* The argument must be a constant string. */
- warning (OPT_Wattributes, "%qs attribute argument not a string constant",
- IDENTIFIER_POINTER (name));
+ warning (OPT_Wattributes, "%qE attribute argument not a string constant",
+ name);
*no_add_attrs = true;
}
{
if (TREE_CODE (*node) != FUNCTION_DECL)
{
- warning (OPT_Wattributes, "%qs attribute only applies to functions",
- IDENTIFIER_POINTER (name));
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
*no_add_attrs = true;
}
/* The argument specifies a trap number to be used in a trapa instruction
else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
{
/* The argument must be a constant integer. */
- warning (OPT_Wattributes, "%qs attribute argument not an "
- "integer constant", IDENTIFIER_POINTER (name));
+ warning (OPT_Wattributes, "%qE attribute argument not an "
+ "integer constant", name);
*no_add_attrs = true;
}
return REAL_VALUES_EQUAL (r, dconst1);
}
-/* For -m4 and -m4-single-only, mode switching is used. If we are
+/* In general mode switching is used. If we are
compiling without -mfmovd, movsf_ie isn't taken into account for
mode switching. We could check in machine_dependent_reorg for
cases where we know we are in single precision mode, but there is
int
fldi_ok (void)
{
- return ! TARGET_SH4 || TARGET_FMOVD || reload_completed;
+ return 1;
}
int
}
/* Return the TLS type for TLS symbols, 0 for otherwise. */
-int
+enum tls_model
tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
if (GET_CODE (op) != SYMBOL_REF)
- return 0;
+ return TLS_MODEL_NONE;
return SYMBOL_REF_TLS_MODEL (op);
}
\f
case. Disregard the case where this is a store to memory, since
we are checking a register used in the store address. */
set = single_set (insn);
- if (set && GET_CODE (SET_DEST (set)) != MEM
+ if (set && !MEM_P (SET_DEST (set))
&& reg_overlap_mentioned_p (reg, SET_DEST (set)))
return 1;
rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
rtx set = single_set (this_insn);
- if (GET_CODE (this_insn) == CALL_INSN)
+ if (CALL_P (this_insn))
code = CALL_INSN;
- else if (GET_CODE (this_insn) == JUMP_INSN)
+ else if (JUMP_P (this_insn))
{
if (INSN_ANNULLED_BRANCH_P (this_insn))
return 0;
return 0;
if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
{
- if (GET_CODE (SET_DEST (set)) != MEM)
+ if (!MEM_P (SET_DEST (set)))
retval = 1;
else
return 0;
if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
return 0;
if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
- return GET_CODE (SET_DEST (set)) != MEM;
+ return !MEM_P (SET_DEST (set));
if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
return 0;
t = build_index_type (integer_one_node);
t = build_array_type (integer_type_node, t);
- t = build_decl (VAR_DECL, get_identifier ("__fpscr_values"), t);
+ t = build_decl (BUILTINS_LOCATION,
+ VAR_DECL, get_identifier ("__fpscr_values"), t);
DECL_ARTIFICIAL (t) = 1;
DECL_IGNORED_P (t) = 1;
DECL_EXTERNAL (t) = 1;
void
fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
{
- enum attr_fp_mode fp_mode = mode;
+ enum attr_fp_mode fp_mode = (enum attr_fp_mode) mode;
enum attr_fp_mode norm_mode = ACTUAL_NORMAL_MODE (FP_MODE);
rtx addr_reg;
{
/* Instructions with unfilled delay slots take up an extra two bytes for
the nop in the delay slot. */
- if (((GET_CODE (insn) == INSN
+ if (((NONJUMP_INSN_P (insn)
&& GET_CODE (PATTERN (insn)) != USE
&& GET_CODE (PATTERN (insn)) != CLOBBER)
- || GET_CODE (insn) == CALL_INSN
- || (GET_CODE (insn) == JUMP_INSN
+ || CALL_P (insn)
+ || (JUMP_P (insn)
&& GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
&& GET_CODE (PATTERN (insn)) != ADDR_VEC))
&& GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE
/* SH2e has a bug that prevents the use of annulled branches, so if
the delay slot is not filled, we'll have to put a NOP in it. */
- if (sh_cpu == CPU_SH2E
- && GET_CODE (insn) == JUMP_INSN
+ if (sh_cpu_attr == CPU_SH2E
+ && JUMP_P (insn)
&& GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
&& GET_CODE (PATTERN (insn)) != ADDR_VEC
&& get_attr_type (insn) == TYPE_CBRANCH
/* sh-dsp parallel processing insn take four bytes instead of two. */
- if (GET_CODE (insn) == INSN)
+ if (NONJUMP_INSN_P (insn))
{
int sum = 0;
rtx body = PATTERN (insn);
return 0;
}
\f
+/* Return TRUE for a valid displacement for the REG+disp addressing
+ with MODE. */
+
+/* ??? The SH2e does not have the REG+disp addressing mode when loading values
+ into the FRx registers. We implement this by setting the maximum offset
+ to zero when the value is SFmode. This also restricts loading of SFmode
+ values into the integer registers, but that can't be helped. */
+
+/* The SH allows a displacement in a QI or HI amode, but only when the
+   other operand is R0.  GCC doesn't handle this very well, so we forget
+   all of that.
+
+ A legitimate index for a QI or HI is 0, SI can be any number 0..63,
+ DI can be any number 0..60. */
+
+bool
+sh_legitimate_index_p (enum machine_mode mode, rtx op)
+{
+  /* Only constant displacements can be legitimate indices here;
+     register indices are handled by the caller.  */
+  if (CONST_INT_P (op))
+    {
+      if (TARGET_SHMEDIA)
+	{
+	  int size;
+
+	  /* Check if this is the address of an unaligned load / store.  */
+	  if (mode == VOIDmode)
+	    return CONST_OK_FOR_I06 (INTVAL (op));
+
+	  /* SHmedia takes a signed displacement scaled by the access
+	     size: it must be size-aligned and within
+	     [-512 * size, 512 * size).  */
+	  size = GET_MODE_SIZE (mode);
+	  return (!(INTVAL (op) & (size - 1))
+		  && INTVAL (op) >= -512 * size
+		  && INTVAL (op) < 512 * size);
+	}
+
+      /* SH2A byte accesses accept an unsigned displacement up to 4095.  */
+      if (TARGET_SH2A)
+	{
+	  if (GET_MODE_SIZE (mode) == 1
+		&& (unsigned) INTVAL (op) < 4096)
+	    return true;
+	}
+
+      /* 4-byte accesses: word-aligned 0..63, except SFmode on SH2E
+	 (see the header comment above); SH2A additionally allows a
+	 much larger word-aligned range.  */
+      if ((GET_MODE_SIZE (mode) == 4
+	   && (unsigned) INTVAL (op) < 64
+	   && !(INTVAL (op) & 3)
+	   && !(TARGET_SH2E && mode == SFmode))
+	  || (GET_MODE_SIZE (mode) == 4
+	      && (unsigned) INTVAL (op) < 16383
+	      && !(INTVAL (op) & 3) && TARGET_SH2A))
+	return true;
+
+      /* 8-byte accesses: word-aligned 0..60, except DFmode on
+	 SH4 / SH2A; SH2A DFmode instead gets a wider range with
+	 stricter alignment when double-precision FP is enabled.  */
+      if ((GET_MODE_SIZE (mode) == 8
+	   && (unsigned) INTVAL (op) < 60
+	   && !(INTVAL (op) & 3)
+	   && !((TARGET_SH4 || TARGET_SH2A) && mode == DFmode))
+	  || ((GET_MODE_SIZE (mode)==8)
+	      && (unsigned) INTVAL (op) < 8192
+	      && !(INTVAL (op) & (TARGET_SH2A_DOUBLE ? 7 : 3))
+	      && (TARGET_SH2A && mode == DFmode)))
+	return true;
+    }
+
+  return false;
+}
+
+/* Recognize an RTL expression that is a valid memory address for
+ an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+ Allow REG
+ REG+disp
+ REG+r0
+ REG++
+ --REG */
+
+static bool
+sh_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
+{
+  /* Plain REG.  */
+  if (MAYBE_BASE_REGISTER_RTX_P (x, strict))
+    return true;
+  /* REG++ / --REG (post-increment / pre-decrement); not on SHmedia.  */
+  else if ((GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
+	   && ! TARGET_SHMEDIA
+	   && MAYBE_BASE_REGISTER_RTX_P (XEXP (x, 0), strict))
+    return true;
+  /* REG+disp and REG+r0.  PSImode sums are only accepted once reload
+     has completed.  */
+  else if (GET_CODE (x) == PLUS
+	   && (mode != PSImode || reload_completed))
+    {
+      rtx xop0 = XEXP (x, 0);
+      rtx xop1 = XEXP (x, 1);
+
+      /* REG+disp, with a displacement valid for MODE.  */
+      if (GET_MODE_SIZE (mode) <= 8
+	  && MAYBE_BASE_REGISTER_RTX_P (xop0, strict)
+	  && sh_legitimate_index_p (mode, xop1))
+	return true;
+
+      /* REG+r0 indexed addressing.  Even when indexed addresses are
+	 not generally allowed, SP/FP plus R0 (in either order) is
+	 still accepted; the mode-size limits depend on the target.  */
+      if ((ALLOW_INDEXED_ADDRESS || GET_MODE (x) == DImode
+	   || ((xop0 == stack_pointer_rtx
+		|| xop0 == hard_frame_pointer_rtx)
+	       && REG_P (xop1) && REGNO (xop1) == R0_REG)
+	   || ((xop1 == stack_pointer_rtx
+		|| xop1 == hard_frame_pointer_rtx)
+	       && REG_P (xop0) && REGNO (xop0) == R0_REG))
+	  && ((!TARGET_SHMEDIA && GET_MODE_SIZE (mode) <= 4)
+	      || (TARGET_SHMEDIA && GET_MODE_SIZE (mode) <= 8)
+	      || ((TARGET_SH4 || TARGET_SH2A_DOUBLE)
+		  && TARGET_FMOVD && mode == DFmode)))
+	{
+	  /* Either operand may serve as the base; the other must be a
+	     valid index register.  */
+	  if (MAYBE_BASE_REGISTER_RTX_P (xop1, strict)
+	      && MAYBE_INDEX_REGISTER_RTX_P (xop0, strict))
+	    return true;
+	  if (MAYBE_INDEX_REGISTER_RTX_P (xop1, strict)
+	      && MAYBE_BASE_REGISTER_RTX_P (xop0, strict))
+	    return true;
+	}
+    }
+
+  return false;
+}
+\f
/* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
isn't protected by a PIC unspec. */
int
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
rtx reg)
{
- if (tls_symbolic_operand (orig, Pmode))
+ if (tls_symbolic_operand (orig, Pmode) != TLS_MODEL_NONE)
return orig;
if (GET_CODE (orig) == LABEL_REF
return orig;
}
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ Otherwise, return X.
+
+ For the SH, if X is almost suitable for indexing, but the offset is
+ out of range, convert it into a normal form so that CSE has a chance
+ of reducing the number of address registers used. */
+
+static rtx
+sh_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
+{
+  /* When generating PIC, first rewrite symbolic addresses into PIC
+     references.  */
+  if (flag_pic)
+    x = legitimize_pic_address (oldx, mode, NULL_RTX);
+
+  /* Rewrite REG + large-const into (REG + base) + small-const so the
+     inner sum can be shared by CSE across nearby accesses.  Skipped on
+     SHmedia and for FP modes whose loads lack REG+disp addressing.  */
+  if (GET_CODE (x) == PLUS
+      && (GET_MODE_SIZE (mode) == 4
+	  || GET_MODE_SIZE (mode) == 8)
+      && CONST_INT_P (XEXP (x, 1))
+      && BASE_REGISTER_RTX_P (XEXP (x, 0))
+      && ! TARGET_SHMEDIA
+      && ! ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && mode == DFmode)
+      && ! (TARGET_SH2E && mode == SFmode))
+    {
+      rtx index_rtx = XEXP (x, 1);
+      HOST_WIDE_INT offset = INTVAL (index_rtx), offset_base;
+      rtx sum;
+
+      /* On rare occasions, we might get an unaligned pointer
+	 that is indexed in a way to give an aligned address.
+	 Therefore, keep the lower two bits in offset_base.  */
+      /* Instead of offset_base 128..131 use 124..127, so that
+	 simple add suffices.  */
+      if (offset > 127)
+	offset_base = ((offset + 4) & ~60) - 4;
+      else
+	offset_base = offset & ~60;
+
+      /* Sometimes the normal form does not suit DImode.  We
+	 could avoid that by using smaller ranges, but that
+	 would give less optimized code when SImode is
+	 prevalent.  */
+      if (GET_MODE_SIZE (mode) + offset - offset_base <= 64)
+	{
+	  sum = expand_binop (Pmode, add_optab, XEXP (x, 0),
+			      GEN_INT (offset_base), NULL_RTX, 0,
+			      OPTAB_LIB_WIDEN);
+
+	  /* The residual displacement fits the REG+disp form.  */
+	  return gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - offset_base));
+	}
+    }
+
+  /* No transformation found; return X unchanged.  */
+  return x;
+}
+
/* Mark the use of a constant in the literal table. If the constant
has multiple labels, make it unique. */
static rtx
lab = x;
for (insn = PREV_INSN (x); insn; insn = PREV_INSN (insn))
{
- if (GET_CODE (insn) != CODE_LABEL
+ if (!LABEL_P (insn)
|| LABEL_REFS (insn) != NEXT_INSN (insn))
break;
lab = insn;
/* Mark constants in a window. */
for (insn = NEXT_INSN (x); insn; insn = NEXT_INSN (insn))
{
- if (GET_CODE (insn) != INSN)
+ if (!NONJUMP_INSN_P (insn))
continue;
pattern = PATTERN (insn);
}
/* The only input for a call that is timing-critical is the
function's address. */
- if (GET_CODE (insn) == CALL_INSN)
+ if (CALL_P (insn))
{
rtx call = PATTERN (insn);
call = XVECEXP (call, 0 ,0);
if (GET_CODE (call) == SET)
call = SET_SRC (call);
- if (GET_CODE (call) == CALL && GET_CODE (XEXP (call, 0)) == MEM
+ if (GET_CODE (call) == CALL && MEM_P (XEXP (call, 0))
/* sibcalli_thunk uses a symbol_ref in an unspec. */
&& (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
|| ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
return 1;
if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
{
- if (GET_CODE (SET_DEST (x)) == REG)
+ if (REG_P (SET_DEST (x)))
{
if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
return 1;
if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
{
rtx note = XEXP (x, 0);
- if (GET_CODE (note) == REG && GET_MODE (note) == mode)
+ if (REG_P (note) && GET_MODE (note) == mode)
reg_weight--;
}
}
return (TARGET_SHMEDIA && (reload_in_progress || reload_completed));
}
-static int
+static enum reg_class
sh_target_reg_class (void)
{
return TARGET_SHMEDIA ? TARGET_REGS : NO_REGS;
|| (!(TARGET_SH4A_ARCH || TARGET_SH4_300) && TARGET_USERMODE))
emit_library_call (function_symbol (NULL, "__ic_invalidate",
FUNCTION_ORDINARY),
- 0, VOIDmode, 1, tramp, SImode);
+ LCT_NORMAL, VOIDmode, 1, tramp, SImode);
else
emit_insn (gen_ic_invalidate_line (tramp));
}
argmode = TYPE_MODE (TREE_TYPE (arg));
if (argmode != opmode)
arg = build1 (NOP_EXPR, optype, arg);
- op[nop] = expand_expr (arg, NULL_RTX, opmode, 0);
+ op[nop] = expand_expr (arg, NULL_RTX, opmode, EXPAND_NORMAL);
if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
op[nop] = copy_to_mode_reg (opmode, op[nop]);
}
emit_insn (gen_binary_sf_op1 (op0, op1, op2, op));
}
+/* Return true if hard register REGNO can hold a value of machine-mode MODE.
+ We can allow any mode in any general register. The special registers
+ only allow SImode. Don't allow any mode in the PR.
+
+ We cannot hold DCmode values in the XD registers because alter_reg
+ handles subregs of them incorrectly. We could work around this by
+ spacing the XD registers like the DR registers, but this would require
+ additional memory in every compilation to hold larger register vectors.
+ We could hold SFmode / SCmode values in XD registers, but that
+ would require a tertiary reload when reloading from / to memory,
+ and a secondary reload to reload from / to general regs; that
+   seems to be a losing proposition.
+
+ We want to allow TImode FP regs so that when V4SFmode is loaded as TImode,
+ it won't be ferried through GP registers first. */
+
+bool
+sh_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
+{
+  /* Special registers (MACH/MACL etc.) hold SImode only.  */
+  if (SPECIAL_REGISTER_P (regno))
+    return mode == SImode;
+
+  if (regno == FPUL_REG)
+    return (mode == SImode || mode == SFmode);
+
+  if (FP_REGISTER_P (regno) && mode == SFmode)
+    return true;
+
+  /* V2SFmode needs an even-numbered FP register or any general reg.  */
+  if (mode == V2SFmode)
+    {
+      if (((FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 2 == 0)
+	   || GENERAL_REGISTER_P (regno)))
+	return true;
+      else
+	return false;
+    }
+
+  /* V4SFmode needs a multiple-of-four FP register or a general reg.  */
+  if (mode == V4SFmode)
+    {
+      if ((FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 4 == 0)
+	  || GENERAL_REGISTER_P (regno))
+	return true;
+      else
+	return false;
+    }
+
+  /* V16SFmode: on SHmedia any 16-aligned FP register; otherwise only
+     the first XD register.  */
+  if (mode == V16SFmode)
+    {
+      if (TARGET_SHMEDIA)
+	{
+	  if (FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 16 == 0)
+	    return true;
+	  else
+	    return false;
+	}
+      else
+	return regno == FIRST_XD_REG;
+    }
+
+  /* Remaining FP-register cases: doubles and complex/TImode values
+     require even (or 4-aligned for TImode) register pairs.  */
+  if (FP_REGISTER_P (regno))
+    {
+      if (mode == SFmode
+	  || mode == SImode
+	  || ((TARGET_SH2E || TARGET_SHMEDIA) && mode == SCmode)
+	  || ((((TARGET_SH4 || TARGET_SH2A_DOUBLE) && mode == DFmode)
+	       || mode == DCmode
+	       || (TARGET_SHMEDIA
+		   && (mode == DFmode || mode == DImode
+		       || mode == V2SFmode || mode == TImode)))
+	      && ((regno - FIRST_FP_REG) & 1) == 0)
+	  || ((TARGET_SH4 || TARGET_SHMEDIA) && mode == TImode
+	      && ((regno - FIRST_FP_REG) & 3) == 0))
+	return true;
+      else
+	return false;
+    }
+
+  if (XD_REGISTER_P (regno))
+    return mode == DFmode;
+
+  if (TARGET_REGISTER_P (regno))
+    return (mode == DImode || mode == SImode || mode == PDImode);
+
+  /* PR only holds the return address (SImode).  */
+  if (regno == PR_REG)
+    return mode == SImode;
+
+  if (regno == FPSCR_REG)
+    return mode == PSImode;
+
+  /* FIXME.  This works around PR target/37633 for -O0.  */
+  if (!optimize && TARGET_SHMEDIA32 && GET_MODE_SIZE (mode) > 4)
+    {
+      unsigned int n = GET_MODE_SIZE (mode) / 8;
+
+      /* Keep wide values out of the upper call-saved general regs.  */
+      if (regno >= FIRST_GENERAL_REG + 10 - n + 1
+	  && regno <= FIRST_GENERAL_REG + 14)
+	return false;
+    }
+
+  /* Any mode is OK in any general register by default.  */
+  return true;
+}
+
/* Return the class of registers for which a mode change from FROM to TO
is invalid. */
bool
address = XVECEXP (address, 0, 0);
}
if (GET_CODE (address) == LABEL_REF
- && GET_CODE (XEXP (address, 0)) == CODE_LABEL)
+ && LABEL_P (XEXP (address, 0)))
LABEL_NUSES (XEXP (address, 0)) += nuses;
}
insn_locators_alloc ();
insns = get_insns ();
-#if 0
- if (optimize > 0)
- {
- /* Initialize the bitmap obstacks. */
- bitmap_obstack_initialize (NULL);
- bitmap_obstack_initialize (®_obstack);
- if (! cfun->cfg)
- init_flow ();
- rtl_register_cfg_hooks ();
- init_rtl_bb_info (ENTRY_BLOCK_PTR);
- init_rtl_bb_info (EXIT_BLOCK_PTR);
- ENTRY_BLOCK_PTR->flags |= BB_RTL;
- EXIT_BLOCK_PTR->flags |= BB_RTL;
- find_basic_blocks (insns);
-
- if (flag_schedule_insns_after_reload)
- {
- life_analysis (PROP_FINAL);
-
- split_all_insns (1);
-
- schedule_insns ();
- }
- /* We must split jmp insn in PIC case. */
- else if (flag_pic)
- split_all_insns_noflow ();
- }
-#else
if (optimize > 0)
{
if (! cfun->cfg)
init_flow (cfun);
split_all_insns_noflow ();
}
-#endif
sh_reorg ();
}
int
-sh_expand_t_scc (enum rtx_code code, rtx target)
+sh_expand_t_scc (rtx operands[])
{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx target = operands[0];
+ rtx op0 = operands[2];
+ rtx op1 = operands[3];
rtx result = target;
HOST_WIDE_INT val;
- if (GET_CODE (sh_compare_op0) != REG || REGNO (sh_compare_op0) != T_REG
- || GET_CODE (sh_compare_op1) != CONST_INT)
+ if (!REG_P (op0) || REGNO (op0) != T_REG
+ || !CONST_INT_P (op1))
return 0;
- if (GET_CODE (result) != REG)
+ if (!REG_P (result))
result = gen_reg_rtx (SImode);
- val = INTVAL (sh_compare_op1);
+ val = INTVAL (op1);
if ((code == EQ && val == 1) || (code == NE && val == 0))
emit_insn (gen_movt (result));
else if (TARGET_SH2A && ((code == EQ && val == 0)
|| (code == NE && val == 1)))
- emit_insn (gen_movrt (result));
+ emit_insn (gen_xorsi3_movrt (result));
else if ((code == EQ && val == 0) || (code == NE && val == 1))
{
emit_clobber (result);
/* Search for the sfunc. It should really come right after INSN. */
while ((insn = NEXT_INSN (insn)))
{
- if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
+ if (LABEL_P (insn) || JUMP_P (insn))
break;
if (! INSN_P (insn))
continue;
rtx new_rtx = replace_n_hard_rtx (SUBREG_REG (x), replacements,
n_replacements, modify);
- if (GET_CODE (new_rtx) == CONST_INT)
+ if (CONST_INT_P (new_rtx))
{
x = simplify_subreg (GET_MODE (x), new_rtx,
GET_MODE (SUBREG_REG (x)),
return x;
}
- else if (GET_CODE (x) == REG)
+ else if (REG_P (x))
{
unsigned regno = REGNO (x);
unsigned nregs = (regno < FIRST_PSEUDO_REGISTER
rtx to = replacements[i*2+1];
unsigned from_regno, from_nregs, to_regno, new_regno;
- if (GET_CODE (from) != REG)
+ if (!REG_P (from))
continue;
from_regno = REGNO (from);
from_nregs = (from_regno < FIRST_PSEUDO_REGISTER
{
if (regno < from_regno
|| regno + nregs > from_regno + nregs
- || GET_CODE (to) != REG
+ || !REG_P (to)
|| result)
return NULL_RTX;
to_regno = REGNO (to);
rtx new_rtx = replace_n_hard_rtx (XEXP (x, 0), replacements,
n_replacements, modify);
- if (GET_CODE (new_rtx) == CONST_INT)
+ if (CONST_INT_P (new_rtx))
{
x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
new_rtx, GET_MODE (XEXP (x, 0)));
if (GET_CODE (x) != TRUNCATE)
return 0;
reg = XEXP (x, 0);
- if (GET_MODE_SIZE (GET_MODE (reg)) > 8 && GET_CODE (reg) == REG)
+ if (GET_MODE_SIZE (GET_MODE (reg)) > 8 && REG_P (reg))
{
enum machine_mode reg_mode = GET_MODE (reg);
XEXP (x, 0) = simplify_subreg (DImode, reg, reg_mode,
static int
sh_contains_memref_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
-  return (GET_CODE (*loc) == MEM);
+  /* True iff *LOC is a MEM rtx.  */
+  return (MEM_P (*loc));
}
/* Return nonzero iff INSN contains a MEM. */
abort ();
}
if (rclass == FPUL_REGS
- && ((GET_CODE (x) == REG
+ && ((REG_P (x)
&& (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
|| REGNO (x) == T_REG))
|| GET_CODE (x) == PLUS))
return NO_REGS;
}
if (rclass == FPSCR_REGS
- && ((GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
- || (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == PLUS)))
+ && ((REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
+ || (MEM_P (x) && GET_CODE (XEXP (x, 0)) == PLUS)))
return GENERAL_REGS;
if (REGCLASS_HAS_FP_REG (rclass)
&& TARGET_SHMEDIA
} /* end of input-only processing. */
if (((REGCLASS_HAS_FP_REG (rclass)
- && (GET_CODE (x) == REG
+ && (REG_P (x)
&& (GENERAL_OR_AP_REGISTER_P (REGNO (x))
|| (FP_REGISTER_P (REGNO (x)) && mode == SImode
&& TARGET_FMOVD))))
|| (REGCLASS_HAS_GENERAL_REG (rclass)
- && GET_CODE (x) == REG
+ && REG_P (x)
&& FP_REGISTER_P (REGNO (x))))
&& ! TARGET_SHMEDIA
&& (mode == SFmode || mode == SImode))
if ((rclass == FPUL_REGS
|| (REGCLASS_HAS_FP_REG (rclass)
&& ! TARGET_SHMEDIA && mode == SImode))
- && (GET_CODE (x) == MEM
- || (GET_CODE (x) == REG
+ && (MEM_P (x)
+ || (REG_P (x)
&& (REGNO (x) >= FIRST_PSEUDO_REGISTER
|| REGNO (x) == T_REG
|| system_reg_operand (x, VOIDmode)))))
if ((rclass == TARGET_REGS
|| (TARGET_SHMEDIA && rclass == SIBCALL_REGS))
&& !satisfies_constraint_Csy (x)
- && (GET_CODE (x) != REG || ! GENERAL_REGISTER_P (REGNO (x))))
+ && (!REG_P (x) || ! GENERAL_REGISTER_P (REGNO (x))))
return GENERAL_REGS;
if ((rclass == MAC_REGS || rclass == PR_REGS)
- && GET_CODE (x) == REG && ! GENERAL_REGISTER_P (REGNO (x))
+ && REG_P (x) && ! GENERAL_REGISTER_P (REGNO (x))
&& rclass != REGNO_REG_CLASS (REGNO (x)))
return GENERAL_REGS;
- if (rclass != GENERAL_REGS && GET_CODE (x) == REG
+ if (rclass != GENERAL_REGS && REG_P (x)
&& TARGET_REGISTER_P (REGNO (x)))
return GENERAL_REGS;
return NO_REGS;