/* Output routines for GCC for Renesas / SuperH SH.
Copyright (C) 1993, 1994, 1995, 1997, 1997, 1998, 1999, 2000, 2001, 2002,
- 2003 Free Software Foundation, Inc.
+ 2003, 2004 Free Software Foundation, Inc.
Contributed by Steve Chamberlain (sac@cygnus.com).
Improved by Jim Wilson (wilson@cygnus.com).
#include "basic-block.h"
#include "ra.h"
#include "cfglayout.h"
+#include "intl.h"
+#include "sched-int.h"
+#include "ggc.h"
int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;
interrupted. */
int pragma_nosave_low_regs;
-/* This is used for communication between SETUP_INCOMING_VARARGS and
+/* This is used for communication between TARGET_SETUP_INCOMING_VARARGS and
sh_expand_prologue. */
int current_function_anonymous_args;
/* Which cpu are we scheduling for. */
enum processor_type sh_cpu;
+/* Definitions used in ready queue reordering for first scheduling pass. */
+
+/* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID. */
+static short *regmode_weight[2];
+
+/* Total SFmode and SImode weights of scheduled insns. */
+static int curr_regmode_pressure[2];
+
+/* If true, skip cycles for Q -> R movement. */
+static int skip_cycles = 0;
+
+/* Cached value of can_issue_more. This is cached in sh_variable_issue hook
+ and returned from sh_reorder2. */
+static short cached_can_issue_more;
+
/* Saved operands from the last compare to use when we generate an scc
or bcc insn. */
static bool shmedia_space_reserved_for_target_registers;
-static void split_branches PARAMS ((rtx));
-static int branch_dest PARAMS ((rtx));
-static void force_into PARAMS ((rtx, rtx));
-static void print_slot PARAMS ((rtx));
-static rtx add_constant PARAMS ((rtx, enum machine_mode, rtx));
-static void dump_table PARAMS ((rtx));
-static int hi_const PARAMS ((rtx));
-static int broken_move PARAMS ((rtx));
-static int mova_p PARAMS ((rtx));
-static rtx find_barrier PARAMS ((int, rtx, rtx));
-static int noncall_uses_reg PARAMS ((rtx, rtx, rtx *));
-static rtx gen_block_redirect PARAMS ((rtx, int, int));
-static void sh_reorg PARAMS ((void));
+static void split_branches (rtx);
+static int branch_dest (rtx);
+static void force_into (rtx, rtx);
+static void print_slot (rtx);
+static rtx add_constant (rtx, enum machine_mode, rtx);
+static void dump_table (rtx);
+static int hi_const (rtx);
+static int broken_move (rtx);
+static int mova_p (rtx);
+static rtx find_barrier (int, rtx, rtx);
+static int noncall_uses_reg (rtx, rtx, rtx *);
+static rtx gen_block_redirect (rtx, int, int);
+static void sh_reorg (void);
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *);
-static rtx frame_insn PARAMS ((rtx));
-static rtx push PARAMS ((int));
-static void pop PARAMS ((int));
-static void push_regs PARAMS ((HARD_REG_SET *, int));
-static int calc_live_regs PARAMS ((HARD_REG_SET *));
-static void mark_use PARAMS ((rtx, rtx *));
-static HOST_WIDE_INT rounded_frame_size PARAMS ((int));
-static rtx mark_constant_pool_use PARAMS ((rtx));
+static rtx frame_insn (rtx);
+static rtx push (int);
+static void pop (int);
+static void push_regs (HARD_REG_SET *, int);
+static int calc_live_regs (HARD_REG_SET *);
+static void mark_use (rtx, rtx *);
+static HOST_WIDE_INT rounded_frame_size (int);
+static rtx mark_constant_pool_use (rtx);
const struct attribute_spec sh_attribute_table[];
-static tree sh_handle_interrupt_handler_attribute PARAMS ((tree *, tree, tree, int, bool *));
-static tree sh_handle_sp_switch_attribute PARAMS ((tree *, tree, tree, int, bool *));
-static tree sh_handle_trap_exit_attribute PARAMS ((tree *, tree, tree, int, bool *));
-static tree sh_handle_renesas_attribute PARAMS ((tree *, tree, tree, int, bool *));
-static void sh_output_function_epilogue PARAMS ((FILE *, HOST_WIDE_INT));
-static void sh_insert_attributes PARAMS ((tree, tree *));
-static int sh_adjust_cost PARAMS ((rtx, rtx, rtx, int));
-static int sh_use_dfa_interface PARAMS ((void));
-static int sh_issue_rate PARAMS ((void));
-static bool sh_function_ok_for_sibcall PARAMS ((tree, tree));
-
-static bool sh_cannot_modify_jumps_p PARAMS ((void));
+static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, int, bool *);
+static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
+static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
+static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
+static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
+static void sh_insert_attributes (tree, tree *);
+static int sh_adjust_cost (rtx, rtx, rtx, int);
+static int sh_use_dfa_interface (void);
+static int sh_issue_rate (void);
+static int sh_dfa_new_cycle (FILE *, int, rtx, int, int, int *sort_p);
+static short find_set_regmode_weight (rtx, enum machine_mode);
+static short find_insn_regmode_weight (rtx, enum machine_mode);
+static void find_regmode_weight (int, enum machine_mode);
+static void sh_md_init_global (FILE *, int, int);
+static void sh_md_finish_global (FILE *, int);
+static int rank_for_reorder (const void *, const void *);
+static void swap_reorder (rtx *, int);
+static void ready_reorder (rtx *, int);
+static short high_pressure (enum machine_mode);
+static int sh_reorder (FILE *, int, rtx *, int *, int);
+static int sh_reorder2 (FILE *, int, rtx *, int *, int);
+static void sh_md_init (FILE *, int, int);
+static int sh_variable_issue (FILE *, int, rtx, int);
+
+static bool sh_function_ok_for_sibcall (tree, tree);
+
+static bool sh_cannot_modify_jumps_p (void);
static int sh_target_reg_class (void);
static bool sh_optimize_target_register_callee_saved (bool);
-static bool sh_ms_bitfield_layout_p PARAMS ((tree));
-
-static void sh_init_builtins PARAMS ((void));
-static void sh_media_init_builtins PARAMS ((void));
-static rtx sh_expand_builtin PARAMS ((tree, rtx, rtx, enum machine_mode, int));
-static void sh_output_mi_thunk PARAMS ((FILE *, tree, HOST_WIDE_INT,
- HOST_WIDE_INT, tree));
-static void sh_file_start PARAMS ((void));
-static int flow_dependent_p PARAMS ((rtx, rtx));
-static void flow_dependent_p_1 PARAMS ((rtx, rtx, void *));
-static int shiftcosts PARAMS ((rtx));
-static int andcosts PARAMS ((rtx));
-static int addsubcosts PARAMS ((rtx));
-static int multcosts PARAMS ((rtx));
-static bool unspec_caller_rtx_p PARAMS ((rtx));
-static bool sh_cannot_copy_insn_p PARAMS ((rtx));
-static bool sh_rtx_costs PARAMS ((rtx, int, int, int *));
-static int sh_address_cost PARAMS ((rtx));
+static bool sh_ms_bitfield_layout_p (tree);
+
+static void sh_init_builtins (void);
+static void sh_media_init_builtins (void);
+static rtx sh_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
+static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
+static void sh_file_start (void);
+static int flow_dependent_p (rtx, rtx);
+static void flow_dependent_p_1 (rtx, rtx, void *);
+static int shiftcosts (rtx);
+static int andcosts (rtx);
+static int addsubcosts (rtx);
+static int multcosts (rtx);
+static bool unspec_caller_rtx_p (rtx);
+static bool sh_cannot_copy_insn_p (rtx);
+static bool sh_rtx_costs (rtx, int, int, int *);
+static int sh_address_cost (rtx);
static int shmedia_target_regs_stack_space (HARD_REG_SET *);
static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
struct save_schedule_s *, int);
-static bool sh_promote_prototypes PARAMS ((tree));
-static rtx sh_struct_value_rtx PARAMS ((tree, int));
-static bool sh_return_in_memory PARAMS ((tree, tree));
-static rtx sh_builtin_saveregs PARAMS ((void));
-static void sh_setup_incoming_varargs PARAMS ((CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int));
-static bool sh_strict_argument_naming PARAMS ((CUMULATIVE_ARGS *));
-static bool sh_pretend_outgoing_varargs_named PARAMS ((CUMULATIVE_ARGS *));
+static rtx sh_struct_value_rtx (tree, int);
+static bool sh_return_in_memory (tree, tree);
+static rtx sh_builtin_saveregs (void);
+static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
+static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
+static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
+static tree sh_build_builtin_va_list (void);
\f
/* Initialize the GCC target structure. */
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sh_issue_rate
+/* The next 5 hooks have been implemented for reenabling sched1. With the
+ help of these macros we are limiting the movement of insns in sched1 to
+ reduce the register pressure. The overall idea is to keep count of SImode
+ and SFmode regs required by already scheduled insns. When these counts
+ cross some threshold values, give priority to insns that free registers.
+ The insn that frees registers is most likely to be the insn with lowest
+ LUID (original insn order); but such an insn might be there in the stalled
+ queue (Q) instead of the ready queue (R). To solve this, we skip cycles
+ up to a max of 8 cycles so that such insns may move from Q -> R.
+
+ The descriptions of the hooks are as below:
+
+ TARGET_SCHED_INIT_GLOBAL: Added a new target hook in the generic
+ scheduler; it is called inside the sched_init function just after
+ find_insn_reg_weights function call. It is used to calculate the SImode
+ and SFmode weights of insns of basic blocks; much similar to what
+ find_insn_reg_weights does.
+ TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.
+
+ TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
+ indicated by TARGET_SCHED_REORDER2; doing this may move insns from
+ (Q)->(R).
+
+ TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
+ high; reorder the ready queue so that the insn with lowest LUID will be
+ issued next.
+
+ TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
+ TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.
+
+ TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
+ can be returned from TARGET_SCHED_REORDER2.
+
+ TARGET_SCHED_INIT: Reset the register pressure counting variables. */
+
+#undef TARGET_SCHED_DFA_NEW_CYCLE
+#define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle
+
+#undef TARGET_SCHED_INIT_GLOBAL
+#define TARGET_SCHED_INIT_GLOBAL sh_md_init_global
+
+#undef TARGET_SCHED_FINISH_GLOBAL
+#define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global
+
+#undef TARGET_SCHED_VARIABLE_ISSUE
+#define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue
+
+#undef TARGET_SCHED_REORDER
+#define TARGET_SCHED_REORDER sh_reorder
+
+#undef TARGET_SCHED_REORDER2
+#define TARGET_SCHED_REORDER2 sh_reorder2
+
+#undef TARGET_SCHED_INIT
+#define TARGET_SCHED_INIT sh_md_init
+
#undef TARGET_CANNOT_MODIFY_JUMPS_P
#define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
#undef TARGET_BRANCH_TARGET_REGISTER_CLASS
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
+
+#undef TARGET_PCH_VALID_P
+#define TARGET_PCH_VALID_P sh_pch_valid_p
+
+/* Return regmode weight for insn. */
+#define INSN_REGMODE_WEIGHT(INSN, MODE) regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]
+
+/* Return current register pressure for regmode. */
+#define CURR_REGMODE_PRESSURE(MODE) curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Print the operand address in x to the stream. */
void
-print_operand_address (stream, x)
- FILE *stream;
- rtx x;
+print_operand_address (FILE *stream, rtx x)
{
switch (GET_CODE (x))
{
'o' output an operator. */
void
-print_operand (stream, x, code)
- FILE *stream;
- rtx x;
- int code;
+print_operand (FILE *stream, rtx x, int code)
{
switch (code)
{
\f
/* Like force_operand, but guarantees that VALUE ends up in TARGET. */
static void
-force_into (value, target)
- rtx value, target;
+force_into (rtx value, rtx target)
{
value = force_operand (value, target);
if (! rtx_equal_p (value, target))
OPERANDS[3] is the alignment safe to use. */
int
-expand_block_move (operands)
- rtx *operands;
+expand_block_move (rtx *operands)
{
int align = INTVAL (operands[3]);
int constp = (GET_CODE (operands[2]) == CONST_INT);
tree entry_name;
rtx sym;
rtx func_addr_rtx;
- rtx r4 = gen_rtx (REG, SImode, 4);
- rtx r5 = gen_rtx (REG, SImode, 5);
+ rtx r4 = gen_rtx_REG (SImode, 4);
+ rtx r5 = gen_rtx_REG (SImode, 5);
entry_name = get_identifier ("__movstrSI12_i4");
rtx sym;
rtx func_addr_rtx;
int dwords;
- rtx r4 = gen_rtx (REG, SImode, 4);
- rtx r5 = gen_rtx (REG, SImode, 5);
- rtx r6 = gen_rtx (REG, SImode, 6);
+ rtx r4 = gen_rtx_REG (SImode, 4);
+ rtx r5 = gen_rtx_REG (SImode, 5);
+ rtx r6 = gen_rtx_REG (SImode, 6);
entry_name = get_identifier (bytes & 4
? "__movstr_i4_odd"
operands must be in a register. */
int
-prepare_move_operands (operands, mode)
- rtx operands[];
- enum machine_mode mode;
+prepare_move_operands (rtx operands[], enum machine_mode mode)
{
if ((mode == SImode || mode == DImode)
&& flag_pic
{
case TLS_MODEL_GLOBAL_DYNAMIC:
tga_ret = gen_rtx_REG (Pmode, R0_REG);
- emit_insn (gen_tls_global_dynamic (tga_ret, op1));
+ emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
op1 = tga_ret;
break;
case TLS_MODEL_LOCAL_DYNAMIC:
tga_ret = gen_rtx_REG (Pmode, R0_REG);
- emit_insn (gen_tls_local_dynamic (tga_ret, op1));
+ emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));
tmp = gen_reg_rtx (Pmode);
emit_move_insn (tmp, tga_ret);
/* Prepare the operands for an scc instruction; make sure that the
compare has been done. */
rtx
-prepare_scc_operands (code)
- enum rtx_code code;
+prepare_scc_operands (enum rtx_code code)
{
rtx t_reg = gen_rtx_REG (SImode, T_REG);
enum rtx_code oldcode = code;
if (TARGET_SH4 && GET_MODE_CLASS (mode) == MODE_FLOAT)
(mode == SFmode ? emit_sf_insn : emit_df_insn)
- (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
- gen_rtx (SET, VOIDmode, t_reg,
- gen_rtx (code, SImode,
- sh_compare_op0, sh_compare_op1)),
- gen_rtx (USE, VOIDmode, get_fpscr_rtx ()))));
+ (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, t_reg,
+ gen_rtx_fmt_ee (code, SImode,
+ sh_compare_op0, sh_compare_op1)),
+ gen_rtx_USE (VOIDmode, get_fpscr_rtx ()))));
else
- emit_insn (gen_rtx (SET, VOIDmode, t_reg,
- gen_rtx (code, SImode, sh_compare_op0,
- sh_compare_op1)));
+ emit_insn (gen_rtx_SET (VOIDmode, t_reg,
+ gen_rtx_fmt_ee (code, SImode,
+ sh_compare_op0, sh_compare_op1)));
return t_reg;
}
/* Called from the md file, set up the operands of a compare instruction. */
void
-from_compare (operands, code)
- rtx *operands;
- int code;
+from_compare (rtx *operands, int code)
{
enum machine_mode mode = GET_MODE (sh_compare_op0);
rtx insn;
else
insn = gen_rtx_SET (VOIDmode,
gen_rtx_REG (SImode, T_REG),
- gen_rtx (code, SImode, sh_compare_op0,
- sh_compare_op1));
+ gen_rtx_fmt_ee (code, SImode,
+ sh_compare_op0, sh_compare_op1));
if (TARGET_SH4 && GET_MODE_CLASS (mode) == MODE_FLOAT)
{
- insn = gen_rtx (PARALLEL, VOIDmode,
+ insn = gen_rtx_PARALLEL (VOIDmode,
gen_rtvec (2, insn,
- gen_rtx (USE, VOIDmode, get_fpscr_rtx ())));
+ gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
(mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
}
else
to take care when we see overlapping source and dest registers. */
const char *
-output_movedouble (insn, operands, mode)
- rtx insn ATTRIBUTE_UNUSED;
- rtx operands[];
- enum machine_mode mode;
+output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
+ enum machine_mode mode)
{
rtx dst = operands[0];
rtx src = operands[1];
into a sequence where putting the slot insn at the end wouldn't work. */
static void
-print_slot (insn)
- rtx insn;
+print_slot (rtx insn)
{
- final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 0, 1);
+ final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 0, 1, NULL);
INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
}
const char *
-output_far_jump (insn, op)
- rtx insn;
- rtx op;
+output_far_jump (rtx insn, rtx op)
{
struct { rtx lab, reg, op; } this;
rtx braf_base_lab = NULL_RTX;
/* Output code for ordinary branches. */
const char *
-output_branch (logic, insn, operands)
- int logic;
- rtx insn;
- rtx *operands;
+output_branch (int logic, rtx insn, rtx *operands)
{
switch (get_attr_length (insn))
{
output_asm_insn ("bra\t%l0", &op0);
fprintf (asm_out_file, "\tnop\n");
- (*targetm.asm_out.internal_label)(asm_out_file, "LF", label);
+ (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
return "";
}
}
const char *
-output_branchy_insn (code, template, insn, operands)
- enum rtx_code code;
- const char *template;
- rtx insn;
- rtx *operands;
+output_branchy_insn (enum rtx_code code, const char *template,
+ rtx insn, rtx *operands)
{
rtx next_insn = NEXT_INSN (insn);
}
const char *
-output_ieee_ccmpeq (insn, operands)
- rtx insn, *operands;
+output_ieee_ccmpeq (rtx insn, rtx *operands)
{
return output_branchy_insn (NE, "bt\t%l9\\;fcmp/eq\t%1,%0", insn, operands);
}
/* Output the start of the assembler file. */
static void
-sh_file_start ()
+sh_file_start (void)
{
default_file_start ();
if (TARGET_ELF)
/* We need to show the text section with the proper
attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
- emits it without attributes in TEXT_SECTION, else GAS
+ emits it without attributes in TEXT_SECTION_ASM_OP, else GAS
will complain. We can teach GAS specifically about the
default attributes for our choice of text section, but
then we would have to change GAS again if/when we change
/* Check if PAT includes UNSPEC_CALLER unspec pattern. */
static bool
-unspec_caller_rtx_p (pat)
- rtx pat;
+unspec_caller_rtx_p (rtx pat)
{
switch (GET_CODE (pat))
{
that generates an unique label. */
static bool
-sh_cannot_copy_insn_p (insn)
- rtx insn;
+sh_cannot_copy_insn_p (rtx insn)
{
rtx pat;
of arbitrary constant shift instructions. */
int
-shift_insns_rtx (insn)
- rtx insn;
+shift_insns_rtx (rtx insn)
{
rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
int shift_count = INTVAL (XEXP (set_src, 1));
case ASHIFT:
return shift_insns[shift_count];
default:
- abort();
+ abort ();
}
}
/* Return the cost of a shift. */
static inline int
-shiftcosts (x)
- rtx x;
+shiftcosts (rtx x)
{
int value;
/* Return the cost of an AND operation. */
static inline int
-andcosts (x)
- rtx x;
+andcosts (rtx x)
{
int i;
/* Return the cost of an addition or a subtraction. */
static inline int
-addsubcosts (x)
- rtx x;
+addsubcosts (rtx x)
{
/* Adding a register is a single cycle insn. */
if (GET_CODE (XEXP (x, 1)) == REG
/* Fall through. */
default:
- return 5;
+ return 5;
}
/* Any other constant requires a 2 cycle pc-relative load plus an
/* Return the cost of a multiply. */
static inline int
-multcosts (x)
- rtx x ATTRIBUTE_UNUSED;
+multcosts (rtx x ATTRIBUTE_UNUSED)
{
if (TARGET_SHMEDIA)
return 3;
scanned. In either case, *TOTAL contains the cost result. */
static bool
-sh_rtx_costs (x, code, outer_code, total)
- rtx x;
- int code, outer_code, *total;
+sh_rtx_costs (rtx x, int code, int outer_code, int *total)
{
switch (code)
{
since it increases pressure on r0. */
static int
-sh_address_cost (X)
- rtx X;
+sh_address_cost (rtx X)
{
return (GET_CODE (X) == PLUS
&& ! CONSTANT_P (XEXP (X, 1))
/* Code to expand a shift. */
void
-gen_ashift (type, n, reg)
- int type;
- int n;
- rtx reg;
+gen_ashift (int type, int n, rtx reg)
{
/* Negative values here come from the shift_amounts array. */
if (n < 0)
/* Same for HImode */
void
-gen_ashift_hi (type, n, reg)
- int type;
- int n;
- rtx reg;
+gen_ashift_hi (int type, int n, rtx reg)
{
/* Negative values here come from the shift_amounts array. */
if (n < 0)
shift instructions. */
void
-gen_shifty_op (code, operands)
- int code;
- rtx *operands;
+gen_shifty_op (int code, rtx *operands)
{
int value = INTVAL (operands[2]);
int max, i;
matter. */
void
-gen_shifty_hi_op (code, operands)
- int code;
- rtx *operands;
+gen_shifty_hi_op (int code, rtx *operands)
{
int value = INTVAL (operands[2]);
int max, i;
- void (*gen_fun) PARAMS ((int, int, rtx));
+ void (*gen_fun) (int, int, rtx);
/* This operation is used by and_shl for SImode values with a few
high bits known to be cleared. */
/* ??? Rewrite to use super-optimizer sequences. */
int
-expand_ashiftrt (operands)
- rtx *operands;
+expand_ashiftrt (rtx *operands)
{
rtx sym;
rtx wrk;
}
int
-sh_dynamicalize_shift_p (count)
- rtx count;
+sh_dynamicalize_shift_p (rtx count)
{
return shift_insns[INTVAL (count)] > 1 + SH_DYNAMIC_SHIFT_COST;
}
shift_amounts for the last shift value that is to be used before the
sign extend. */
int
-shl_and_kind (left_rtx, mask_rtx, attrp)
- rtx left_rtx, mask_rtx;
- int *attrp;
+shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
{
unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
int left = INTVAL (left_rtx), right;
mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
else
mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
- /* Can this be expressed as a right shift / left shift pair ? */
+ /* Can this be expressed as a right shift / left shift pair? */
lsb = ((mask ^ (mask - 1)) >> 1) + 1;
right = exact_log2 (lsb);
mask2 = ~(mask + lsb - 1);
int late_right = exact_log2 (lsb2);
best_cost = shift_insns[left + late_right] + shift_insns[late_right];
}
- /* Try to use zero extend */
+ /* Try to use zero extend. */
if (mask2 == ~(lsb2 - 1))
{
int width, first;
for (width = 8; width <= 16; width += 8)
{
- /* Can we zero-extend right away? */
- if (lsb2 == (unsigned HOST_WIDE_INT)1 << width)
+ /* Can we zero-extend right away? */
+ if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
{
cost
= 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
best_len = cost;
if (attrp)
attrp[2] = first;
- }
+ }
}
}
}
}
}
/* Try to use a scratch register to hold the AND operand. */
- can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT)3 << 30)) == 0;
+ can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
for (i = 0; i <= 2; i++)
{
if (i > right)
/* This is used in length attributes of the unnamed instructions
corresponding to shl_and_kind return values of 1 and 2. */
int
-shl_and_length (insn)
- rtx insn;
+shl_and_length (rtx insn)
{
rtx set_src, left_rtx, mask_rtx;
int attributes[3];
/* This is used in length attribute of the and_shl_scratch instruction. */
int
-shl_and_scr_length (insn)
- rtx insn;
+shl_and_scr_length (rtx insn)
{
rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
int len = shift_insns[INTVAL (XEXP (set_src, 1))];
method of generating them, i.e. returned zero. */
int
-gen_shl_and (dest, left_rtx, mask_rtx, source)
- rtx dest, left_rtx, mask_rtx, source;
+gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
{
int attributes[3];
unsigned HOST_WIDE_INT mask;
int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
int right, total_shift;
- void (*shift_gen_fun) PARAMS ((int, rtx*)) = gen_shifty_hi_op;
+ void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;
right = attributes[0];
total_shift = INTVAL (left_rtx) + right;
if (first < 0)
{
emit_insn ((mask << right) <= 0xff
- ? gen_zero_extendqisi2(dest,
- gen_lowpart (QImode, source))
- : gen_zero_extendhisi2(dest,
- gen_lowpart (HImode, source)));
+ ? gen_zero_extendqisi2 (dest,
+ gen_lowpart (QImode, source))
+ : gen_zero_extendhisi2 (dest,
+ gen_lowpart (HImode, source)));
source = dest;
}
if (source != dest)
}
if (first >= 0)
emit_insn (mask <= 0xff
- ? gen_zero_extendqisi2(dest, gen_lowpart (QImode, dest))
- : gen_zero_extendhisi2(dest, gen_lowpart (HImode, dest)));
+ ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
+ : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
if (total_shift > 0)
{
operands[2] = GEN_INT (total_shift);
/* If the topmost bit that matters is set, set the topmost bits
that don't matter. This way, we might be able to get a shorter
signed constant. */
- if (mask & ((HOST_WIDE_INT)1 << (31 - total_shift)))
- mask |= (HOST_WIDE_INT)~0 << (31 - total_shift);
+ if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
+ mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
case 2:
/* Don't expand fine-grained when combining, because that will
make the pattern fail. */
If COSTP is nonzero, assign the calculated cost to *COSTP. */
int
-shl_sext_kind (left_rtx, size_rtx, costp)
- rtx left_rtx, size_rtx;
- int *costp;
+shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
{
int left, size, insize, ext;
int cost = 0, best_cost;
implementing this pattern. */
int
-shl_sext_length (insn)
- rtx insn;
+shl_sext_length (rtx insn)
{
rtx set_src, left_rtx, size_rtx;
int cost;
/* Generate rtl for this pattern */
int
-gen_shl_sext (dest, left_rtx, size_rtx, source)
- rtx dest, left_rtx, size_rtx, source;
+gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
{
int kind;
int left, size, insize, cost;
gen_shifty_hi_op (ASHIFT, operands);
}
emit_insn (kind & 1
- ? gen_extendqisi2(dest, gen_lowpart (QImode, dest))
- : gen_extendhisi2(dest, gen_lowpart (HImode, dest)));
+ ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
+ : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
if (kind <= 2)
{
if (shift2)
{
operands[2] = GEN_INT (shift2 + 1);
gen_shifty_op (ASHIFT, operands);
- operands[2] = GEN_INT (1);
+ operands[2] = const1_rtx;
gen_shifty_op (ASHIFTRT, operands);
break;
}
operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
gen_shifty_op (ASHIFT, operands);
if (kind == 7)
- emit_insn (gen_ashrsi3_k (dest, dest, GEN_INT (1)));
+ emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
break;
default:
return -1;
/* Prefix a symbol_ref name with "datalabel". */
rtx
-gen_datalabel_ref (sym)
- rtx sym;
+gen_datalabel_ref (rtx sym)
{
if (GET_CODE (sym) == LABEL_REF)
return gen_rtx_CONST (GET_MODE (sym),
/* Add a constant to the pool and return its label. */
static rtx
-add_constant (x, mode, last_value)
- rtx x;
- enum machine_mode mode;
- rtx last_value;
+add_constant (rtx x, enum machine_mode mode, rtx last_value)
{
int i;
rtx lab, new, ref, newref;
/* Output the literal table. */
static void
-dump_table (scan)
- rtx scan;
+dump_table (rtx scan)
{
int i;
int need_align = 1;
{
lab = XEXP (ref, 0);
emit_insn_before (gen_consttable_window_end (lab),
- align_insn);
+ align_insn);
}
delete_insn (align_insn);
align_insn = NULL_RTX;
mov.w instead of a mov.l. */
static int
-hi_const (src)
- rtx src;
+hi_const (rtx src)
{
return (GET_CODE (src) == CONST_INT
&& INTVAL (src) >= -32768
need to fix it if the input value is CONST_OK_FOR_I08. */
static int
-broken_move (insn)
- rtx insn;
+broken_move (rtx insn)
{
if (GET_CODE (insn) == INSN)
{
}
static int
-mova_p (insn)
- rtx insn;
+mova_p (rtx insn)
{
return (GET_CODE (insn) == INSN
&& GET_CODE (PATTERN (insn)) == SET
the range. */
static rtx
-find_barrier (num_mova, mova, from)
- int num_mova;
- rtx mova, from;
+find_barrier (int num_mova, rtx mova, rtx from)
{
int count_si = 0;
int count_hi = 0;
the limit is the same, but the alignment requirements
are higher. We may waste up to 4 additional bytes
for alignment, and the DF/DI constant may have
- another SF/SI constant placed before it. */
+ another SF/SI constant placed before it. */
if (TARGET_SHCOMPACT
&& ! found_di
&& (mode == DFmode || mode == DImode))
register is not used anywhere else in this instruction - except as the
destination of a set, return this register; else, return 0. */
rtx
-sfunc_uses_reg (insn)
- rtx insn;
+sfunc_uses_reg (rtx insn)
{
int i;
rtx pattern, part, reg_part, reg;
is set by INSN. */
static int
-noncall_uses_reg (reg, insn, set)
- rtx reg;
- rtx insn;
- rtx *set;
+noncall_uses_reg (rtx reg, rtx insn, rtx *set)
{
rtx pattern, reg2;
registers 0..15, respectively, are used as outputs, or are clobbered.
IS_DEST should be set to 16 if X is the destination of a SET, else to 0. */
int
-regs_used (x, is_dest)
- rtx x; int is_dest;
+regs_used (rtx x, int is_dest)
{
enum rtx_code code;
const char *fmt;
If a blocking instruction is made or recognized, return it. */
static rtx
-gen_block_redirect (jump, addr, need_block)
- rtx jump;
- int addr, need_block;
+gen_block_redirect (rtx jump, int addr, int need_block)
{
int dead = 0;
rtx prev = prev_nonnote_insn (jump);
else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
need_block = 0;
}
+ if (GET_CODE (PATTERN (jump)) == RETURN)
+ {
+ if (! need_block)
+ return prev;
+ /* Reorg even does nasty things with return insns that cause branches
+ to go out of range - see find_end_label and callers. */
+ return emit_insn_before (gen_block_branch_redirect (const0_rtx) , jump);
+ }
/* We can't use JUMP_LABEL here because it might be undefined
when not optimizing. */
dest = XEXP (SET_SRC (PATTERN (jump)), 0);
if (INSN_DELETED_P (scan))
continue;
code = GET_CODE (scan);
- if (GET_RTX_CLASS (code) == 'i')
+ if (INSN_P (scan))
{
used |= regs_used (PATTERN (scan), 0);
if (code == CALL_INSN)
int address;
};
-static void gen_far_branch PARAMS ((struct far_branch *));
+static void gen_far_branch (struct far_branch *);
enum mdep_reorg_phase_e mdep_reorg_phase;
static void
-gen_far_branch (bp)
- struct far_branch *bp;
+gen_far_branch (struct far_branch *bp)
{
rtx insn = bp->insert_place;
rtx jump;
JUMP_LABEL (jump) = bp->far_label;
if (! invert_jump (insn, label, 1))
abort ();
- (emit_insn_after
- (gen_stuff_delay_slot
- (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
- GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
- insn));
+ /* If we are branching around a jump (rather than a return), prevent
+ reorg from using an insn from the jump target as the delay slot insn -
+ when reorg did this, it pessimized code (we rather hide the delay slot)
+ and it could cause branches to go out of range. */
+ if (bp->far_label)
+ (emit_insn_after
+ (gen_stuff_delay_slot
+ (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
+ GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
+ insn));
/* Prevent reorg from undoing our splits. */
gen_block_redirect (jump, bp->address += 2, 2);
}
/* Fix up ADDR_DIFF_VECs. */
void
-fixup_addr_diff_vecs (first)
- rtx first;
+fixup_addr_diff_vecs (rtx first)
{
rtx insn;
/* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
a barrier. Return the base 2 logarithm of the desired alignment. */
int
-barrier_align (barrier_or_label)
- rtx barrier_or_label;
+barrier_align (rtx barrier_or_label)
{
rtx next = next_real_insn (barrier_or_label), pat, prev;
int slot, credit, jump_to_next = 0;
the table to the minimum for proper code alignment. */
return ((TARGET_SMALLCODE
|| ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
- <= (unsigned)1 << (CACHE_LOG - 2)))
+ <= (unsigned) 1 << (CACHE_LOG - 2)))
? 1 << TARGET_SHMEDIA : align_jumps_log);
}
Applying loop alignment to small constant or switch tables is a waste
of space, so we suppress this too. */
int
-sh_loop_align (label)
- rtx label;
+sh_loop_align (rtx label)
{
rtx next = label;
scheduling. */
static void
-sh_reorg ()
+sh_reorg (void)
{
rtx first, insn, mova = NULL_RTX;
int num_mova;
if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
{
/* This must be an insn that clobbers r0. */
- rtx clobber = XVECEXP (PATTERN (scan), 0,
- XVECLEN (PATTERN (scan), 0) - 1);
+ rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
+ XVECLEN (PATTERN (scan), 0)
+ - 1);
+ rtx clobber = *clobberp;
if (GET_CODE (clobber) != CLOBBER
|| ! rtx_equal_p (XEXP (clobber, 0), r0_rtx))
}
last_float_move = scan;
last_float = src;
- newsrc = gen_rtx (MEM, mode,
+ newsrc = gen_rtx_MEM (mode,
(((TARGET_SH4 && ! TARGET_FMOVD)
|| REGNO (dst) == FPUL_REG)
? r0_inc_rtx
last_float_addr = &XEXP (newsrc, 0);
/* Remove the clobber of r0. */
- XEXP (clobber, 0) = gen_rtx_SCRATCH (Pmode);
+ *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
+ gen_rtx_SCRATCH (Pmode));
RTX_UNCHANGING_P (newsrc) = 1;
}
/* This is a mova needing a label. Create it. */
}
int
-get_dest_uid (label, max_uid)
- rtx label;
- int max_uid;
+get_dest_uid (rtx label, int max_uid)
{
rtx dest = next_real_insn (label);
int dest_uid;
find branches with common targets more easily. */
static void
-split_branches (first)
- rtx first;
+split_branches (rtx first)
{
rtx insn;
struct far_branch **uid_branch, *far_branch_list = 0;
does not bother to update them. */
void
-final_prescan_insn (insn, opvec, noperands)
- rtx insn;
- rtx *opvec ATTRIBUTE_UNUSED;
- int noperands ATTRIBUTE_UNUSED;
+final_prescan_insn (rtx insn, rtx *opvec ATTRIBUTE_UNUSED,
+ int noperands ATTRIBUTE_UNUSED)
{
if (TARGET_DUMPISIZE)
fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
only be labels. */
const char *
-output_jump_label_table ()
+output_jump_label_table (void)
{
int i;
}
static rtx
-frame_insn (x)
- rtx x;
+frame_insn (rtx x)
{
x = emit_insn (x);
RTX_FRAME_RELATED_P (x) = 1;
/* Output RTL to push register RN onto the stack. */
static rtx
-push (rn)
- int rn;
+push (int rn)
{
rtx x;
if (rn == FPUL_REG)
/* Output RTL to pop register RN from the stack. */
static void
-pop (rn)
- int rn;
+pop (int rn)
{
rtx x;
if (rn == FPUL_REG)
/* Generate code to push the regs specified in the mask. */
static void
-push_regs (mask, interrupt_handler)
- HARD_REG_SET *mask;
- int interrupt_handler;
+push_regs (HARD_REG_SET *mask, int interrupt_handler)
{
int i;
int skip_fpscr = 0;
HARD_REG_SET unsaved;
push (FPSCR_REG);
- COMPL_HARD_REG_SET(unsaved, *mask);
+ COMPL_HARD_REG_SET (unsaved, *mask);
fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
skip_fpscr = 1;
}
if ((! call_used_regs[reg] || interrupt_handler)
&& ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
/* Leave space to save this target register on the stack,
- in case target register allocation wants to use it. */
+ in case target register allocation wants to use it. */
stack_space += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
return stack_space;
}
make sure that all the regs it clobbers are safe too. */
static int
-calc_live_regs (live_regs_mask)
- HARD_REG_SET *live_regs_mask;
+calc_live_regs (HARD_REG_SET *live_regs_mask)
{
int reg;
int count;
stack for register saves. Return the frame size, padded
appropriately so that the stack stays properly aligned. */
static HOST_WIDE_INT
-rounded_frame_size (pushed)
- int pushed;
+rounded_frame_size (int pushed)
{
HOST_WIDE_INT size = get_frame_size ();
HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
unchanged along the whole function. We set it up as the return
value in the prologue. */
int
-sh_media_register_for_return ()
+sh_media_register_for_return (void)
{
int regno;
int tr0_used;
&& ! (current_function_needs_context && i == STATIC_CHAIN_REGNUM)
&& ! (current_function_calls_eh_return
&& (i == EH_RETURN_STACKADJ_REGNO
- || ((unsigned)i <= EH_RETURN_DATA_REGNO (0)
- && (unsigned)i >= EH_RETURN_DATA_REGNO (3)))))
+ || ((unsigned) i <= EH_RETURN_DATA_REGNO (0)
+ && (unsigned) i >= EH_RETURN_DATA_REGNO (3)))))
schedule->temps[tmpx++] = i;
entry->reg = -1;
entry->mode = VOIDmode;
}
void
-sh_expand_prologue ()
+sh_expand_prologue (void)
{
HARD_REG_SET live_regs_mask;
int d, i;
}
void
-sh_expand_epilogue ()
+sh_expand_epilogue (void)
{
HARD_REG_SET live_regs_mask;
int d, i;
static int sh_need_epilogue_known = 0;
int
-sh_need_epilogue ()
+sh_need_epilogue (void)
{
if (! sh_need_epilogue_known)
{
TEMP is available as a scratch register, if needed. */
void
-sh_set_return_address (ra, tmp)
- rtx ra, tmp;
+sh_set_return_address (rtx ra, rtx tmp)
{
HARD_REG_SET live_regs_mask;
int d;
/* Clear variables at function end. */
static void
-sh_output_function_epilogue (file, size)
- FILE *file ATTRIBUTE_UNUSED;
- HOST_WIDE_INT size ATTRIBUTE_UNUSED;
+sh_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
trap_exit = pragma_interrupt = pragma_trapa = pragma_nosave_low_regs = 0;
sh_need_epilogue_known = 0;
}
static rtx
-sh_builtin_saveregs ()
+sh_builtin_saveregs (void)
{
/* First unnamed integer register. */
int first_intreg = current_function_args_info.arg_count[(int) SH_ARG_INT];
mem = gen_rtx_MEM (DFmode, fpregs);
set_mem_alias_set (mem, alias_set);
emit_move_insn (mem,
- gen_rtx (REG, DFmode, BASE_ARG_REG (DFmode) + regno));
+ gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
}
regno = first_floatreg;
if (regno & 1)
{
- emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (- UNITS_PER_WORD)));
+ emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
mem = gen_rtx_MEM (SFmode, fpregs);
set_mem_alias_set (mem, alias_set);
emit_move_insn (mem,
- gen_rtx (REG, SFmode, BASE_ARG_REG (SFmode) + regno
+ gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno
- (TARGET_LITTLE_ENDIAN != 0)));
}
}
{
rtx mem;
- emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (- UNITS_PER_WORD)));
+ emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
mem = gen_rtx_MEM (SFmode, fpregs);
set_mem_alias_set (mem, alias_set);
emit_move_insn (mem,
/* Define the `__builtin_va_list' type for the ABI. */
-tree
-sh_build_va_list ()
+static tree
+sh_build_builtin_va_list (void)
{
tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
tree record;
|| TARGET_HITACHI || sh_cfun_attr_renesas_p ())
return ptr_type_node;
- record = make_node (RECORD_TYPE);
+ record = (*lang_hooks.types.make_type) (RECORD_TYPE);
f_next_o = build_decl (FIELD_DECL, get_identifier ("__va_next_o"),
ptr_type_node);
/* Implement `va_start' for varargs and stdarg. */
void
-sh_va_start (valist, nextarg)
- tree valist;
- rtx nextarg;
+sh_va_start (tree valist, rtx nextarg)
{
tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
/* Implement `va_arg'. */
rtx
-sh_va_arg (valist, type)
- tree valist, type;
+sh_va_arg (tree valist, tree type)
{
HOST_WIDE_INT size, rsize;
tree tmp, pptr_type_node;
return result;
}
-static bool
-sh_promote_prototypes (type)
- tree type;
+bool
+sh_promote_prototypes (tree type)
{
if (TARGET_HITACHI)
return 0;
rtx
-sh_function_arg (ca, mode, type, named)
- CUMULATIVE_ARGS *ca;
- enum machine_mode mode;
- tree type;
- int named;
+sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
+ tree type, int named)
{
if (! TARGET_SH5 && mode == VOIDmode)
return GEN_INT (ca->renesas_abi ? 1 : 0);
BASE_ARG_REG (mode)
+ (ROUND_REG (*ca, mode) ^ 1)),
const0_rtx);
- rtx r2 = gen_rtx_EXPR_LIST(VOIDmode,
- gen_rtx_REG (SFmode,
- BASE_ARG_REG (mode)
- + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
- GEN_INT (4));
+ rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SFmode,
+ BASE_ARG_REG (mode)
+ + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
+ GEN_INT (4));
return gen_rtx_PARALLEL(SCmode, gen_rtvec(2, r1, r2));
}
available.) */
void
-sh_function_arg_advance (ca, mode, type, named)
- CUMULATIVE_ARGS *ca;
- enum machine_mode mode;
- tree type;
- int named;
-{
- if (ca->force_mem)
- ca->force_mem = 0;
- else if (TARGET_SH5)
- {
- tree type2 = (ca->byref && type
- ? TREE_TYPE (type)
- : type);
- enum machine_mode mode2 = (ca->byref && type
- ? TYPE_MODE (type2)
- : mode);
- int dwords = ((ca->byref
- ? ca->byref
- : mode2 == BLKmode
- ? int_size_in_bytes (type2)
- : GET_MODE_SIZE (mode2)) + 7) / 8;
- int numregs = MIN (dwords, NPARM_REGS (SImode)
- - ca->arg_count[(int) SH_ARG_INT]);
-
- if (numregs)
- {
- ca->arg_count[(int) SH_ARG_INT] += numregs;
- if (TARGET_SHCOMPACT
- && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
- {
- ca->call_cookie
- |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
- - numregs, 1);
- /* N.B. We want this also for outgoing. */
- ca->stack_regs += numregs;
- }
- else if (ca->byref)
- {
- if (! ca->outgoing)
- ca->stack_regs += numregs;
- ca->byref_regs += numregs;
- ca->byref = 0;
- do
- ca->call_cookie
- |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
- - numregs, 2);
- while (--numregs);
- ca->call_cookie
- |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
- - 1, 1);
- }
- else if (dwords > numregs)
- {
- int pushregs = numregs;
-
- if (TARGET_SHCOMPACT)
- ca->stack_regs += numregs;
- while (pushregs < NPARM_REGS (SImode) - 1
- && (CALL_COOKIE_INT_REG_GET
- (ca->call_cookie,
- NPARM_REGS (SImode) - pushregs)
- == 1))
- {
- ca->call_cookie
- &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
- - pushregs, 1);
- pushregs++;
- }
- if (numregs == NPARM_REGS (SImode))
- ca->call_cookie
- |= CALL_COOKIE_INT_REG (0, 1)
- | CALL_COOKIE_STACKSEQ (numregs - 1);
- else
- ca->call_cookie
- |= CALL_COOKIE_STACKSEQ (numregs);
- }
- }
- if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
- && (named || ! ca->prototype_p))
- {
- if (mode2 == SFmode && ca->free_single_fp_reg)
- ca->free_single_fp_reg = 0;
- else if (ca->arg_count[(int) SH_ARG_FLOAT]
- < NPARM_REGS (SFmode))
- {
- int numfpregs
- = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
- NPARM_REGS (SFmode)
- - ca->arg_count[(int) SH_ARG_FLOAT]);
-
- ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
-
- if (TARGET_SHCOMPACT && ! ca->prototype_p)
- {
- if (ca->outgoing && numregs > 0)
- do
- {
- ca->call_cookie
- |= (CALL_COOKIE_INT_REG
- (ca->arg_count[(int) SH_ARG_INT]
- - numregs + ((numfpregs - 2) / 2),
- 4 + (ca->arg_count[(int) SH_ARG_FLOAT]
- - numfpregs) / 2));
- }
- while (numfpregs -= 2);
- }
- else if (mode2 == SFmode && (named)
- && (ca->arg_count[(int) SH_ARG_FLOAT]
- < NPARM_REGS (SFmode)))
- ca->free_single_fp_reg
- = FIRST_FP_PARM_REG - numfpregs
- + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
- }
- }
- return;
- }
-
- if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
- {
- /* Note that we've used the skipped register. */
- if (mode == SFmode && ca->free_single_fp_reg)
- {
- ca->free_single_fp_reg = 0;
- return;
- }
- /* When we have a DF after an SF, there's an SF register that get
- skipped in order to align the DF value. We note this skipped
- register, because the next SF value will use it, and not the
- SF that follows the DF. */
- if (mode == DFmode
- && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
- {
- ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
- + BASE_ARG_REG (mode));
- }
- }
-
- if (! (TARGET_SH4 || ca->renesas_abi)
- || PASS_IN_REG_P (*ca, mode, type))
- (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
- = (ROUND_REG (*ca, mode)
- + (mode == BLKmode
- ? ROUND_ADVANCE (int_size_in_bytes (type))
- : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
-}
-
-/* If the structure value address is not passed in a register, define
- `STRUCT_VALUE' as an expression returning an RTX for the place
- where the address is passed. If it returns 0, the address is
- passed as an "invisible" first argument. */
+sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
+ tree type, int named)
+{
+ if (ca->force_mem)
+ ca->force_mem = 0;
+ else if (TARGET_SH5)
+ {
+ tree type2 = (ca->byref && type
+ ? TREE_TYPE (type)
+ : type);
+ enum machine_mode mode2 = (ca->byref && type
+ ? TYPE_MODE (type2)
+ : mode);
+ int dwords = ((ca->byref
+ ? ca->byref
+ : mode2 == BLKmode
+ ? int_size_in_bytes (type2)
+ : GET_MODE_SIZE (mode2)) + 7) / 8;
+ int numregs = MIN (dwords, NPARM_REGS (SImode)
+ - ca->arg_count[(int) SH_ARG_INT]);
+
+ if (numregs)
+ {
+ ca->arg_count[(int) SH_ARG_INT] += numregs;
+ if (TARGET_SHCOMPACT
+ && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
+ {
+ ca->call_cookie
+ |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
+ - numregs, 1);
+ /* N.B. We want this also for outgoing. */
+ ca->stack_regs += numregs;
+ }
+ else if (ca->byref)
+ {
+ if (! ca->outgoing)
+ ca->stack_regs += numregs;
+ ca->byref_regs += numregs;
+ ca->byref = 0;
+ do
+ ca->call_cookie
+ |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
+ - numregs, 2);
+ while (--numregs);
+ ca->call_cookie
+ |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
+ - 1, 1);
+ }
+ else if (dwords > numregs)
+ {
+ int pushregs = numregs;
+
+ if (TARGET_SHCOMPACT)
+ ca->stack_regs += numregs;
+ while (pushregs < NPARM_REGS (SImode) - 1
+ && (CALL_COOKIE_INT_REG_GET
+ (ca->call_cookie,
+ NPARM_REGS (SImode) - pushregs)
+ == 1))
+ {
+ ca->call_cookie
+ &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
+ - pushregs, 1);
+ pushregs++;
+ }
+ if (numregs == NPARM_REGS (SImode))
+ ca->call_cookie
+ |= CALL_COOKIE_INT_REG (0, 1)
+ | CALL_COOKIE_STACKSEQ (numregs - 1);
+ else
+ ca->call_cookie
+ |= CALL_COOKIE_STACKSEQ (numregs);
+ }
+ }
+ if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
+ && (named || ! ca->prototype_p))
+ {
+ if (mode2 == SFmode && ca->free_single_fp_reg)
+ ca->free_single_fp_reg = 0;
+ else if (ca->arg_count[(int) SH_ARG_FLOAT]
+ < NPARM_REGS (SFmode))
+ {
+ int numfpregs
+ = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
+ NPARM_REGS (SFmode)
+ - ca->arg_count[(int) SH_ARG_FLOAT]);
+
+ ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
+
+ if (TARGET_SHCOMPACT && ! ca->prototype_p)
+ {
+ if (ca->outgoing && numregs > 0)
+ do
+ {
+ ca->call_cookie
+ |= (CALL_COOKIE_INT_REG
+ (ca->arg_count[(int) SH_ARG_INT]
+ - numregs + ((numfpregs - 2) / 2),
+ 4 + (ca->arg_count[(int) SH_ARG_FLOAT]
+ - numfpregs) / 2));
+ }
+ while (numfpregs -= 2);
+ }
+ else if (mode2 == SFmode && (named)
+ && (ca->arg_count[(int) SH_ARG_FLOAT]
+ < NPARM_REGS (SFmode)))
+ ca->free_single_fp_reg
+ = FIRST_FP_PARM_REG - numfpregs
+ + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
+ }
+ }
+ return;
+ }
+
+ if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
+ {
+ /* Note that we've used the skipped register. */
+ if (mode == SFmode && ca->free_single_fp_reg)
+ {
+ ca->free_single_fp_reg = 0;
+ return;
+ }
+ /* When we have a DF after an SF, there's an SF register that get
+ skipped in order to align the DF value. We note this skipped
+ register, because the next SF value will use it, and not the
+ SF that follows the DF. */
+ if (mode == DFmode
+ && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
+ {
+ ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
+ + BASE_ARG_REG (mode));
+ }
+ }
+
+ if (! (TARGET_SH4 || ca->renesas_abi)
+ || PASS_IN_REG_P (*ca, mode, type))
+ (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
+ = (ROUND_REG (*ca, mode)
+ + (mode == BLKmode
+ ? ROUND_ADVANCE (int_size_in_bytes (type))
+ : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
+}
+
/* The Renesas calling convention doesn't quite fit into this scheme since
the address is passed like an invisible argument, but one that is always
passed in memory. */
static rtx
-sh_struct_value_rtx (fndecl, incoming)
- tree fndecl;
- int incoming ATTRIBUTE_UNUSED;
+sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
{
if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
return 0;
return gen_rtx_REG (Pmode, 2);
}
+/* Worker function for TARGET_RETURN_IN_MEMORY. */
+
static bool
-sh_return_in_memory (type, fndecl)
- tree type;
- tree fndecl;
+sh_return_in_memory (tree type, tree fndecl)
{
if (TARGET_SH5)
{
later. Fortunately, we already have two flags that are part of struct
function that tell if a function uses varargs or stdarg. */
static void
-sh_setup_incoming_varargs (ca, mode, type, pretend_arg_size, second_time)
- CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED;
- enum machine_mode mode ATTRIBUTE_UNUSED;
- tree type ATTRIBUTE_UNUSED;
- int *pretend_arg_size ATTRIBUTE_UNUSED;
- int second_time ATTRIBUTE_UNUSED;
+sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ tree type ATTRIBUTE_UNUSED,
+ int *pretend_arg_size ATTRIBUTE_UNUSED,
+ int second_time ATTRIBUTE_UNUSED)
{
if (! current_function_stdarg)
abort ();
}
static bool
-sh_strict_argument_naming (ca)
- CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED;
+sh_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
{
return TARGET_SH5;
}
static bool
-sh_pretend_outgoing_varargs_named (ca)
- CUMULATIVE_ARGS *ca;
+sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *ca)
{
return ! (TARGET_HITACHI || ca->renesas_abi) && ! TARGET_SH5;
}
the other its replacement, at the start of a routine. */
int
-initial_elimination_offset (from, to)
- int from;
- int to;
+initial_elimination_offset (int from, int to)
{
int regs_saved;
int regs_saved_rounding = 0;
compiler. */
void
-sh_pr_interrupt (pfile)
- struct cpp_reader *pfile ATTRIBUTE_UNUSED;
+sh_pr_interrupt (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
{
pragma_interrupt = 1;
}
void
-sh_pr_trapa (pfile)
- struct cpp_reader *pfile ATTRIBUTE_UNUSED;
+sh_pr_trapa (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
{
pragma_interrupt = pragma_trapa = 1;
}
void
-sh_pr_nosave_low_regs (pfile)
- struct cpp_reader *pfile ATTRIBUTE_UNUSED;
+sh_pr_nosave_low_regs (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
{
pragma_nosave_low_regs = 1;
}
/* Generate 'handle_interrupt' attribute for decls */
static void
-sh_insert_attributes (node, attributes)
- tree node;
- tree * attributes;
+sh_insert_attributes (tree node, tree *attributes)
{
if (! pragma_interrupt
|| TREE_CODE (node) != FUNCTION_DECL)
/* Handle an "interrupt_handler" attribute; arguments as in
struct attribute_spec.handler. */
static tree
-sh_handle_interrupt_handler_attribute (node, name, args, flags, no_add_attrs)
- tree *node;
- tree name;
- tree args ATTRIBUTE_UNUSED;
- int flags ATTRIBUTE_UNUSED;
- bool *no_add_attrs;
+sh_handle_interrupt_handler_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
{
if (TREE_CODE (*node) != FUNCTION_DECL)
{
/* Handle an "sp_switch" attribute; arguments as in
struct attribute_spec.handler. */
static tree
-sh_handle_sp_switch_attribute (node, name, args, flags, no_add_attrs)
- tree *node;
- tree name;
- tree args;
- int flags ATTRIBUTE_UNUSED;
- bool *no_add_attrs;
+sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
+ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
if (TREE_CODE (*node) != FUNCTION_DECL)
{
/* Handle an "trap_exit" attribute; arguments as in
struct attribute_spec.handler. */
static tree
-sh_handle_trap_exit_attribute (node, name, args, flags, no_add_attrs)
- tree *node;
- tree name;
- tree args;
- int flags ATTRIBUTE_UNUSED;
- bool *no_add_attrs;
+sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
+ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
if (TREE_CODE (*node) != FUNCTION_DECL)
{
}
static tree
-sh_handle_renesas_attribute (node, name, args, flags, no_add_attrs)
- tree *node ATTRIBUTE_UNUSED;
- tree name ATTRIBUTE_UNUSED;
- tree args ATTRIBUTE_UNUSED;
- int flags ATTRIBUTE_UNUSED;
- bool *no_add_attrs ATTRIBUTE_UNUSED;
+sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
+ tree name ATTRIBUTE_UNUSED,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs ATTRIBUTE_UNUSED)
{
return NULL_TREE;
}
/* True if __attribute__((renesas)) or -mrenesas. */
int
-sh_attr_renesas_p (td)
- tree td;
+sh_attr_renesas_p (tree td)
{
if (TARGET_HITACHI)
return 1;
/* True if __attribute__((renesas)) or -mrenesas, for the current
function. */
int
-sh_cfun_attr_renesas_p ()
+sh_cfun_attr_renesas_p (void)
{
return sh_attr_renesas_p (current_function_decl);
}
int
-sh_cfun_interrupt_handler_p ()
+sh_cfun_interrupt_handler_p (void)
{
return (lookup_attribute ("interrupt_handler",
DECL_ATTRIBUTES (current_function_decl))
!= NULL_TREE);
}
+
+/* ??? target_switches in toplev.c is static, hence we have to duplicate it. */
+static const struct
+{
+ const char *const name;
+ const int value;
+ const char *const description;
+}
+sh_target_switches[] = TARGET_SWITCHES;
+#define target_switches sh_target_switches
+
+/* Like default_pch_valid_p, but take flag_mask into account. */
+const char *
+sh_pch_valid_p (const void *data_p, size_t len)
+{
+ const char *data = (const char *)data_p;
+ const char *flag_that_differs = NULL;
+ size_t i;
+ int old_flags;
+ int flag_mask
+ = (SH1_BIT | SH2_BIT | SH3_BIT | SH_E_BIT | HARD_SH4_BIT | FPU_SINGLE_BIT
+ | SH4_BIT | HITACHI_BIT | LITTLE_ENDIAN_BIT);
+
+ /* -fpic and -fpie also usually make a PCH invalid. */
+ if (data[0] != flag_pic)
+ return _("created and used with different settings of -fpic");
+ if (data[1] != flag_pie)
+ return _("created and used with different settings of -fpie");
+ data += 2;
+
+ /* Check target_flags. */
+ memcpy (&old_flags, data, sizeof (target_flags));
+ if (((old_flags ^ target_flags) & flag_mask) != 0)
+ {
+ for (i = 0; i < ARRAY_SIZE (target_switches); i++)
+ {
+ int bits;
+
+ bits = target_switches[i].value;
+ if (bits < 0)
+ bits = -bits;
+ bits &= flag_mask;
+ if ((target_flags & bits) != (old_flags & bits))
+ {
+ flag_that_differs = target_switches[i].name;
+ goto make_message;
+ }
+ }
+ abort ();
+ }
+ data += sizeof (target_flags);
+ len -= sizeof (target_flags);
+
+ /* Check string options. */
+#ifdef TARGET_OPTIONS
+ for (i = 0; i < ARRAY_SIZE (target_options); i++)
+ {
+ const char *str = *target_options[i].variable;
+ size_t l;
+ if (! str)
+ str = "";
+ l = strlen (str) + 1;
+ if (len < l || memcmp (data, str, l) != 0)
+ {
+ flag_that_differs = target_options[i].prefix;
+ goto make_message;
+ }
+ data += l;
+ len -= l;
+ }
+#endif
+
+ return NULL;
+
+ make_message:
+ {
+ char *r;
+ asprintf (&r, _("created and used with differing settings of `-m%s'"),
+ flag_that_differs);
+ if (r == NULL)
+ return _("out of memory");
+ return r;
+ }
+}
\f
/* Predicates used by the templates. */
Used only in general_movsrc_operand. */
int
-system_reg_operand (op, mode)
- rtx op;
- enum machine_mode mode ATTRIBUTE_UNUSED;
+system_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
switch (REGNO (op))
{
invalid as are subregs of system registers. */
int
-general_movsrc_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+general_movsrc_operand (rtx op, enum machine_mode mode)
{
if (GET_CODE (op) == MEM)
{
Same as general_operand, but no preinc allowed. */
int
-general_movdst_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+general_movdst_operand (rtx op, enum machine_mode mode)
{
/* Only pre dec allowed. */
if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == POST_INC)
/* Returns 1 if OP is a normal arithmetic register. */
int
-arith_reg_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+arith_reg_operand (rtx op, enum machine_mode mode)
{
if (register_operand (op, mode))
{
because this would lead to missing sign extensions when truncating from
DImode to SImode. */
int
-arith_reg_dest (op, mode)
- rtx op;
- enum machine_mode mode;
+arith_reg_dest (rtx op, enum machine_mode mode)
{
if (mode == DImode && GET_CODE (op) == SUBREG
&& GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))) < 8)
}
int
-int_gpr_dest (op, mode)
- rtx op;
- enum machine_mode mode ATTRIBUTE_UNUSED;
+int_gpr_dest (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
enum machine_mode op_mode = GET_MODE (op);
}
int
-fp_arith_reg_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+fp_arith_reg_operand (rtx op, enum machine_mode mode)
{
if (register_operand (op, mode))
{
/* Returns 1 if OP is a valid source operand for an arithmetic insn. */
int
-arith_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+arith_operand (rtx op, enum machine_mode mode)
{
if (arith_reg_operand (op, mode))
return 1;
/* Returns 1 if OP is a valid source operand for a compare insn. */
int
-arith_reg_or_0_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+arith_reg_or_0_operand (rtx op, enum machine_mode mode)
{
if (arith_reg_operand (op, mode))
return 1;
that takes either a register or a 6-bit immediate. */
int
-shmedia_6bit_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+shmedia_6bit_operand (rtx op, enum machine_mode mode)
{
return (arith_reg_operand (op, mode)
|| (GET_CODE (op) == CONST_INT && CONST_OK_FOR_I06 (INTVAL (op))));
/* Returns 1 if OP is a valid source operand for a logical operation. */
int
-logical_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+logical_operand (rtx op, enum machine_mode mode)
{
if (arith_reg_operand (op, mode))
return 1;
}
int
-and_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+and_operand (rtx op, enum machine_mode mode)
{
if (logical_operand (op, mode))
return 1;
&& mode == DImode
&& GET_CODE (op) == CONST_INT
&& CONST_OK_FOR_J16 (INTVAL (op)))
- return 1;
+ return 1;
return 0;
}
/* Nonzero if OP is a floating point value with value 0.0. */
int
-fp_zero_operand (op)
- rtx op;
+fp_zero_operand (rtx op)
{
REAL_VALUE_TYPE r;
/* Nonzero if OP is a floating point value with value 1.0. */
int
-fp_one_operand (op)
- rtx op;
+fp_one_operand (rtx op)
{
REAL_VALUE_TYPE r;
choosing an fldi alternative during reload and thus failing to
allocate a scratch register for the constant loading. */
int
-fldi_ok ()
+fldi_ok (void)
{
return ! TARGET_SH4 || TARGET_FMOVD || reload_completed;
}
int
-tertiary_reload_operand (op, mode)
- rtx op;
- enum machine_mode mode ATTRIBUTE_UNUSED;
+tertiary_reload_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
enum rtx_code code = GET_CODE (op);
return code == MEM || (TARGET_SH4 && code == CONST_DOUBLE);
}
int
-fpscr_operand (op, mode)
- rtx op;
- enum machine_mode mode ATTRIBUTE_UNUSED;
+fpscr_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
return (GET_CODE (op) == REG && REGNO (op) == FPSCR_REG
&& GET_MODE (op) == PSImode);
}
int
-fpul_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+fpul_operand (rtx op, enum machine_mode mode)
{
if (TARGET_SHMEDIA)
return fp_arith_reg_operand (op, mode);
}
int
-symbol_ref_operand (op, mode)
- rtx op;
- enum machine_mode mode ATTRIBUTE_UNUSED;
+symbol_ref_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
return (GET_CODE (op) == SYMBOL_REF);
}
/* Return the TLS type for TLS symbols, 0 for otherwise. */
int
-tls_symbolic_operand (op, mode)
- rtx op;
- enum machine_mode mode ATTRIBUTE_UNUSED;
+tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
if (GET_CODE (op) != SYMBOL_REF)
return 0;
}
int
-commutative_float_operator (op, mode)
- rtx op;
- enum machine_mode mode;
+commutative_float_operator (rtx op, enum machine_mode mode)
{
if (GET_MODE (op) != mode)
return 0;
}
int
-noncommutative_float_operator (op, mode)
- rtx op;
- enum machine_mode mode;
+noncommutative_float_operator (rtx op, enum machine_mode mode)
{
if (GET_MODE (op) != mode)
return 0;
}
int
-unary_float_operator (op, mode)
- rtx op;
- enum machine_mode mode;
+unary_float_operator (rtx op, enum machine_mode mode)
{
if (GET_MODE (op) != mode)
return 0;
}
int
-binary_float_operator (op, mode)
- rtx op;
- enum machine_mode mode;
+binary_float_operator (rtx op, enum machine_mode mode)
{
if (GET_MODE (op) != mode)
return 0;
}
int
-binary_logical_operator (op, mode)
- rtx op;
- enum machine_mode mode;
+binary_logical_operator (rtx op, enum machine_mode mode)
{
if (GET_MODE (op) != mode)
return 0;
}
int
-equality_comparison_operator (op, mode)
- rtx op;
- enum machine_mode mode;
+equality_comparison_operator (rtx op, enum machine_mode mode)
{
return ((mode == VOIDmode || GET_MODE (op) == mode)
&& (GET_CODE (op) == EQ || GET_CODE (op) == NE));
}
-int greater_comparison_operator (op, mode)
- rtx op;
- enum machine_mode mode;
+int
+greater_comparison_operator (rtx op, enum machine_mode mode)
{
if (mode != VOIDmode && GET_MODE (op) == mode)
return 0;
}
}
-int less_comparison_operator (op, mode)
- rtx op;
- enum machine_mode mode;
+int
+less_comparison_operator (rtx op, enum machine_mode mode)
{
if (mode != VOIDmode && GET_MODE (op) == mode)
return 0;
/* Accept pseudos and branch target registers. */
int
-target_reg_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+target_reg_operand (rtx op, enum machine_mode mode)
{
if (mode != DImode
|| GET_MODE (op) != DImode)
/* Same as target_reg_operand, except that label_refs and symbol_refs
are accepted before reload. */
int
-target_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+target_operand (rtx op, enum machine_mode mode)
{
if (mode != DImode)
return 0;
}
int
-mextr_bit_offset (op, mode)
- rtx op;
- enum machine_mode mode ATTRIBUTE_UNUSED;
+mextr_bit_offset (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
HOST_WIDE_INT i;
if (GET_CODE (op) != CONST_INT)
return 0;
i = INTVAL (op);
- return i >= 1*8 && i <= 7*8 && (i & 7) == 0;
+ return i >= 1 * 8 && i <= 7 * 8 && (i & 7) == 0;
}
int
-extend_reg_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+extend_reg_operand (rtx op, enum machine_mode mode)
{
return (GET_CODE (op) == TRUNCATE
? arith_operand
}
int
-trunc_hi_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+trunc_hi_operand (rtx op, enum machine_mode mode)
{
enum machine_mode op_mode = GET_MODE (op);
}
int
-extend_reg_or_0_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+extend_reg_or_0_operand (rtx op, enum machine_mode mode)
{
return (GET_CODE (op) == TRUNCATE
? arith_operand
}
int
-general_extend_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+general_extend_operand (rtx op, enum machine_mode mode)
{
return (GET_CODE (op) == TRUNCATE
? arith_operand
}
int
-inqhi_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+inqhi_operand (rtx op, enum machine_mode mode)
{
if (GET_CODE (op) != TRUNCATE || mode != GET_MODE (op))
return 0;
}
int
-sh_rep_vec (v, mode)
- rtx v;
- enum machine_mode mode;
+sh_rep_vec (rtx v, enum machine_mode mode)
{
int i;
rtx x, y;
if (GET_MODE_UNIT_SIZE (mode) == 1)
{
y = XVECEXP (v, 0, i);
- for (i -= 2 ; i >= 0; i -= 2)
+ for (i -= 2; i >= 0; i -= 2)
if (! rtx_equal_p (XVECEXP (v, 0, i + 1), x)
|| ! rtx_equal_p (XVECEXP (v, 0, i), y))
return 0;
/* Determine if V is a constant vector matching MODE with only one element
that is not a sign extension. Two byte-sized elements count as one. */
int
-sh_1el_vec (v, mode)
- rtx v;
- enum machine_mode mode;
+sh_1el_vec (rtx v, enum machine_mode mode)
{
int unit_size;
int i, last, least, sign_ix;
}
int
-sh_const_vec (v, mode)
- rtx v;
- enum machine_mode mode;
+sh_const_vec (rtx v, enum machine_mode mode)
{
int i;
/* Return the destination address of a branch. */
static int
-branch_dest (branch)
- rtx branch;
+branch_dest (rtx branch)
{
rtx dest = SET_SRC (PATTERN (branch));
int dest_uid;
We assume REG is a reload reg, and therefore does
not live past labels. It may live past calls or jumps though. */
int
-reg_unused_after (reg, insn)
- rtx reg;
- rtx insn;
+reg_unused_after (rtx reg, rtx insn)
{
enum rtx_code code;
rtx set;
while ((insn = NEXT_INSN (insn)))
{
+ rtx set;
+ if (!INSN_P (insn))
+ continue;
+
code = GET_CODE (insn);
#if 0
else if (code == JUMP_INSN)
return 0;
}
- else if (GET_RTX_CLASS (code) == 'i')
- {
- rtx set = single_set (insn);
- if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
- return 0;
- if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
- return GET_CODE (SET_DEST (set)) != MEM;
- if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
- return 0;
- }
+ set = single_set (insn);
+ if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
+ return 0;
+ if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ return GET_CODE (SET_DEST (set)) != MEM;
+ if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
+ return 0;
if (code == CALL_INSN && call_used_regs[REGNO (reg)])
return 1;
static GTY(()) rtx fpscr_rtx;
rtx
-get_fpscr_rtx ()
+get_fpscr_rtx (void)
{
if (! fpscr_rtx)
{
- fpscr_rtx = gen_rtx (REG, PSImode, FPSCR_REG);
+ fpscr_rtx = gen_rtx_REG (PSImode, FPSCR_REG);
REG_USERVAR_P (fpscr_rtx) = 1;
mark_user_reg (fpscr_rtx);
}
}
void
-emit_sf_insn (pat)
- rtx pat;
+emit_sf_insn (rtx pat)
{
emit_insn (pat);
}
void
-emit_df_insn (pat)
- rtx pat;
+emit_df_insn (rtx pat)
{
emit_insn (pat);
}
void
-expand_sf_unop (fun, operands)
- rtx (*fun) PARAMS ((rtx, rtx, rtx));
- rtx *operands;
+expand_sf_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
{
emit_sf_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
}
void
-expand_sf_binop (fun, operands)
- rtx (*fun) PARAMS ((rtx, rtx, rtx, rtx));
- rtx *operands;
+expand_sf_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
{
emit_sf_insn ((*fun) (operands[0], operands[1], operands[2],
get_fpscr_rtx ()));
}
void
-expand_df_unop (fun, operands)
- rtx (*fun) PARAMS ((rtx, rtx, rtx));
- rtx *operands;
+expand_df_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
{
emit_df_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
}
void
-expand_df_binop (fun, operands)
- rtx (*fun) PARAMS ((rtx, rtx, rtx, rtx));
- rtx *operands;
+expand_df_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
{
emit_df_insn ((*fun) (operands[0], operands[1], operands[2],
- get_fpscr_rtx ()));
+ get_fpscr_rtx ()));
}
\f
/* ??? gcc does flow analysis strictly after common subexpression
same basic block. */
static void
-mark_use (x, reg_set_block)
- rtx x, *reg_set_block;
+mark_use (rtx x, rtx *reg_set_block)
{
enum rtx_code code;
}
}
\f
-static rtx get_free_reg PARAMS ((HARD_REG_SET));
+static rtx get_free_reg (HARD_REG_SET);
/* This function returns a register to use to load the address to load
the fpscr from. Currently it always returns r1 or r7, but when we are
the middle. */
static rtx
-get_free_reg (regs_live)
- HARD_REG_SET regs_live;
+get_free_reg (HARD_REG_SET regs_live)
{
if (! TEST_HARD_REG_BIT (regs_live, 1))
return gen_rtx_REG (Pmode, 1);
/* This function will set the fpscr from memory.
MODE is the mode we are setting it to. */
void
-fpscr_set_from_mem (mode, regs_live)
- int mode;
- HARD_REG_SET regs_live;
+fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
{
enum attr_fp_mode fp_mode = mode;
rtx addr_reg = get_free_reg (regs_live);
#endif
int
-sh_insn_length_adjustment (insn)
- rtx insn;
+sh_insn_length_adjustment (rtx insn)
{
/* Instructions with unfilled delay slots take up an extra two bytes for
the nop in the delay slot. */
/* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
isn't protected by a PIC unspec. */
int
-nonpic_symbol_mentioned_p (x)
- rtx x;
+nonpic_symbol_mentioned_p (rtx x)
{
register const char *fmt;
register int i;
|| XINT (x, 1) == UNSPEC_GOTTPOFF
|| XINT (x, 1) == UNSPEC_DTPOFF
|| XINT (x, 1) == UNSPEC_PLT))
- return 0;
+ return 0;
fmt = GET_RTX_FORMAT (GET_CODE (x));
for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
@GOTOFF in `reg'. */
rtx
-legitimize_pic_address (orig, mode, reg)
- rtx orig;
- enum machine_mode mode ATTRIBUTE_UNUSED;
- rtx reg;
+legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx reg)
{
if (tls_symbolic_operand (orig, Pmode))
return orig;
/* Mark the use of a constant in the literal table. If the constant
has multiple labels, make it unique. */
static rtx
-mark_constant_pool_use (x)
- rtx x;
+mark_constant_pool_use (rtx x)
{
rtx insn, lab, pattern;
of an unconditional jump BRANCH2. We only want to do this if the
resulting branch will have a short displacement. */
int
-sh_can_redirect_branch (branch1, branch2)
- rtx branch1;
- rtx branch2;
+sh_can_redirect_branch (rtx branch1, rtx branch2)
{
if (flag_expensive_optimizations && simplejump_p (branch2))
{
/* Return nonzero if register old_reg can be renamed to register new_reg. */
int
-sh_hard_regno_rename_ok (old_reg, new_reg)
- unsigned int old_reg ATTRIBUTE_UNUSED;
- unsigned int new_reg;
+sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
+ unsigned int new_reg)
{
-
-/* Interrupt functions can only use registers that have already been
- saved by the prologue, even if they would normally be
- call-clobbered. */
+ /* Interrupt functions can only use registers that have already been
+ saved by the prologue, even if they would normally be
+ call-clobbered. */
if (sh_cfun_interrupt_handler_p () && !regs_ever_live[new_reg])
- return 0;
+ return 0;
- return 1;
+ return 1;
}
/* Function to update the integer COST
the same cost as a data-dependence. The return value should be
the new value for COST. */
static int
-sh_adjust_cost (insn, link, dep_insn, cost)
- rtx insn;
- rtx link ATTRIBUTE_UNUSED;
- rtx dep_insn;
- int cost;
+sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
{
rtx reg, use_pat;
if (TARGET_SHMEDIA)
{
/* On SHmedia, if the dependence is an anti-dependence or
- output-dependence, there is no cost. */
+ output-dependence, there is no cost. */
if (REG_NOTE_KIND (link) != 0)
cost = 0;
&& get_attr_type (insn) == TYPE_DYN_SHIFT
&& get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
&& reg_overlap_mentioned_p (SET_DEST (PATTERN (dep_insn)),
- XEXP (SET_SRC (single_set(insn)),
+ XEXP (SET_SRC (single_set (insn)),
1)))
cost++;
/* When an LS group instruction with a latency of less than
/* Check if INSN is flow-dependent on DEP_INSN. Can also be used to check
if DEP_INSN is anti-flow dependent on INSN. */
static int
-flow_dependent_p (insn, dep_insn)
- rtx insn, dep_insn;
+flow_dependent_p (rtx insn, rtx dep_insn)
{
rtx tmp = PATTERN (insn);
/* A helper function for flow_dependent_p called through note_stores. */
static void
-flow_dependent_p_1 (x, pat, data)
- rtx x;
- rtx pat ATTRIBUTE_UNUSED;
- void *data;
+flow_dependent_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
{
rtx * pinsn = (rtx *) data;
do not look like function calls to leaf_function_p. Hence we must
do this extra check. */
int
-sh_pr_n_sets ()
+sh_pr_n_sets (void)
{
return REG_N_SETS (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
}
/* This Function returns nonzero if the DFA based scheduler interface
is to be used. At present this is supported for the SH4 only. */
static int
-sh_use_dfa_interface()
+sh_use_dfa_interface (void)
{
if (TARGET_HARD_SH4)
return 1;
/* This function returns "2" to indicate dual issue for the SH4
processor. To be used by the DFA pipeline description. */
static int
-sh_issue_rate()
+sh_issue_rate (void)
{
if (TARGET_SUPERSCALAR)
return 2;
return 1;
}
+/* Functions for ready queue reordering for sched1. */
+
+/* Return the register-pressure weight that pattern element X (a SET or
+   CLOBBER) contributes for registers of mode MODE: 1 for a CLOBBER of a
+   MODE register, 1 for a SET creating a new MODE register value, 0 for a
+   SET whose source already mentions the destination register (no new
+   value is born), 0 otherwise.  */
+static short
+find_set_regmode_weight (rtx x, enum machine_mode mode)
+{
+  if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
+    return 1;
+  if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
+    {
+      if (GET_CODE (SET_DEST (x)) == REG)
+	{
+	  if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
+	    return 1;
+	  else
+	    return 0;
+	}
+      return 1;
+    }
+  return 0;
+}
+
+/* Return the net regmode weight of INSN for MODE: +1 for each MODE
+   register born in its pattern, -1 for each MODE register that dies
+   here (has a REG_DEAD or REG_UNUSED note).  */
+static short
+find_insn_regmode_weight (rtx insn, enum machine_mode mode)
+{
+  short reg_weight = 0;
+  rtx x;
+
+  /* Increment weight for each register born here.  For a PARALLEL the
+     top-level call contributes 0 and the elements are walked below.  */
+  x = PATTERN (insn);
+  reg_weight += find_set_regmode_weight (x, mode);
+  if (GET_CODE (x) == PARALLEL)
+    {
+      int j;
+      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
+	{
+	  x = XVECEXP (PATTERN (insn), 0, j);
+	  reg_weight += find_set_regmode_weight (x, mode);
+	}
+    }
+  /* Decrement weight for each register that dies here.  */
+  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
+    {
+      if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
+	{
+	  rtx note = XEXP (x, 0);
+	  if (GET_CODE (note) == REG && GET_MODE (note) == mode)
+	    reg_weight--;
+	}
+    }
+  return reg_weight;
+}
+
+/* Calculate regmode weights for all insns of basic block B.  A DFmode
+   (resp. DImode) value occupies two SFmode (SImode) registers, hence the
+   doubled DFmode/DImode contribution.  */
+static void
+find_regmode_weight (int b, enum machine_mode mode)
+{
+  rtx insn, next_tail, head, tail;
+
+  get_block_head_tail (b, &head, &tail);
+  next_tail = NEXT_INSN (tail);
+
+  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+    {
+      /* Handle register life information.  */
+      if (!INSN_P (insn))
+	continue;
+
+      if (mode == SFmode)
+	INSN_REGMODE_WEIGHT (insn, mode) =
+	  find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DFmode);
+      else if (mode == SImode)
+	INSN_REGMODE_WEIGHT (insn, mode) =
+	  find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DImode);
+    }
+}
+
+/* Comparison function for ready queue sorting (qsort-compatible; note
+   that X and Y are deliberately swapped so the sort is descending).  */
+static int
+rank_for_reorder (const void *x, const void *y)
+{
+  rtx tmp = *(const rtx *) y;
+  rtx tmp2 = *(const rtx *) x;
+
+  /* An insn in a schedule group should be issued first.  */
+  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
+    return SCHED_GROUP_P (tmp2) ? 1 : -1;
+
+  /* If insns are equally good, sort by INSN_LUID (original insn order);
+     this minimizes instruction movement, thus minimizing sched's effect
+     on register pressure.  */
+  return INSN_LUID (tmp) - INSN_LUID (tmp2);
+}
+
+/* Resort the N-element array A, in which only the last element (A[N-1])
+   may be out of order: shift greater-ranked elements up and insert it
+   into its sorted position.  */
+static void
+swap_reorder (rtx *a, int n)
+{
+  rtx insn = a[n - 1];
+  int i = n - 2;
+
+  while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
+    {
+      a[i + 1] = a[i];
+      i -= 1;
+    }
+  a[i + 1] = insn;
+}
+
+/* Sort the N_READY insns in READY by rank_for_reorder: a single
+   insertion step suffices for two elements, qsort otherwise.  */
+#define SCHED_REORDER(READY, N_READY)                                  \
+  do                                                                   \
+    {                                                                  \
+      if ((N_READY) == 2)                                              \
+	swap_reorder (READY, N_READY);                                 \
+      else if ((N_READY) > 2)                                          \
+	qsort (READY, N_READY, sizeof (rtx), rank_for_reorder);        \
+    }                                                                  \
+  while (0)
+
+/* Sort the ready list READY by ascending priority, using the SCHED_REORDER
+   macro.  */
+static void
+ready_reorder (rtx *ready, int nready)
+{
+  SCHED_REORDER (ready, nready);
+}
+
+/* TARGET_SCHED_INIT_GLOBAL hook: calculate regmode weights for all insns
+   of all basic blocks.  The per-insn weight arrays are indexed by insn
+   LUID, so OLD_MAX_UID entries are allocated.  Freed again in
+   sh_md_finish_global.  */
+static void
+sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
+		   int verbose ATTRIBUTE_UNUSED,
+		   int old_max_uid)
+{
+  basic_block b;
+
+  regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
+  regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
+
+  FOR_EACH_BB_REVERSE (b)
+  {
+    find_regmode_weight (b->index, SImode);
+    find_regmode_weight (b->index, SFmode);
+  }
+
+  CURR_REGMODE_PRESSURE (SImode) = 0;
+  CURR_REGMODE_PRESSURE (SFmode) = 0;
+
+}
+
+/* TARGET_SCHED_FINISH_GLOBAL hook: free the regmode weight arrays
+   allocated by sh_md_init_global.  */
+static void
+sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
+		     int verbose ATTRIBUTE_UNUSED)
+{
+  if (regmode_weight[0])
+    {
+      free (regmode_weight[0]);
+      regmode_weight[0] = NULL;
+    }
+  if (regmode_weight[1])
+    {
+      free (regmode_weight[1]);
+      regmode_weight[1] = NULL;
+    }
+}
+
+/* Cache the can_issue_more so that we can return it from reorder2.  Also,
+   keep count of register pressures on SImode and SFmode.  USE and CLOBBER
+   insns do not consume an issue slot.  */
+static int
+sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
+		   int sched_verbose ATTRIBUTE_UNUSED,
+		   rtx insn,
+		   int can_issue_more)
+{
+  if (GET_CODE (PATTERN (insn)) != USE
+      && GET_CODE (PATTERN (insn)) != CLOBBER)
+    cached_can_issue_more = can_issue_more - 1;
+  else
+    cached_can_issue_more = can_issue_more;
+
+  if (reload_completed)
+    return cached_can_issue_more;
+
+  /* Pressure tracking is only needed for the first (pre-reload) pass.  */
+  CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
+  CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);
+
+  return cached_can_issue_more;
+}
+
+/* TARGET_SCHED_INIT hook: reset the register pressure counters at the
+   start of each scheduling region.  */
+static void
+sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
+	    int verbose ATTRIBUTE_UNUSED,
+	    int veclen ATTRIBUTE_UNUSED)
+{
+  CURR_REGMODE_PRESSURE (SImode) = 0;
+  CURR_REGMODE_PRESSURE (SFmode) = 0;
+}
+
+/* Some magic numbers. */
+/* Pressure on register r0 can lead to spill failures, so avoid sched1 for
+   functions that already have high pressure on r0.  */
+#define R0_MAX_LIFE_REGIONS 2
+#define R0_MAX_LIVE_LENGTH 12
+/* Register Pressure thresholds for SImode and SFmode registers. */
+#define SIMODE_MAX_WEIGHT 5
+#define SFMODE_MAX_WEIGHT 10
+
+/* Return true if the pressure is high for MODE.  */
+static short
+high_pressure (enum machine_mode mode)
+{
+  /* Pressure on register r0 can lead to spill failures, so avoid sched1
+     for functions that already have high pressure on r0.  */
+  if ((REG_N_SETS (0) - REG_N_DEATHS (0)) >= R0_MAX_LIFE_REGIONS
+      && REG_LIVE_LENGTH (0) >= R0_MAX_LIVE_LENGTH)
+    return 1;
+
+  if (mode == SFmode)
+    return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
+  else
+    return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
+}
+
+/* TARGET_SCHED_REORDER hook: reorder the ready queue if register
+   pressure is high.  Only active in sched1 (before reload); always
+   returns the normal issue rate.  */
+static int
+sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
+	    int sched_verbose ATTRIBUTE_UNUSED,
+	    rtx *ready,
+	    int *n_readyp,
+	    int clock_var ATTRIBUTE_UNUSED)
+{
+  if (reload_completed)
+    return sh_issue_rate ();
+
+  if (high_pressure (SFmode) || high_pressure (SImode))
+    {
+      ready_reorder (ready, *n_readyp);
+    }
+
+  return sh_issue_rate ();
+}
+
+/* TARGET_SCHED_REORDER2 hook: request cycle skipping (see
+   sh_dfa_new_cycle) if the current register pressure is high, and
+   return the issue count cached by sh_variable_issue.  Only active in
+   sched1 (before reload).  */
+static int
+sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
+	     int sched_verbose ATTRIBUTE_UNUSED,
+	     rtx *ready ATTRIBUTE_UNUSED,
+	     int *n_readyp ATTRIBUTE_UNUSED,
+	     int clock_var ATTRIBUTE_UNUSED)
+{
+  if (reload_completed)
+    return cached_can_issue_more;
+
+  if (high_pressure (SFmode) || high_pressure (SImode))
+    skip_cycles = 1;
+
+  return cached_can_issue_more;
+}
+
+/* Skip cycles without sorting the ready queue.  This will move insns
+   from Q->R.  If this is the last cycle we are skipping, allow sorting
+   of the ready queue by sh_reorder.  */
+
+/* Generally, skipping these many cycles are sufficient for all insns to move
+   from Q -> R.  */
+#define MAX_SKIPS 8
+
+/* TARGET_SCHED_DFA_NEW_CYCLE hook.  Returns 1 to advance the cycle;
+   *SORT_P set to 0 suppresses ready-queue sorting for that cycle.  */
+static int
+sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
+		  int sched_verbose ATTRIBUTE_UNUSED,
+		  rtx insn ATTRIBUTE_UNUSED,
+		  int last_clock_var,
+		  int clock_var,
+		  int *sort_p)
+{
+  if (reload_completed)
+    return 0;
+
+  if (skip_cycles)
+    {
+      if ((clock_var - last_clock_var) < MAX_SKIPS)
+	{
+	  *sort_p = 0;
+	  return 1;
+	}
+      /* If this is the last cycle we are skipping, allow reordering of R.  */
+      if ((clock_var - last_clock_var) == MAX_SKIPS)
+	{
+	  *sort_p = 1;
+	  return 1;
+	}
+    }
+
+  skip_cycles = 0;
+
+  return 0;
+}
+
/* SHmedia requires registers for branches, so we can't generate new
branches past reload. */
static bool
-sh_cannot_modify_jumps_p ()
+sh_cannot_modify_jumps_p (void)
{
return (TARGET_SHMEDIA && (reload_in_progress || reload_completed));
}
}
static bool
-sh_ms_bitfield_layout_p (record_type)
- tree record_type ATTRIBUTE_UNUSED;
+sh_ms_bitfield_layout_p (tree record_type ATTRIBUTE_UNUSED)
{
return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
}
CXT is an RTX for the static chain value for the function. */
void
-sh_initialize_trampoline (tramp, fnaddr, cxt)
- rtx tramp, fnaddr, cxt;
+sh_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
{
if (TARGET_SHMEDIA64)
{
movishori));
emit_insn (gen_rotrdi3_mextr (quad0, quad0,
GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
- emit_insn (gen_ashldi3_media (quad0, quad0, GEN_INT (2)));
+ emit_insn (gen_ashldi3_media (quad0, quad0, const2_rtx));
emit_move_insn (gen_rtx_MEM (DImode, tramp), quad0);
emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, cxtload, 0),
gen_rtx_SUBREG (V2HImode, cxt, 0),
movishori));
emit_insn (gen_rotrdi3_mextr (cxtload, cxtload,
GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
- emit_insn (gen_ashldi3_media (cxtload, cxtload, GEN_INT (2)));
+ emit_insn (gen_ashldi3_media (cxtload, cxtload, const2_rtx));
if (TARGET_LITTLE_ENDIAN)
{
emit_insn (gen_mshflo_l_di (quad1, ptabs, cxtload));
own stack frame, so it must not pass pointers or references to
these arguments to other functions by means of sibling calls. */
static bool
-sh_function_ok_for_sibcall (decl, exp)
- tree decl;
- tree exp ATTRIBUTE_UNUSED;
+sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
return (decl
&& (! TARGET_SHCOMPACT
#define SH_BLTIN_PV 20
{ 0, 8 },
};
-/* mcmv: operands considered unsigned. */
+/* mcmv: operands considered unsigned. */
/* mmulsum_wq, msad_ubq: result considered unsigned long long. */
-/* mperm: control value considered unsigned int. */
-/* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int. */
+/* mperm: control value considered unsigned int. */
+/* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int. */
/* mshards_q: returns signed short. */
/* nsb: takes long long arg, returns unsigned char. */
static const struct builtin_description bdesc[] =
};
static void
-sh_media_init_builtins ()
+sh_media_init_builtins (void)
{
tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
const struct builtin_description *d;
}
static void
-sh_init_builtins ()
+sh_init_builtins (void)
{
if (TARGET_SHMEDIA)
sh_media_init_builtins ();
IGNORE is nonzero if the value is to be ignored. */
static rtx
-sh_expand_builtin (exp, target, subtarget, mode, ignore)
- tree exp;
- rtx target;
- rtx subtarget ATTRIBUTE_UNUSED;
- enum machine_mode mode ATTRIBUTE_UNUSED;
- int ignore;
+sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED, int ignore)
{
tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
tree arglist = TREE_OPERAND (exp, 1);
}
void
-sh_expand_unop_v2sf (code, op0, op1)
- enum rtx_code code;
- rtx op0, op1;
+sh_expand_unop_v2sf (enum rtx_code code, rtx op0, rtx op1)
{
rtx sel0 = const0_rtx;
rtx sel1 = const1_rtx;
- rtx (*fn) PARAMS ((rtx, rtx, rtx, rtx, rtx)) = gen_unary_sf_op;
+ rtx (*fn) (rtx, rtx, rtx, rtx, rtx) = gen_unary_sf_op;
rtx op = gen_rtx_fmt_e (code, SFmode, op1);
emit_insn ((*fn) (op0, op1, op, sel0, sel0));
}
void
-sh_expand_binop_v2sf (code, op0, op1, op2)
- enum rtx_code code;
- rtx op0, op1, op2;
+sh_expand_binop_v2sf (enum rtx_code code, rtx op0, rtx op1, rtx op2)
{
rtx sel0 = const0_rtx;
rtx sel1 = const1_rtx;
- rtx (*fn) PARAMS ((rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx))
+ rtx (*fn) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx)
= gen_binary_sf_op;
rtx op = gen_rtx_fmt_ee (code, SFmode, op1, op2);
/* Return the class of registers for which a mode change from FROM to TO
is invalid. */
bool
-sh_cannot_change_mode_class (from, to, class)
- enum machine_mode from, to;
- enum reg_class class;
+sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
+ enum reg_class class)
{
if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
{
- if (TARGET_LITTLE_ENDIAN)
- {
- if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
- return reg_classes_intersect_p (DF_REGS, class);
- }
- else
- {
- if (GET_MODE_SIZE (from) < 8)
- return reg_classes_intersect_p (DF_HI_REGS, class);
- }
+ if (TARGET_LITTLE_ENDIAN)
+ {
+ if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
+ return reg_classes_intersect_p (DF_REGS, class);
+ }
+ else
+ {
+ if (GET_MODE_SIZE (from) < 8)
+ return reg_classes_intersect_p (DF_HI_REGS, class);
+ }
}
return 0;
}
that label is used. */
void
-sh_mark_label (address, nuses)
- rtx address;
- int nuses;
+sh_mark_label (rtx address, int nuses)
{
if (GOTOFF_P (address))
{
register information here is not used for SFmode. */
int
-sh_register_move_cost (mode, srcclass, dstclass)
- enum machine_mode mode;
- enum reg_class srcclass, dstclass;
+sh_register_move_cost (enum machine_mode mode,
+ enum reg_class srcclass, enum reg_class dstclass)
{
if (dstclass == T_REGS || dstclass == PR_REGS)
return 10;
return 4;
if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
- || (dstclass== MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
+ || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
return 9;
if ((REGCLASS_HAS_FP_REG (dstclass)
&& REGCLASS_HAS_GENERAL_REG (srcclass))
|| (REGCLASS_HAS_GENERAL_REG (dstclass)
&& REGCLASS_HAS_FP_REG (srcclass)))
- return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
- * ((GET_MODE_SIZE (mode) + 7) / 8U));
+ return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
+ * ((GET_MODE_SIZE (mode) + 7) / 8U));
if ((dstclass == FPUL_REGS
&& REGCLASS_HAS_GENERAL_REG (srcclass))
/* Like register_operand, but take into account that SHMEDIA can use
the constant zero like a general register. */
int
-sh_register_operand (op, mode)
- rtx op;
- enum machine_mode mode;
+sh_register_operand (rtx op, enum machine_mode mode)
{
if (op == CONST0_RTX (mode) && TARGET_SHMEDIA)
return 1;
return register_operand (op, mode);
}
-static rtx emit_load_ptr PARAMS ((rtx, rtx));
+/* Predicate for the second operand of an SImode compare: accept the T
+   bit register (in SImode) in addition to any arith_operand.  */
+int
+cmpsi_operand (rtx op, enum machine_mode mode)
+{
+  if (GET_CODE (op) == REG && REGNO (op) == T_REG
+      && GET_MODE (op) == SImode)
+    return 1;
+  return arith_operand (op, mode);
+}
+
+static rtx emit_load_ptr (rtx, rtx);
static rtx
-emit_load_ptr (reg, addr)
- rtx reg, addr;
+emit_load_ptr (rtx reg, rtx addr)
{
rtx mem = gen_rtx_MEM (ptr_mode, addr);
}
void
-sh_output_mi_thunk (file, thunk_fndecl, delta, vcall_offset, function)
- FILE *file;
- tree thunk_fndecl ATTRIBUTE_UNUSED;
- HOST_WIDE_INT delta;
- HOST_WIDE_INT vcall_offset;
- tree function;
+sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
+ tree function)
{
CUMULATIVE_ARGS cum;
int structure_value_byref = 0;
SH that it's best to do this completely machine independently.
"this" is passed as first argument, unless a structure return pointer
comes first, in which case "this" comes second. */
- INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0);
+ INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
#ifndef PCC_STATIC_STRUCT_RETURN
if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
structure_value_byref = 1;
abort (); /* FIXME */
emit_load_ptr (scratch0, offset_addr);
- if (Pmode != ptr_mode)
+ if (Pmode != ptr_mode)
scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
emit_insn (gen_add2_insn (this, scratch0));
}
if (optimize > 0 && flag_schedule_insns_after_reload)
{
-
- find_basic_blocks (insns, max_reg_num (), rtl_dump_file);
- life_analysis (insns, rtl_dump_file, PROP_FINAL);
+ find_basic_blocks (insns, max_reg_num (), dump_file);
+ life_analysis (insns, dump_file, PROP_FINAL);
split_all_insns (1);
- schedule_insns (rtl_dump_file);
+ schedule_insns (dump_file);
}
sh_reorg ();
if (optimize > 0 && flag_delayed_branch)
- dbr_schedule (insns, rtl_dump_file);
+ dbr_schedule (insns, dump_file);
shorten_branches (insns);
final_start_function (insns, file, 1);
final (insns, file, 1, 0);
return val;
}
+/* Expand an scc comparing the T bit (sh_compare_op0) against a constant
+   (sh_compare_op1) into TARGET without emitting a compare.  Return 1 on
+   success, 0 if the operands do not fit this pattern (caller falls back
+   to the generic expansion).  */
+int
+sh_expand_t_scc (enum rtx_code code, rtx target)
+{
+  rtx result = target;
+  HOST_WIDE_INT val;
+
+  if (GET_CODE (sh_compare_op0) != REG || REGNO (sh_compare_op0) != T_REG
+      || GET_CODE (sh_compare_op1) != CONST_INT)
+    return 0;
+  if (GET_CODE (result) != REG)
+    result = gen_reg_rtx (SImode);
+  val = INTVAL (sh_compare_op1);
+  if ((code == EQ && val == 1) || (code == NE && val == 0))
+    emit_insn (gen_movt (result));
+  else if ((code == EQ && val == 0) || (code == NE && val == 1))
+    {
+      /* Compute !T: subc yields result - result - T = -T, so adding 1
+	 gives 1 - T.  The CLOBBER tells flow the old value is dead.  */
+      emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
+      emit_insn (gen_subc (result, result, result));
+      emit_insn (gen_addsi3 (result, result, const1_rtx));
+    }
+  else if (code == EQ || code == NE)
+    /* T is always 0 or 1, so comparing against any other constant has a
+       known outcome.  */
+    emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
+  else
+    return 0;
+  if (result != target)
+    emit_move_insn (target, result);
+  return 1;
+}
+
+/* INSN is an sfunc; return the rtx that describes the address used:
+   the first Pmode general register USEd in its PARALLEL, or failing
+   that operand 1 of a leading UNSPEC_VOLATILE.  Aborts if neither is
+   found.  */
+static rtx
+extract_sfunc_addr (rtx insn)
+{
+  rtx pattern, part = NULL_RTX;
+  int len, i;
+
+  pattern = PATTERN (insn);
+  len = XVECLEN (pattern, 0);
+  for (i = 0; i < len; i++)
+    {
+      part = XVECEXP (pattern, 0, i);
+      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
+	  && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
+	return XEXP (part, 0);
+    }
+  if (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE)
+    return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
+  abort ();
+}
+
+/* Verify that the register in use_sfunc_addr still agrees with the address
+   used in the sfunc.  This prevents fill_slots_from_thread from changing
+   use_sfunc_addr.
+   INSN is the use_sfunc_addr instruction, and REG is the register it
+   guards.  Aborts if no sfunc is found before the next label or jump.  */
+int
+check_use_sfunc_addr (rtx insn, rtx reg)
+{
+  /* Search for the sfunc.  It should really come right after INSN.  */
+  while ((insn = NEXT_INSN (insn)))
+    {
+      if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
+	break;
+      if (! INSN_P (insn))
+	continue;
+
+      /* Look inside a delay-slot SEQUENCE at the branch/call itself.  */
+      if (GET_CODE (PATTERN (insn)) == SEQUENCE)
+	insn = XVECEXP (PATTERN (insn), 0, 0);
+      if (GET_CODE (PATTERN (insn)) != PARALLEL
+	  || get_attr_type (insn) != TYPE_SFUNC)
+	continue;
+      return rtx_equal_p (extract_sfunc_addr (insn), reg);
+    }
+  abort ();
+}
+
#include "gt-sh.h"