Copyright (C) 1993, 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002,
2003, 2004 Free Software Foundation, Inc.
Contributed by Steve Chamberlain (sac@cygnus.com).
- Improved by Jim Wilson (wilson@cygnus.com).
+ Improved by Jim Wilson (wilson@cygnus.com).
This file is part of GCC.
#include "recog.h"
#include "c-pragma.h"
#include "integrate.h"
+#include "dwarf2.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "ra.h"
#include "cfglayout.h"
#include "intl.h"
+#include "sched-int.h"
+#include "ggc.h"
+#include "tree-gimple.h"
+
int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;
interrupted. */
int pragma_nosave_low_regs;
-/* This is used for communication between SETUP_INCOMING_VARARGS and
+/* This is used for communication between TARGET_SETUP_INCOMING_VARARGS and
sh_expand_prologue. */
int current_function_anonymous_args;
/* Which cpu are we scheduling for. */
enum processor_type sh_cpu;
+/* Definitions used in ready queue reordering for the first scheduling pass.  */
+
+/* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID. */
+static short *regmode_weight[2];
+
+/* Total SFmode and SImode weights of scheduled insns. */
+static int curr_regmode_pressure[2];
+
+/* If true, skip cycles for Q -> R movement. */
+static int skip_cycles = 0;
+
+/* Cached value of can_issue_more.  This is cached in the sh_variable_issue
+   hook and returned from sh_reorder2.  */
+static short cached_can_issue_more;
+
/* Saved operands from the last compare to use when we generate an scc
or bcc insn. */
static void force_into (rtx, rtx);
static void print_slot (rtx);
static rtx add_constant (rtx, enum machine_mode, rtx);
-static void dump_table (rtx);
+static void dump_table (rtx, rtx);
static int hi_const (rtx);
static int broken_move (rtx);
static int mova_p (rtx);
static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void sh_insert_attributes (tree, tree *);
static int sh_adjust_cost (rtx, rtx, rtx, int);
-static int sh_use_dfa_interface (void);
static int sh_issue_rate (void);
+static int sh_dfa_new_cycle (FILE *, int, rtx, int, int, int *sort_p);
+static short find_set_regmode_weight (rtx, enum machine_mode);
+static short find_insn_regmode_weight (rtx, enum machine_mode);
+static void find_regmode_weight (int, enum machine_mode);
+static void sh_md_init_global (FILE *, int, int);
+static void sh_md_finish_global (FILE *, int);
+static int rank_for_reorder (const void *, const void *);
+static void swap_reorder (rtx *, int);
+static void ready_reorder (rtx *, int);
+static short high_pressure (enum machine_mode);
+static int sh_reorder (FILE *, int, rtx *, int *, int);
+static int sh_reorder2 (FILE *, int, rtx *, int *, int);
+static void sh_md_init (FILE *, int, int);
+static int sh_variable_issue (FILE *, int, rtx, int);
+
static bool sh_function_ok_for_sibcall (tree, tree);
static bool sh_cannot_modify_jumps_p (void);
static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
struct save_schedule_s *, int);
-static bool sh_promote_prototypes (tree);
static rtx sh_struct_value_rtx (tree, int);
static bool sh_return_in_memory (tree, tree);
static rtx sh_builtin_saveregs (void);
static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
static tree sh_build_builtin_va_list (void);
+static tree sh_gimplify_va_arg_expr (tree, tree, tree *, tree *);
+static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, bool);
+static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, bool);
+static int sh_dwarf_calling_convention (tree);
\f
/* Initialize the GCC target structure. */
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sh_adjust_cost
-#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
-#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE \
- sh_use_dfa_interface
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sh_issue_rate
+/* The next 5 hooks have been implemented for re-enabling sched1.  With the
+   help of these macros we limit the movement of insns in sched1 to
+   reduce register pressure.  The overall idea is to keep count of the SImode
+   and SFmode regs required by already scheduled insns.  When these counts
+   cross certain threshold values, we give priority to insns that free
+   registers.  The insn that frees registers is most likely to be the insn
+   with the lowest LUID (original insn order); but such an insn might be
+   sitting in the stalled queue (Q) instead of the ready queue (R).  To
+   solve this, we skip cycles, up to a maximum of 8, so that such insns may
+   move from Q -> R.
+
+   The hooks are described below:
+
+   TARGET_SCHED_INIT_GLOBAL: Added a new target hook in the generic
+   scheduler; it is called inside the sched_init function, just after the
+   call to find_insn_reg_weights.  It is used to calculate the SImode and
+   SFmode weights of the insns in the basic blocks, much as
+   find_insn_reg_weights does.
+   TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.
+
+   TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
+   indicated by TARGET_SCHED_REORDER2; doing this may move insns from
+   Q -> R.
+
+   TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
+   high, reorder the ready queue so that the insn with the lowest LUID will
+   be issued next.
+
+   TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
+   TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.
+
+   TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
+   can be returned from TARGET_SCHED_REORDER2.
+
+   TARGET_SCHED_INIT: Reset the register pressure counting variables.  */
+
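+/* For illustration, a LUID-based ready-queue comparison of the kind
+   rank_for_reorder performs might look like the sketch below (disabled
+   with #if 0; the real rank_for_reorder is defined later in this file
+   and may differ in detail).  The scheduler picks insns from the end of
+   the ready array, so sorting by descending LUID puts the insn with the
+   lowest LUID (original order) in the issue position.  */
+#if 0
+static int
+example_rank_for_reorder (const void *x, const void *y)
+{
+  rtx a = *(const rtx *) x;
+  rtx b = *(const rtx *) y;
+
+  /* Descending LUID order: the lowest LUID sorts last, i.e. issues first.  */
+  return INSN_LUID (b) - INSN_LUID (a);
+}
+#endif
+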
+#undef TARGET_SCHED_DFA_NEW_CYCLE
+#define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle
+
+#undef TARGET_SCHED_INIT_GLOBAL
+#define TARGET_SCHED_INIT_GLOBAL sh_md_init_global
+
+#undef TARGET_SCHED_FINISH_GLOBAL
+#define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global
+
+#undef TARGET_SCHED_VARIABLE_ISSUE
+#define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue
+
+#undef TARGET_SCHED_REORDER
+#define TARGET_SCHED_REORDER sh_reorder
+
+#undef TARGET_SCHED_REORDER2
+#define TARGET_SCHED_REORDER2 sh_reorder2
+
+#undef TARGET_SCHED_INIT
+#define TARGET_SCHED_INIT sh_md_init
+
#undef TARGET_CANNOT_MODIFY_JUMPS_P
#define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
#undef TARGET_BRANCH_TARGET_REGISTER_CLASS
#define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
+#undef TARGET_CALLEE_COPIES
+#define TARGET_CALLEE_COPIES sh_callee_copies
#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
+#undef TARGET_GIMPLIFY_VA_ARG_EXPR
+#define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr
+
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p
#undef TARGET_PCH_VALID_P
#define TARGET_PCH_VALID_P sh_pch_valid_p
+#undef TARGET_DWARF_CALLING_CONVENTION
+#define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention
+
+/* Return regmode weight for insn.  */
+#define INSN_REGMODE_WEIGHT(INSN, MODE) \
+  regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]
+
+/* Return current register pressure for regmode.  */
+#define CURR_REGMODE_PRESSURE(MODE) \
+  curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]
+
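+/* A sketch of how these macros are meant to be used (illustrative only;
+   SIMODE_MAX_WEIGHT is an assumed threshold name here, and the real
+   bookkeeping lives in sh_variable_issue and high_pressure):  */
+#if 0
+  /* When INSN issues, account for its SImode register demand.  */
+  CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
+  /* Reordering kicks in once the running total crosses the threshold.  */
+  if (CURR_REGMODE_PRESSURE (SImode) >= SIMODE_MAX_WEIGHT)
+    ready_reorder (ready, n_ready);
+#endif
+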
+#ifdef SYMBIAN
+
+#undef TARGET_ENCODE_SECTION_INFO
+#define TARGET_ENCODE_SECTION_INFO sh_symbian_encode_section_info
+#undef TARGET_STRIP_NAME_ENCODING
+#define TARGET_STRIP_NAME_ENCODING sh_symbian_strip_name_encoding
+#undef TARGET_CXX_IMPORT_EXPORT_CLASS
+#define TARGET_CXX_IMPORT_EXPORT_CLASS symbian_import_export_class
+
+#endif /* SYMBIAN */
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Print the operand address in x to the stream. */
'T' print the next word of a dp value - same as 'R' in big endian mode.
'M' print an `x' if `m' will print `base,index'.
'N' print 'r63' if the operand is (const_int 0).
+ 'd' print a V2SF reg as dN instead of fpN.
'm' print a pair `base,offset' or `base,index', for LD and ST.
'u' prints the lowest 16 bits of CONST_INT, as an unsigned value.
'o' output an operator. */
}
break;
+ case 'd':
+ if (GET_CODE (x) != REG || GET_MODE (x) != V2SFmode)
+ abort ();
+
+ fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
+ break;
+
case 'N':
if (x == CONST0_RTX (GET_MODE (x)))
{
case MEM:
output_address (XEXP (x, 0));
break;
-
+
case CONST:
if (TARGET_SHMEDIA
&& GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
int constp = (GET_CODE (operands[2]) == CONST_INT);
int bytes = (constp ? INTVAL (operands[2]) : 0);
+ if (! constp)
+ return 0;
+
+ /* If we could use mov.l to move words and dest is word-aligned, we
+ can use movua.l for loads and still generate a relatively short
+ and efficient sequence. */
+ if (TARGET_SH4A_ARCH && align < 4
+ && MEM_ALIGN (operands[0]) >= 32
+ && can_move_by_pieces (bytes, 32))
+ {
+ rtx dest = copy_rtx (operands[0]);
+ rtx src = copy_rtx (operands[1]);
+ /* We could use different pseudos for each copied word, but
+ since movua can only load into r0, it's kind of
+ pointless. */
+ rtx temp = gen_reg_rtx (SImode);
+ rtx src_addr = copy_addr_to_reg (XEXP (src, 0));
+ int copied = 0;
+
+ while (copied + 4 <= bytes)
+ {
+ rtx to = adjust_address (dest, SImode, copied);
+ rtx from = adjust_automodify_address (src, SImode, src_addr, copied);
+
+ emit_insn (gen_movua (temp, from));
+ emit_move_insn (src_addr, plus_constant (src_addr, 4));
+ emit_move_insn (to, temp);
+ copied += 4;
+ }
+
+ if (copied < bytes)
+ move_by_pieces (adjust_address (dest, BLKmode, copied),
+ adjust_automodify_address (src, BLKmode,
+ src_addr, copied),
+ bytes - copied, align, 0);
+
+ return 1;
+ }
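+  /* For a constant 8-byte copy with an unaligned source and a
+     word-aligned destination, the loop above yields, roughly (sketch;
+     register numbers depend on allocation, and movua.l can only load
+     into r0):
+
+	movua.l	@r1+,r0
+	mov.l	r0,@r2
+	movua.l	@r1+,r0
+	mov.l	r0,@(4,r2)  */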
+
/* If it isn't a constant number of bytes, or if it doesn't have 4 byte
alignment, or if it isn't a multiple of 4 bytes, then fail. */
- if (! constp || align < 4 || (bytes % 4 != 0))
+ if (align < 4 || (bytes % 4 != 0))
return 0;
if (TARGET_HARD_SH4)
tree entry_name;
rtx sym;
rtx func_addr_rtx;
- rtx r4 = gen_rtx (REG, SImode, 4);
- rtx r5 = gen_rtx (REG, SImode, 5);
+ rtx r4 = gen_rtx_REG (SImode, 4);
+ rtx r5 = gen_rtx_REG (SImode, 5);
- entry_name = get_identifier ("__movstrSI12_i4");
+ entry_name = get_identifier ("__movmemSI12_i4");
sym = function_symbol (IDENTIFIER_POINTER (entry_name));
func_addr_rtx = copy_to_mode_reg (Pmode, sym);
rtx sym;
rtx func_addr_rtx;
int dwords;
- rtx r4 = gen_rtx (REG, SImode, 4);
- rtx r5 = gen_rtx (REG, SImode, 5);
- rtx r6 = gen_rtx (REG, SImode, 6);
+ rtx r4 = gen_rtx_REG (SImode, 4);
+ rtx r5 = gen_rtx_REG (SImode, 5);
+ rtx r6 = gen_rtx_REG (SImode, 6);
entry_name = get_identifier (bytes & 4
- ? "__movstr_i4_odd"
- : "__movstr_i4_even");
+ ? "__movmem_i4_odd"
+ : "__movmem_i4_even");
sym = function_symbol (IDENTIFIER_POINTER (entry_name));
func_addr_rtx = copy_to_mode_reg (Pmode, sym);
force_into (XEXP (operands[0], 0), r4);
rtx r4 = gen_rtx_REG (SImode, 4);
rtx r5 = gen_rtx_REG (SImode, 5);
- sprintf (entry, "__movstrSI%d", bytes);
+ sprintf (entry, "__movmemSI%d", bytes);
entry_name = get_identifier (entry);
sym = function_symbol (IDENTIFIER_POINTER (entry_name));
func_addr_rtx = copy_to_mode_reg (Pmode, sym);
rtx r5 = gen_rtx_REG (SImode, 5);
rtx r6 = gen_rtx_REG (SImode, 6);
- entry_name = get_identifier ("__movstr");
+ entry_name = get_identifier ("__movmem");
sym = function_symbol (IDENTIFIER_POINTER (entry_name));
func_addr_rtx = copy_to_mode_reg (Pmode, sym);
force_into (XEXP (operands[0], 0), r4);
{
rtx tga_op1, tga_ret, tmp, tmp2;
-
switch (tls_kind)
{
case TLS_MODEL_GLOBAL_DYNAMIC:
tga_ret = gen_rtx_REG (Pmode, R0_REG);
- emit_insn (gen_tls_global_dynamic (tga_ret, op1));
+ emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
op1 = tga_ret;
break;
case TLS_MODEL_LOCAL_DYNAMIC:
tga_ret = gen_rtx_REG (Pmode, R0_REG);
- emit_insn (gen_tls_local_dynamic (tga_ret, op1));
+ emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));
tmp = gen_reg_rtx (Pmode);
emit_move_insn (tmp, tga_ret);
case TLS_MODEL_INITIAL_EXEC:
if (! flag_pic)
- emit_insn (gen_GOTaddr2picreg ());
- tga_op1 = gen_reg_rtx (Pmode);
+ {
+	  /* Don't schedule insns for getting the GOT address when
+	     the first scheduling pass is enabled, to avoid spill
+	     failures for R0.  */
+ if (flag_schedule_insns)
+ emit_insn (gen_blockage ());
+ emit_insn (gen_GOTaddr2picreg ());
+ emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode,
+ PIC_REG)));
+ if (flag_schedule_insns)
+ emit_insn (gen_blockage ());
+ }
+ tga_op1 = no_new_pseudos ? op0 : gen_reg_rtx (Pmode);
tmp = gen_sym2GOTTPOFF (op1);
emit_insn (gen_tls_initial_exec (tga_op1, tmp));
op1 = tga_op1;
emit_insn (gen_load_gbr (tmp2));
tmp = gen_reg_rtx (Pmode);
emit_insn (gen_symTPOFF2reg (tmp, op1));
- RTX_UNCHANGING_P (tmp) = 1;
if (register_operand (op0, Pmode))
op1 = op0;
|| (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
sh_compare_op1 = force_reg (mode, sh_compare_op1);
- if (TARGET_SH4 && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
(mode == SFmode ? emit_sf_insn : emit_df_insn)
- (gen_rtx (PARALLEL, VOIDmode, gen_rtvec (2,
- gen_rtx (SET, VOIDmode, t_reg,
- gen_rtx (code, SImode,
- sh_compare_op0, sh_compare_op1)),
- gen_rtx (USE, VOIDmode, get_fpscr_rtx ()))));
+ (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, t_reg,
+ gen_rtx_fmt_ee (code, SImode,
+ sh_compare_op0, sh_compare_op1)),
+ gen_rtx_USE (VOIDmode, get_fpscr_rtx ()))));
else
- emit_insn (gen_rtx (SET, VOIDmode, t_reg,
- gen_rtx (code, SImode, sh_compare_op0,
- sh_compare_op1)));
+ emit_insn (gen_rtx_SET (VOIDmode, t_reg,
+ gen_rtx_fmt_ee (code, SImode,
+ sh_compare_op0, sh_compare_op1)));
return t_reg;
}
else
insn = gen_rtx_SET (VOIDmode,
gen_rtx_REG (SImode, T_REG),
- gen_rtx (code, SImode, sh_compare_op0,
- sh_compare_op1));
- if (TARGET_SH4 && GET_MODE_CLASS (mode) == MODE_FLOAT)
+ gen_rtx_fmt_ee (code, SImode,
+ sh_compare_op0, sh_compare_op1));
+ if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
{
- insn = gen_rtx (PARALLEL, VOIDmode,
+ insn = gen_rtx_PARALLEL (VOIDmode,
gen_rtvec (2, insn,
- gen_rtx (USE, VOIDmode, get_fpscr_rtx ())));
+ gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
(mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
}
else
static void
print_slot (rtx insn)
{
- final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 0, 1);
+ final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 0, 1, NULL);
INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
}
int label = lf++;
/* The call to print_slot will clobber the operands. */
rtx op0 = operands[0];
-
+
/* If the instruction in the delay slot is annulled (true), then
there is no delay slot where we can put it now. The only safe
place for it is after the label. final will do that by default. */
-
+
if (final_sequence
- && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0)))
+ && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
+ && get_attr_length (XVECEXP (final_sequence, 0, 1)))
{
asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
ASSEMBLER_DIALECT ? "/" : ".", label);
}
else
asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);
-
+
output_asm_insn ("bra\t%l0", &op0);
fprintf (asm_out_file, "\tnop\n");
- (*targetm.asm_out.internal_label)(asm_out_file, "LF", label);
-
+ (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
+
return "";
}
/* When relaxing, handle this like a short branch. The linker
case 4:
{
char buffer[10];
-
+
sprintf (buffer, "b%s%ss\t%%l0",
logic ? "t" : "f",
ASSEMBLER_DIALECT ? "/" : ".");
{
default_file_start ();
+#ifdef SYMBIAN
+ /* Declare the .directive section before it is used. */
+ fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
+ fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
+#endif
+
if (TARGET_ELF)
/* We need to show the text section with the proper
attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
- emits it without attributes in TEXT_SECTION, else GAS
+ emits it without attributes in TEXT_SECTION_ASM_OP, else GAS
will complain. We can teach GAS specifically about the
default attributes for our choice of text section, but
then we would have to change GAS again if/when we change
/* Likewise, but for shift amounts < 16, up to three highmost bits
might be clobbered. This is typically used when combined with some
kind of sign or zero extension. */
-
+
static const char ext_shift_insns[] =
{ 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
case ASHIFT:
return shift_insns[shift_count];
default:
- abort();
+ abort ();
}
}
/* Fall through. */
default:
- return 5;
+ return 5;
}
/* Any other constant requires a 2 cycle pc-relative load plus an
/* Output RTL to split a constant shift into its component SH constant
shift instructions. */
-
+
void
gen_shifty_op (int code, rtx *operands)
{
/* Truncate the shift count in case it is out of bounds. */
value = value & 0x1f;
-
+
if (value == 31)
{
if (code == LSHIFTRT)
for (i = 0; i < max; i++)
gen_ashift (code, shift_amounts[value][i], operands[0]);
}
-
+
/* Same as above, but optimized for values where the topmost bits don't
matter. */
mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
else
mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
- /* Can this be expressed as a right shift / left shift pair ? */
+ /* Can this be expressed as a right shift / left shift pair? */
lsb = ((mask ^ (mask - 1)) >> 1) + 1;
right = exact_log2 (lsb);
mask2 = ~(mask + lsb - 1);
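  /* E.g. for MASK == 0x1f8: MASK ^ (MASK - 1) == 0xf, so LSB == 8 (the
     lowest set bit of MASK), RIGHT == 3, and MASK2 == ~0x1ff, leaving
     only the bits above the low contiguous field set.  */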
int late_right = exact_log2 (lsb2);
best_cost = shift_insns[left + late_right] + shift_insns[late_right];
}
- /* Try to use zero extend */
+ /* Try to use zero extend. */
if (mask2 == ~(lsb2 - 1))
{
int width, first;
for (width = 8; width <= 16; width += 8)
{
- /* Can we zero-extend right away? */
- if (lsb2 == (unsigned HOST_WIDE_INT)1 << width)
+ /* Can we zero-extend right away? */
+ if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
{
cost
= 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
best_len = cost;
if (attrp)
attrp[2] = first;
- }
+ }
}
}
}
}
}
/* Try to use a scratch register to hold the AND operand. */
- can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT)3 << 30)) == 0;
+ can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
for (i = 0; i <= 2; i++)
{
if (i > right)
return len + shift_insns[INTVAL (XEXP (op, 1))];
}
-/* Generating rtl? */
-extern int rtx_equal_function_value_matters;
-
/* Generate rtl for instructions for which shl_and_kind advised a particular
method of generating them, i.e. returned zero. */
unsigned HOST_WIDE_INT mask;
int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
int right, total_shift;
- void (*shift_gen_fun) (int, rtx*) = gen_shifty_hi_op;
+ void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;
right = attributes[0];
total_shift = INTVAL (left_rtx) + right;
if (first < 0)
{
emit_insn ((mask << right) <= 0xff
- ? gen_zero_extendqisi2(dest,
- gen_lowpart (QImode, source))
- : gen_zero_extendhisi2(dest,
- gen_lowpart (HImode, source)));
+ ? gen_zero_extendqisi2 (dest,
+ gen_lowpart (QImode, source))
+ : gen_zero_extendhisi2 (dest,
+ gen_lowpart (HImode, source)));
source = dest;
}
if (source != dest)
}
if (first >= 0)
emit_insn (mask <= 0xff
- ? gen_zero_extendqisi2(dest, gen_lowpart (QImode, dest))
- : gen_zero_extendhisi2(dest, gen_lowpart (HImode, dest)));
+ ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
+ : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
if (total_shift > 0)
{
operands[2] = GEN_INT (total_shift);
/* If the topmost bit that matters is set, set the topmost bits
that don't matter. This way, we might be able to get a shorter
signed constant. */
- if (mask & ((HOST_WIDE_INT)1 << (31 - total_shift)))
- mask |= (HOST_WIDE_INT)~0 << (31 - total_shift);
+ if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
+ mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
case 2:
/* Don't expand fine-grained when combining, because that will
make the pattern fail. */
- if (rtx_equal_function_value_matters
+ if (currently_expanding_to_rtl
|| reload_in_progress || reload_completed)
{
rtx operands[3];
-
+
	  /* Cases 3 and 4 should be handled by this split
	     only while combining.  */
if (kind > 2)
/* Don't expand fine-grained when combining, because that will
make the pattern fail. */
- if (! rtx_equal_function_value_matters
+ if (! currently_expanding_to_rtl
&& ! reload_in_progress && ! reload_completed)
{
emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
gen_shifty_hi_op (ASHIFT, operands);
}
emit_insn (kind & 1
- ? gen_extendqisi2(dest, gen_lowpart (QImode, dest))
- : gen_extendhisi2(dest, gen_lowpart (HImode, dest)));
+ ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
+ : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
if (kind <= 2)
{
if (shift2)
{
operands[2] = GEN_INT (shift2 + 1);
gen_shifty_op (ASHIFT, operands);
- operands[2] = GEN_INT (1);
+ operands[2] = const1_rtx;
gen_shifty_op (ASHIFTRT, operands);
break;
}
case 5:
{
int i = 16 - size;
- if (! rtx_equal_function_value_matters
+ if (! currently_expanding_to_rtl
&& ! reload_in_progress && ! reload_completed)
emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
else
case 7:
/* Don't expand fine-grained when combining, because that will
make the pattern fail. */
- if (! rtx_equal_function_value_matters
+ if (! currently_expanding_to_rtl
&& ! reload_in_progress && ! reload_completed)
{
emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
gen_shifty_op (ASHIFT, operands);
if (kind == 7)
- emit_insn (gen_ashrsi3_k (dest, dest, GEN_INT (1)));
+ emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
break;
default:
return -1;
gen_rtx_UNSPEC (GET_MODE (sym),
gen_rtvec (1, sym),
UNSPEC_DATALABEL));
-
+
if (GET_CODE (sym) != SYMBOL_REF)
abort ();
return lab;
}
-/* Output the literal table. */
+/* Output the literal table. START, if nonzero, is the first instruction
+ this table is needed for, and also indicates that there is at least one
+   casesi_worker_2 instruction; we have to emit the operand3 labels from
+ these insns at a 4-byte aligned position. BARRIER is the barrier
+ after which we are to place the table. */
static void
-dump_table (rtx scan)
+dump_table (rtx start, rtx barrier)
{
+ rtx scan = barrier;
int i;
int need_align = 1;
rtx lab, ref;
need_align = 1;
+ if (start)
+ {
+ scan = emit_insn_after (gen_align_4 (), scan);
+ need_align = 0;
+ for (; start != barrier; start = NEXT_INSN (start))
+ if (GET_CODE (start) == INSN
+ && recog_memoized (start) == CODE_FOR_casesi_worker_2)
+ {
+ rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
+ rtx lab = XEXP (XVECEXP (src, 0, 3), 0);
+
+ scan = emit_label_after (lab, scan);
+ }
+ }
if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
{
rtx align_insn = NULL_RTX;
{
lab = XEXP (ref, 0);
emit_insn_before (gen_consttable_window_end (lab),
- align_insn);
+ align_insn);
}
delete_insn (align_insn);
align_insn = NULL_RTX;
pool_size = 0;
}
-
+
for (i = 0; i < pool_size; i++)
{
pool_node *p = &pool_vector[i];
== SCRATCH))
&& GET_CODE (SET_DEST (pat)) == REG
&& FP_REGISTER_P (REGNO (SET_DEST (pat))))
+ && ! (TARGET_SH2A
+ && GET_MODE (SET_DEST (pat)) == SImode
+ && GET_CODE (SET_SRC (pat)) == CONST_INT
+ && CONST_OK_FOR_I20 (INTVAL (SET_SRC (pat))))
&& (GET_CODE (SET_SRC (pat)) != CONST_INT
|| ! CONST_OK_FOR_I08 (INTVAL (SET_SRC (pat)))))
return 1;
&& GET_CODE (XVECEXP (SET_SRC (PATTERN (insn)), 0, 0)) == LABEL_REF);
}
+/* Fix up a mova from a switch that went out of range. */
+static void
+fixup_mova (rtx mova)
+{
+ if (! flag_pic)
+ {
+ SET_SRC (PATTERN (mova)) = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
+ INSN_CODE (mova) = -1;
+ }
+ else
+ {
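+      /* With PIC, the mova's operand is a PIC-relative label difference
+	 computed by a trailing casesi_worker_1 insn.  Rewrite that worker
+	 as a casesi_worker_2 with a fresh label LAB, and make the mova
+	 load the UNSPEC_PIC difference between its old target and LAB.  */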
+ rtx worker = mova;
+ rtx lab = gen_label_rtx ();
+ rtx wpat, wpat0, wpat1, wsrc, diff;
+
+ do
+ {
+ worker = NEXT_INSN (worker);
+ if (! worker
+ || GET_CODE (worker) == CODE_LABEL
+ || GET_CODE (worker) == JUMP_INSN)
+ abort ();
+ } while (recog_memoized (worker) != CODE_FOR_casesi_worker_1);
+ wpat = PATTERN (worker);
+ wpat0 = XVECEXP (wpat, 0, 0);
+ wpat1 = XVECEXP (wpat, 0, 1);
+ wsrc = SET_SRC (wpat0);
+ PATTERN (worker) = (gen_casesi_worker_2
+ (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
+ XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
+ XEXP (wpat1, 0)));
+ INSN_CODE (worker) = -1;
+ diff = gen_rtx_MINUS (Pmode, XVECEXP (SET_SRC (PATTERN (mova)), 0, 0),
+ gen_rtx_LABEL_REF (Pmode, lab));
+ diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, diff), UNSPEC_PIC);
+ SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
+ INSN_CODE (mova) = -1;
+ }
+}
+
/* Find the last barrier from insn FROM which is close enough to hold the
constant pool. If we can't find one, then create one near the end of
the range. */
{
/* Try as we might, the leading mova is out of range. Change
it into a load (which will become a pcload) and retry. */
- SET_SRC (PATTERN (mova)) = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
- INSN_CODE (mova) = -1;
+ fixup_mova (mova);
return find_barrier (0, 0, mova);
}
else
case SUBREG:
{
rtx y = SUBREG_REG (x);
-
+
if (GET_CODE (y) != REG)
break;
if (REGNO (y) < 16)
pass 1. Pass 2 if a definite blocking insn is needed.
-1 is used internally to avoid deep recursion.
If a blocking instruction is made or recognized, return it. */
-
+
static rtx
gen_block_redirect (rtx jump, int addr, int need_block)
{
else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
need_block = 0;
}
+ if (GET_CODE (PATTERN (jump)) == RETURN)
+ {
+ if (! need_block)
+ return prev;
+ /* Reorg even does nasty things with return insns that cause branches
+ to go out of range - see find_end_label and callers. */
+ return emit_insn_before (gen_block_branch_redirect (const0_rtx) , jump);
+ }
/* We can't use JUMP_LABEL here because it might be undefined
when not optimizing. */
dest = XEXP (SET_SRC (PATTERN (jump)), 0);
it would cause trouble if an interrupt occurred. */
unsigned try = 0x7fff, used;
int jump_left = flag_expensive_optimizations + 1;
-
+
/* It is likely that the most recent eligible instruction is wanted for
the delay slot. Therefore, find out which registers it uses, and
try to avoid using them. */
-
+
for (scan = jump; (scan = PREV_INSN (scan)); )
{
enum rtx_code code;
if (INSN_DELETED_P (scan))
continue;
code = GET_CODE (scan);
- if (GET_RTX_CLASS (code) == 'i')
+ if (INSN_P (scan))
{
used |= regs_used (PATTERN (scan), 0);
if (code == CALL_INSN)
threading with a jump beyond the delay slot insn.
Don't check if we are called recursively; the jump has been or will be
checked in a different invocation then. */
-
+
else if (optimize && need_block >= 0)
{
rtx next = next_active_insn (next_active_insn (dest));
Hence, after delay slot scheduling, we'll have to expect
NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
the jump. */
-
+
INSN_LOCATOR (insn) = INSN_LOCATOR (jump);
INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
return insn;
JUMP_LABEL (jump) = bp->far_label;
if (! invert_jump (insn, label, 1))
abort ();
- (emit_insn_after
- (gen_stuff_delay_slot
- (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
- GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
- insn));
+ /* If we are branching around a jump (rather than a return), prevent
+ reorg from using an insn from the jump target as the delay slot insn -
+ when reorg did this, it pessimized code (we rather hide the delay slot)
+ and it could cause branches to go out of range. */
+ if (bp->far_label)
+ (emit_insn_after
+ (gen_stuff_delay_slot
+ (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
+ GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
+ insn));
/* Prevent reorg from undoing our splits. */
gen_block_redirect (jump, bp->address += 2, 2);
}
if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
break;
}
+ /* FIXME: This is a bug in the optimizer, but it seems harmless
+	 to just avoid panicking.  */
+ if (!prev)
+ continue;
/* Emit the reference label of the braf where it belongs, right after
the casesi_jump_2 (i.e. braf). */
{
rtx next = next_real_insn (barrier_or_label), pat, prev;
int slot, credit, jump_to_next = 0;
-
+
if (! next)
return 0;
the table to the minimum for proper code alignment. */
return ((TARGET_SMALLCODE
|| ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
- <= (unsigned)1 << (CACHE_LOG - 2)))
+ <= (unsigned) 1 << (CACHE_LOG - 2)))
? 1 << TARGET_SHMEDIA : align_jumps_log);
}
an alignment, against that of fetching unneeded insn in front of the
branch target when there is no alignment. */
- /* There are two delay_slot cases to consider. One is the simple case
- where the preceding branch is to the insn beyond the barrier (simple
- delay slot filling), and the other is where the preceding branch has
- a delay slot that is a duplicate of the insn after the barrier
- (fill_eager_delay_slots) and the branch is to the insn after the insn
+ /* There are two delay_slot cases to consider. One is the simple case
+ where the preceding branch is to the insn beyond the barrier (simple
+ delay slot filling), and the other is where the preceding branch has
+ a delay slot that is a duplicate of the insn after the barrier
+ (fill_eager_delay_slots) and the branch is to the insn after the insn
after the barrier. */
/* PREV is presumed to be the JUMP_INSN for the barrier under
if (GET_CODE (PATTERN (prev)) == SEQUENCE)
{
prev = XVECEXP (PATTERN (prev), 0, 1);
- if (INSN_UID (prev) == INSN_UID (next))
+ if (INSN_UID (prev) == INSN_UID (next))
{
/* Delay slot was filled with insn at jump target. */
jump_to_next = 1;
/* There is no upper bound on redundant instructions
that might have been skipped, but we must not put an
alignment where none had been before. */
- || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
- (INSN_P (x)
+ || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
+ (INSN_P (x)
&& (INSN_CODE (x) == CODE_FOR_block_branch_redirect
|| INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
|| INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
}
}
}
-
+
return align_jumps_log;
}
{
if (mova_p (insn))
{
- if (! num_mova++)
+ /* ??? basic block reordering can move a switch table dispatch
+ below the switch table. Check if that has happened.
+ We only have the addresses available when optimizing; but then,
+ this check shouldn't be needed when not optimizing. */
+ rtx label_ref = XVECEXP (SET_SRC (PATTERN (insn)), 0, 0);
+ if (optimize
+ && (INSN_ADDRESSES (INSN_UID (insn))
+ > INSN_ADDRESSES (INSN_UID (XEXP (label_ref, 0)))))
+ {
+ /* Change the mova into a load.
+ broken_move will then return true for it. */
+ fixup_mova (insn);
+ }
+ else if (! num_mova++)
mova = insn;
}
else if (GET_CODE (insn) == JUMP_INSN
{
/* Change the mova into a load, and restart scanning
there. broken_move will then return true for mova. */
- SET_SRC (PATTERN (mova))
- = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
- INSN_CODE (mova) = -1;
+ fixup_mova (mova);
insn = mova;
}
}
- if (broken_move (insn))
+ if (broken_move (insn)
+ || (GET_CODE (insn) == INSN
+ && recog_memoized (insn) == CODE_FOR_casesi_worker_2))
{
rtx scan;
/* Scan ahead looking for a barrier to stick the constant table
behind. */
rtx barrier = find_barrier (num_mova, mova, insn);
rtx last_float_move = NULL_RTX, last_float = 0, *last_float_addr = NULL;
+ int need_aligned_label = 0;
if (num_mova && ! mova_p (mova))
{
{
if (GET_CODE (scan) == CODE_LABEL)
last_float = 0;
+ if (GET_CODE (scan) == INSN
+ && recog_memoized (scan) == CODE_FOR_casesi_worker_2)
+ need_aligned_label = 1;
if (broken_move (scan))
{
rtx *patp = &PATTERN (scan), pat = *patp;
}
dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
}
-
if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
{
/* This must be an insn that clobbers r0. */
- rtx clobber = XVECEXP (PATTERN (scan), 0,
- XVECLEN (PATTERN (scan), 0) - 1);
+ rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
+ XVECLEN (PATTERN (scan), 0)
+ - 1);
+ rtx clobber = *clobberp;
if (GET_CODE (clobber) != CLOBBER
|| ! rtx_equal_p (XEXP (clobber, 0), r0_rtx))
}
last_float_move = scan;
last_float = src;
- newsrc = gen_rtx (MEM, mode,
+ newsrc = gen_rtx_MEM (mode,
(((TARGET_SH4 && ! TARGET_FMOVD)
|| REGNO (dst) == FPUL_REG)
? r0_inc_rtx
last_float_addr = &XEXP (newsrc, 0);
/* Remove the clobber of r0. */
- XEXP (clobber, 0) = gen_rtx_SCRATCH (Pmode);
- RTX_UNCHANGING_P (newsrc) = 1;
+ *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
+ gen_rtx_SCRATCH (Pmode));
}
/* This is a mova needing a label. Create it. */
else if (GET_CODE (src) == UNSPEC
else
{
lab = add_constant (src, mode, 0);
- newsrc = gen_rtx_MEM (mode,
- gen_rtx_LABEL_REF (VOIDmode, lab));
- RTX_UNCHANGING_P (newsrc) = 1;
+ newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
+ newsrc = gen_const_mem (mode, newsrc);
}
*patp = gen_rtx_SET (VOIDmode, dst, newsrc);
INSN_CODE (scan) = -1;
}
}
- dump_table (barrier);
+ dump_table (need_aligned_label ? insn : 0, barrier);
insn = barrier;
}
}
if (type == TYPE_CBRANCH)
{
rtx next, beyond;
-
+
if (get_attr_length (insn) > 4)
{
rtx src = SET_SRC (PATTERN (insn));
rtx label = 0;
int dest_uid = get_dest_uid (olabel, max_uid);
struct far_branch *bp = uid_branch[dest_uid];
-
+
/* redirect_jump needs a valid JUMP_LABEL, and it might delete
the label if the LABEL_NUSES count drops to zero. There is
always a jump_optimize pass that sets these values, but it
beyond
= next_active_insn (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
0));
-
+
if (beyond
&& (GET_CODE (beyond) == JUMP_INSN
|| ((beyond = next_active_insn (beyond))
gen_block_redirect (beyond,
INSN_ADDRESSES (INSN_UID (beyond)), 1);
}
-
+
next = next_active_insn (insn);
if ((GET_CODE (next) == JUMP_INSN
- || GET_CODE (next = next_active_insn (next)) == JUMP_INSN)
+ || ((next = next_active_insn (next))
+ && GET_CODE (next) == JUMP_INSN))
&& GET_CODE (PATTERN (next)) == SET
&& recog_memoized (next) == CODE_FOR_jump_compact
&& ((INSN_ADDRESSES
/* Number of bytes pushed for anonymous args, used to pass information
between expand_prologue and expand_epilogue. */
-static int extra_push;
-
/* Adjust the stack by SIZE bytes. REG holds the rtl of the register to be
adjusted. If epilogue_p is zero, this is for a prologue; otherwise, it's
- for an epilogue. If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET
- of all the registers that are about to be restored, and hence dead. */
+ for an epilogue and a negative value means that it's for a sibcall
+ epilogue. If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET of
+ all the registers that are about to be restored, and hence dead. */
static void
output_stack_adjust (int size, rtx reg, int epilogue_p,
{
HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
+/* This test is bogus, as output_stack_adjust is used to re-align the
+ stack. */
+#if 0
if (size % align)
abort ();
+#endif
if (CONST_OK_FOR_ADD (size))
emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
/* If TEMP is invalid, we could temporarily save a general
register to MACL. However, there is currently no need
to handle this case, so just abort when we see it. */
- if (current_function_interrupt
- || ! call_used_regs[temp] || fixed_regs[temp])
+ if (epilogue_p < 0
+ || current_function_interrupt
+ || ! call_really_used_regs[temp] || fixed_regs[temp])
temp = -1;
- if (temp < 0 && ! current_function_interrupt)
+ if (temp < 0 && ! current_function_interrupt
+ && (TARGET_SHMEDIA || epilogue_p >= 0))
{
HARD_REG_SET temps;
COPY_HARD_REG_SET (temps, call_used_reg_set);
AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
- if (epilogue_p)
+ if (epilogue_p > 0)
{
- for (i = 0; i < HARD_REGNO_NREGS (FIRST_RET_REG, DImode); i++)
+ int nreg = 0;
+ if (current_function_return_rtx)
+ {
+ enum machine_mode mode;
+ mode = GET_MODE (current_function_return_rtx);
+ if (BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG)
+ nreg = HARD_REGNO_NREGS (FIRST_RET_REG, mode);
+ }
+ for (i = 0; i < nreg; i++)
CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
if (current_function_calls_eh_return)
{
CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
}
}
- else
+ if (TARGET_SHMEDIA && epilogue_p < 0)
+ for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
+ CLEAR_HARD_REG_BIT (temps, i);
+ if (epilogue_p <= 0)
{
for (i = FIRST_PARM_REG;
i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
CLEAR_HARD_REG_BIT (temps, i);
- if (current_function_needs_context)
+ if (cfun->static_chain_decl != NULL)
CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
}
temp = scavenge_reg (&temps);
if (temp < 0 && live_regs_mask)
temp = scavenge_reg (live_regs_mask);
if (temp < 0)
- abort ();
+ {
+ /* If we reached here, the most likely case is the (sibcall)
+	     epilogue for non-SHmedia.  Put a special push/pop sequence
+	     for such a case as a last resort.  This looks lengthy but
+	     would not be a problem because it seems to be very rare.  */
+ if (! TARGET_SHMEDIA && epilogue_p)
+ {
+ rtx adj_reg, tmp_reg, mem;
+
+ /* ??? There is still the slight possibility that r4 or r5
+ have been reserved as fixed registers or assigned as
+ global registers, and they change during an interrupt.
+ There are possible ways to handle this:
+ - If we are adjusting the frame pointer (r14), we can do
+ with a single temp register and an ordinary push / pop
+ on the stack.
+ - Grab any call-used or call-saved registers (i.e. not
+ fixed or globals) for the temps we need. We might
+ also grab r14 if we are adjusting the stack pointer.
+ If we can't find enough available registers, issue
+ a diagnostic and abort - the user must have reserved
+ way too many registers.
+ But since all this is rather unlikely to happen and
+ would require extra testing, we just abort if r4 / r5
+ are not available. */
+ if (fixed_regs[4] || fixed_regs[5]
+ || global_regs[4] || global_regs[5])
+ abort ();
+
+ adj_reg = gen_rtx_REG (GET_MODE (reg), 4);
+ tmp_reg = gen_rtx_REG (GET_MODE (reg), 5);
+ emit_move_insn (gen_rtx_MEM (Pmode, reg), adj_reg);
+ emit_insn (GEN_MOV (adj_reg, GEN_INT (size)));
+ emit_insn (GEN_ADD3 (adj_reg, adj_reg, reg));
+ mem = gen_rtx_MEM (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
+ emit_move_insn (mem, tmp_reg);
+ emit_move_insn (tmp_reg, gen_rtx_MEM (Pmode, reg));
+ mem = gen_rtx_MEM (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
+ emit_move_insn (mem, tmp_reg);
+ emit_move_insn (reg, adj_reg);
+ mem = gen_rtx_MEM (Pmode, gen_rtx_POST_INC (Pmode, reg));
+ emit_move_insn (adj_reg, mem);
+ mem = gen_rtx_MEM (Pmode, gen_rtx_POST_INC (Pmode, reg));
+ emit_move_insn (tmp_reg, mem);
+ return;
+ }
+ else
+ abort ();
+ }
const_reg = gen_rtx_REG (GET_MODE (reg), temp);
/* If SIZE is negative, subtract the positive value.
x = gen_push_fpul ();
else if (rn == FPSCR_REG)
x = gen_push_fpscr ();
- else if (TARGET_SH4 && TARGET_FMOVD && ! TARGET_FPU_SINGLE
+  else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE)
+	   && TARGET_FMOVD && ! TARGET_FPU_SINGLE
&& FP_OR_XD_REGISTER_P (rn))
{
if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
x = gen_pop_fpul ();
else if (rn == FPSCR_REG)
x = gen_pop_fpscr ();
- else if (TARGET_SH4 && TARGET_FMOVD && ! TARGET_FPU_SINGLE
+  else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE)
+	   && TARGET_FMOVD && ! TARGET_FPU_SINGLE
&& FP_OR_XD_REGISTER_P (rn))
{
if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
x = gen_pop_e (gen_rtx_REG (SFmode, rn));
else
x = gen_pop (gen_rtx_REG (SImode, rn));
-
+
x = emit_insn (x);
REG_NOTES (x)
= gen_rtx_EXPR_LIST (REG_INC,
HARD_REG_SET unsaved;
push (FPSCR_REG);
- COMPL_HARD_REG_SET(unsaved, *mask);
+ COMPL_HARD_REG_SET (unsaved, *mask);
fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
skip_fpscr = 1;
}
int interrupt_handler = sh_cfun_interrupt_handler_p ();
for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
- if ((! call_used_regs[reg] || interrupt_handler)
+ if ((! call_really_used_regs[reg] || interrupt_handler)
&& ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
/* Leave space to save this target register on the stack,
in case target register allocation wants to use it. */
stack_space += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
return stack_space;
}
-
+
/* Decide whether we should reserve space for callee-save target registers,
in case target register allocation wants to use them. REGS_SAVED is
the space, in bytes, that is already required for register saves.
static int
calc_live_regs (HARD_REG_SET *live_regs_mask)
{
- int reg;
+ unsigned int reg;
int count;
int interrupt_handler;
int pr_live, has_call;
interrupt_handler = sh_cfun_interrupt_handler_p ();
CLEAR_HARD_REG_SET (*live_regs_mask);
- if (TARGET_SH4 && TARGET_FMOVD && interrupt_handler
+ if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && interrupt_handler
&& regs_ever_live[FPSCR_REG])
target_flags &= ~FPU_SINGLE_BIT;
/* If we can save a lot of saves by switching to double mode, do that. */
- else if (TARGET_SH4 && TARGET_FMOVD && TARGET_FPU_SINGLE)
+  else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE)
+	   && TARGET_FMOVD && TARGET_FPU_SINGLE)
for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
if (regs_ever_live[reg] && regs_ever_live[reg+1]
- && (! call_used_regs[reg] || (interrupt_handler && ! pragma_trapa))
+ && (! call_really_used_regs[reg]
+ || (interrupt_handler && ! pragma_trapa))
&& ++count > 2)
{
target_flags &= ~FPU_SINGLE_BIT;
|| current_function_has_nonlocal_label))
pr_live = 1;
has_call = TARGET_SHMEDIA ? ! leaf_function_p () : pr_live;
- for (count = 0, reg = FIRST_PSEUDO_REGISTER - 1; reg >= 0; reg--)
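+  /* REG is unsigned, so count down from FIRST_PSEUDO_REGISTER with a
+     post-decrement test; "reg >= 0" would be vacuously true.  */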
+ for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
{
if (reg == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
? pr_live
: (interrupt_handler && ! pragma_trapa)
? (/* Need to save all the regs ever live. */
(regs_ever_live[reg]
- || (call_used_regs[reg]
- && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG)
+ || (call_really_used_regs[reg]
+ && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
+ || reg == PIC_OFFSET_TABLE_REGNUM)
&& has_call)
|| (has_call && REGISTER_NATURAL_MODE (reg) == SImode
&& (GENERAL_REGISTER_P (reg) || TARGET_REGISTER_P (reg))))
(TARGET_SHCOMPACT
&& flag_pic
&& current_function_args_info.call_cookie
- && reg == (int) PIC_OFFSET_TABLE_REGNUM)
- || (regs_ever_live[reg] && ! call_used_regs[reg])
+ && reg == PIC_OFFSET_TABLE_REGNUM)
+ || (regs_ever_live[reg] && ! call_really_used_regs[reg])
|| (current_function_calls_eh_return
- && (reg == (int) EH_RETURN_DATA_REGNO (0)
- || reg == (int) EH_RETURN_DATA_REGNO (1)
- || reg == (int) EH_RETURN_DATA_REGNO (2)
- || reg == (int) EH_RETURN_DATA_REGNO (3)))
+ && (reg == EH_RETURN_DATA_REGNO (0)
+ || reg == EH_RETURN_DATA_REGNO (1)
+ || reg == EH_RETURN_DATA_REGNO (2)
+ || reg == EH_RETURN_DATA_REGNO (3)))
|| ((reg == MACL_REG || reg == MACH_REG)
&& regs_ever_live[reg]
&& sh_cfun_attr_renesas_p ())
SET_HARD_REG_BIT (*live_regs_mask, reg);
count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
- if ((TARGET_SH4 || TARGET_SH5) && TARGET_FMOVD
+ if ((TARGET_SH4 || TARGET_SH2A_DOUBLE || TARGET_SH5) && TARGET_FMOVD
&& GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
{
if (FP_REGISTER_P (reg))
&& TARGET_SAVE_ALL_TARGET_REGS
&& shmedia_space_reserved_for_target_registers)
for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
- if ((! call_used_regs[reg] || interrupt_handler)
+ if ((! call_really_used_regs[reg] || interrupt_handler)
&& ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
{
SET_HARD_REG_BIT (*live_regs_mask, reg);
tr0_used = flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM];
for (regno = FIRST_TARGET_REG + tr0_used; regno <= LAST_TARGET_REG; regno++)
- if (call_used_regs[regno] && ! regs_ever_live[regno])
+ if (call_really_used_regs[regno] && ! regs_ever_live[regno])
return regno;
return -1;
use reverse order. Returns the last entry written to (not counting
the delimiter). OFFSET_BASE is a number to be added to all offset
entries. */
-
+
static save_entry *
sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
int offset_base)
if (! current_function_interrupt)
for (i = FIRST_GENERAL_REG; tmpx < MAX_TEMPS && i <= LAST_GENERAL_REG; i++)
- if (call_used_regs[i] && ! fixed_regs[i] && i != PR_MEDIA_REG
+ if (call_really_used_regs[i] && ! fixed_regs[i] && i != PR_MEDIA_REG
&& ! FUNCTION_ARG_REGNO_P (i)
&& i != FIRST_RET_REG
- && ! (current_function_needs_context && i == STATIC_CHAIN_REGNUM)
+ && ! (cfun->static_chain_decl != NULL && i == STATIC_CHAIN_REGNUM)
&& ! (current_function_calls_eh_return
&& (i == EH_RETURN_STACKADJ_REGNO
- || ((unsigned)i <= EH_RETURN_DATA_REGNO (0)
- && (unsigned)i >= EH_RETURN_DATA_REGNO (3)))))
+ || ((unsigned) i >= EH_RETURN_DATA_REGNO (0)
+ && (unsigned) i <= EH_RETURN_DATA_REGNO (3)))))
schedule->temps[tmpx++] = i;
entry->reg = -1;
entry->mode = VOIDmode;
int d, i;
int d_rounding = 0;
int save_flags = target_flags;
+ int pretend_args;
current_function_interrupt = sh_cfun_interrupt_handler_p ();
/* We have pretend args if we had an object sent partially in registers
and partially on the stack, e.g. a large structure. */
- output_stack_adjust (-current_function_pretend_args_size
+ pretend_args = current_function_pretend_args_size;
+ if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
+ && (NPARM_REGS(SImode)
+ > current_function_args_info.arg_count[(int) SH_ARG_INT]))
+ pretend_args = 0;
+ output_stack_adjust (-pretend_args
- current_function_args_info.stack_regs * 8,
stack_pointer_rtx, 0, NULL);
- extra_push = 0;
-
if (TARGET_SHCOMPACT && flag_pic && current_function_args_info.call_cookie)
/* We're going to use the PIC register to load the address of the
incoming-argument decoder and/or of the return trampoline from
/* Emit the code for SETUP_VARARGS. */
if (current_function_stdarg)
{
- /* This is not used by the SH2E calling convention */
- if (TARGET_SH1 && ! TARGET_SH2E && ! TARGET_SH5
- && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
+ if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
{
/* Push arg regs as if they'd been provided by caller in stack. */
for (i = 0; i < NPARM_REGS(SImode); i++)
int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
rtx insn;
- if (i >= (NPARM_REGS(SImode)
+ if (i >= (NPARM_REGS(SImode)
- current_function_args_info.arg_count[(int) SH_ARG_INT]
))
break;
insn = push (rn);
RTX_FRAME_RELATED_P (insn) = 0;
- extra_push += 4;
}
}
}
that already happens to be at the function start into the prologue. */
if (target_flags != save_flags && ! current_function_interrupt)
emit_insn (gen_toggle_sz ());
-
+
if (TARGET_SH5)
{
int offset_base, offset;
save_entry *entry;
int *tmp_pnt;
- if (call_used_regs[R0_REG] && ! fixed_regs[R0_REG]
+ if (call_really_used_regs[R0_REG] && ! fixed_regs[R0_REG]
&& ! current_function_interrupt)
r0 = gen_rtx_REG (Pmode, R0_REG);
for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
{
enum machine_mode mode = entry->mode;
- int reg = entry->reg;
+ unsigned int reg = entry->reg;
rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
+ rtx orig_reg_rtx;
offset = entry->offset;
GEN_INT (offset - offset_in_r0)));
offset_in_r0 += offset - offset_in_r0;
}
-
+
if (pre_dec != NULL_RTX)
{
if (! sp_in_r0)
abort ();
addr_ok:
+ orig_reg_rtx = reg_rtx;
if (TARGET_REGISTER_P (reg)
|| ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
&& mem_rtx != pre_dec))
/* Mark as interesting for dwarf cfi generator */
insn = emit_move_insn (mem_rtx, reg_rtx);
RTX_FRAME_RELATED_P (insn) = 1;
+ /* If we use an intermediate register for the save, we can't
+ describe this exactly in cfi as a copy of the to-be-saved
+ register into the temporary register and then the temporary
+ register on the stack, because the temporary register can
+ have a different natural size than the to-be-saved register.
+ Thus, we gloss over the intermediate copy and pretend we do
+ a direct save from the to-be-saved register. */
+ if (REGNO (reg_rtx) != reg)
+ {
+ rtx set, note_rtx;
- if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
+ set = gen_rtx_SET (VOIDmode, mem_rtx, orig_reg_rtx);
+ note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
+ REG_NOTES (insn));
+ REG_NOTES (insn) = note_rtx;
+ }
+
+ if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
{
rtx reg_rtx = gen_rtx_REG (mode, reg);
rtx set, note_rtx;
}
void
-sh_expand_epilogue (void)
+sh_expand_epilogue (bool sibcall_p)
{
HARD_REG_SET live_regs_mask;
int d, i;
int save_flags = target_flags;
int frame_size, save_size;
int fpscr_deferred = 0;
+ int e = sibcall_p ? -1 : 1;
d = calc_live_regs (&live_regs_mask);
if (frame_pointer_needed)
{
- output_stack_adjust (frame_size, frame_pointer_rtx, 1, &live_regs_mask);
+ /* We must avoid scheduling the epilogue with previous basic blocks
+ when exception handling is enabled. See PR/18032. */
+ if (flag_exceptions)
+ emit_insn (gen_blockage ());
+ output_stack_adjust (frame_size, frame_pointer_rtx, e, &live_regs_mask);
/* We must avoid moving the stack pointer adjustment past code
which reads from the local frame, else an interrupt could
occur after the SP adjustment and clobber data in the local
frame. */
emit_insn (gen_blockage ());
- output_stack_adjust (frame_size, stack_pointer_rtx, 1, &live_regs_mask);
+ output_stack_adjust (frame_size, stack_pointer_rtx, e, &live_regs_mask);
}
if (SHMEDIA_REGS_STACK_ADJUST ())
post_inc = NULL_RTX;
break;
-
+
post_inc_ok:
mem_rtx = NULL_RTX;
}
while (0);
-
+
if (mem_rtx != NULL_RTX)
goto addr_ok;
GEN_INT (offset - offset_in_r0)));
offset_in_r0 += offset - offset_in_r0;
}
-
+
if (post_inc != NULL_RTX)
{
if (! sp_in_r0)
(Pmode, r0, stack_pointer_rtx));
sp_in_r0 = 1;
}
-
+
mem_rtx = post_inc;
offset_in_r0 += GET_MODE_SIZE (mode);
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
int j = (FIRST_PSEUDO_REGISTER - 1) - i;
-
+
if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
&& hard_regs_intersect_p (&live_regs_mask,
®_class_contents[DF_REGS]))
emit_insn (gen_toggle_sz ());
target_flags = save_flags;
- output_stack_adjust (extra_push + current_function_pretend_args_size
+ output_stack_adjust (current_function_pretend_args_size
+ save_size + d_rounding
+ current_function_args_info.stack_regs * 8,
- stack_pointer_rtx, 1, NULL);
+ stack_pointer_rtx, e, NULL);
if (current_function_calls_eh_return)
emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
rtx epilogue;
start_sequence ();
- sh_expand_epilogue ();
+ sh_expand_epilogue (0);
epilogue = get_insns ();
end_sequence ();
sh_need_epilogue_known = (epilogue == NULL ? -1 : 1);
int offset;
save_schedule schedule;
save_entry *entry;
-
+
entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
offset = entry[1].offset;
for (; entry->mode != VOIDmode; entry--)
if (TARGET_SHCOMPACT)
return const0_rtx;
}
-
+
if (! TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH5)
{
error ("__builtin_saveregs not supported by this subtarget");
emit_move_insn (fpregs, XEXP (regbuf, 0));
emit_insn (gen_addsi3 (fpregs, fpregs,
GEN_INT (n_floatregs * UNITS_PER_WORD)));
- if (TARGET_SH4)
+ if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
{
rtx mem;
for (regno = NPARM_REGS (DFmode) - 2; regno >= first_floatreg; regno -= 2)
GEN_INT (-2 * UNITS_PER_WORD)));
mem = gen_rtx_MEM (DFmode, fpregs);
set_mem_alias_set (mem, alias_set);
- emit_move_insn (mem,
- gen_rtx (REG, DFmode, BASE_ARG_REG (DFmode) + regno));
+ emit_move_insn (mem,
+ gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
}
regno = first_floatreg;
if (regno & 1)
{
- emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (- UNITS_PER_WORD)));
+ emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
mem = gen_rtx_MEM (SFmode, fpregs);
set_mem_alias_set (mem, alias_set);
emit_move_insn (mem,
- gen_rtx (REG, SFmode, BASE_ARG_REG (SFmode) + regno
+ gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno
- (TARGET_LITTLE_ENDIAN != 0)));
}
}
{
rtx mem;
- emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (- UNITS_PER_WORD)));
+ emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
mem = gen_rtx_MEM (SFmode, fpregs);
set_mem_alias_set (mem, alias_set);
emit_move_insn (mem,
f_next_fp_limit = TREE_CHAIN (f_next_fp);
f_next_stack = TREE_CHAIN (f_next_fp_limit);
- next_o = build (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o);
+ next_o = build (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
+ NULL_TREE);
next_o_limit = build (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
- valist, f_next_o_limit);
- next_fp = build (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp);
+ valist, f_next_o_limit, NULL_TREE);
+ next_fp = build (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
+ NULL_TREE);
next_fp_limit = build (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
- valist, f_next_fp_limit);
+ valist, f_next_fp_limit, NULL_TREE);
next_stack = build (COMPONENT_REF, TREE_TYPE (f_next_stack),
- valist, f_next_stack);
+ valist, f_next_stack, NULL_TREE);
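+  /* COMPONENT_REF takes a fourth operand here, an optional field-offset
+     override; NULL_TREE means to use DECL_FIELD_OFFSET of the field.  */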
/* Call __builtin_saveregs. */
u = make_tree (ptr_type_node, expand_builtin_saveregs ());
else
nfp = 0;
u = fold (build (PLUS_EXPR, ptr_type_node, u,
- build_int_2 (UNITS_PER_WORD * nfp, 0)));
+ build_int_cst (NULL_TREE, UNITS_PER_WORD * nfp)));
t = build (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
else
nint = 0;
u = fold (build (PLUS_EXPR, ptr_type_node, u,
- build_int_2 (UNITS_PER_WORD * nint, 0)));
+ build_int_cst (NULL_TREE, UNITS_PER_WORD * nint)));
t = build (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Implement `va_arg'. */
-rtx
-sh_va_arg (tree valist, tree type)
+static tree
+sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
+ tree *post_p ATTRIBUTE_UNUSED)
{
HOST_WIDE_INT size, rsize;
tree tmp, pptr_type_node;
- rtx addr_rtx, r;
- rtx result_ptr, result = NULL_RTX;
- int pass_by_ref = MUST_PASS_IN_STACK (TYPE_MODE (type), type);
- rtx lab_over;
+ tree addr, lab_over = NULL, result = NULL;
+ int pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
+
+ if (pass_by_ref)
+ type = build_pointer_type (type);
size = int_size_in_bytes (type);
rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
pptr_type_node = build_pointer_type (ptr_type_node);
- if (pass_by_ref)
- type = build_pointer_type (type);
-
if (! TARGET_SH5 && (TARGET_SH2E || TARGET_SH4)
&& ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
{
tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
int pass_as_float;
- rtx lab_false;
+ tree lab_false;
f_next_o = TYPE_FIELDS (va_list_type_node);
f_next_o_limit = TREE_CHAIN (f_next_o);
f_next_fp_limit = TREE_CHAIN (f_next_fp);
f_next_stack = TREE_CHAIN (f_next_fp_limit);
- next_o = build (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o);
+ next_o = build (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
+ NULL_TREE);
next_o_limit = build (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
- valist, f_next_o_limit);
+ valist, f_next_o_limit, NULL_TREE);
next_fp = build (COMPONENT_REF, TREE_TYPE (f_next_fp),
- valist, f_next_fp);
+ valist, f_next_fp, NULL_TREE);
next_fp_limit = build (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
- valist, f_next_fp_limit);
+ valist, f_next_fp_limit, NULL_TREE);
next_stack = build (COMPONENT_REF, TREE_TYPE (f_next_stack),
- valist, f_next_stack);
+ valist, f_next_stack, NULL_TREE);
/* Structures with a single member with a distinct mode are passed
like their member. This is relevant if the latter has a REAL_TYPE
|| TREE_CODE (TREE_TYPE (TYPE_FIELDS (type))) == COMPLEX_TYPE)
&& TREE_CHAIN (TYPE_FIELDS (type)) == NULL_TREE)
type = TREE_TYPE (TYPE_FIELDS (type));
+
if (TARGET_SH4)
{
pass_as_float = ((TREE_CODE (type) == REAL_TYPE && size <= 8)
pass_as_float = (TREE_CODE (type) == REAL_TYPE && size == 4);
}
- addr_rtx = gen_reg_rtx (Pmode);
- lab_false = gen_label_rtx ();
- lab_over = gen_label_rtx ();
+ addr = create_tmp_var (pptr_type_node, NULL);
+ lab_false = create_artificial_label ();
+ lab_over = create_artificial_label ();
- tmp = make_tree (pptr_type_node, addr_rtx);
- valist = build1 (INDIRECT_REF, ptr_type_node, tmp);
+ valist = build1 (INDIRECT_REF, ptr_type_node, addr);
if (pass_as_float)
{
= current_function_args_info.arg_count[(int) SH_ARG_FLOAT];
int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
- emit_cmp_and_jump_insns (expand_expr (next_fp, NULL_RTX, Pmode,
- EXPAND_NORMAL),
- expand_expr (next_fp_limit, NULL_RTX,
- Pmode, EXPAND_NORMAL),
- GE, const1_rtx, Pmode, 1, lab_false);
+ tmp = build (GE_EXPR, boolean_type_node, next_fp, next_fp_limit);
+ tmp = build (COND_EXPR, void_type_node, tmp,
+ build (GOTO_EXPR, void_type_node, lab_false),
+ NULL);
+ gimplify_and_add (tmp, pre_p);
if (TYPE_ALIGN (type) > BITS_PER_WORD
|| (((TREE_CODE (type) == REAL_TYPE && size == 8) || size == 16)
&& (n_floatregs & 1)))
{
- tmp = build (BIT_AND_EXPR, ptr_type_node, next_fp,
- build_int_2 (UNITS_PER_WORD, 0));
+ tmp = fold_convert (ptr_type_node, size_int (UNITS_PER_WORD));
+ tmp = build (BIT_AND_EXPR, ptr_type_node, next_fp, tmp);
tmp = build (PLUS_EXPR, ptr_type_node, next_fp, tmp);
tmp = build (MODIFY_EXPR, ptr_type_node, next_fp, tmp);
- TREE_SIDE_EFFECTS (tmp) = 1;
- expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ gimplify_and_add (tmp, pre_p);
}
tmp = build1 (ADDR_EXPR, pptr_type_node, next_fp);
- r = expand_expr (tmp, addr_rtx, Pmode, EXPAND_NORMAL);
- if (r != addr_rtx)
- emit_move_insn (addr_rtx, r);
+ tmp = build (MODIFY_EXPR, void_type_node, addr, tmp);
+ gimplify_and_add (tmp, pre_p);
#ifdef FUNCTION_ARG_SCmode_WART
if (TYPE_MODE (type) == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
{
- rtx addr, real, imag, result_value, slot;
tree subtype = TREE_TYPE (type);
+ tree real, imag;
- addr = std_expand_builtin_va_arg (valist, subtype);
-#ifdef POINTERS_EXTEND_UNSIGNED
- if (GET_MODE (addr) != Pmode)
- addr = convert_memory_address (Pmode, addr);
-#endif
- imag = gen_rtx_MEM (TYPE_MODE (type), addr);
- set_mem_alias_set (imag, get_varargs_alias_set ());
+ imag = std_gimplify_va_arg_expr (valist, subtype, pre_p, NULL);
+ imag = get_initialized_tmp_var (imag, pre_p, NULL);
- addr = std_expand_builtin_va_arg (valist, subtype);
-#ifdef POINTERS_EXTEND_UNSIGNED
- if (GET_MODE (addr) != Pmode)
- addr = convert_memory_address (Pmode, addr);
-#endif
- real = gen_rtx_MEM (TYPE_MODE (type), addr);
- set_mem_alias_set (real, get_varargs_alias_set ());
-
- result_value = gen_rtx_CONCAT (SCmode, real, imag);
- /* ??? this interface is stupid - why require a pointer? */
- result = gen_reg_rtx (Pmode);
- slot = assign_stack_temp (SCmode, 8, 0);
- emit_move_insn (slot, result_value);
- emit_move_insn (result, XEXP (slot, 0));
+ real = std_gimplify_va_arg_expr (valist, subtype, pre_p, NULL);
+ real = get_initialized_tmp_var (real, pre_p, NULL);
+
+ result = build (COMPLEX_EXPR, type, real, imag);
+ result = get_initialized_tmp_var (result, pre_p, NULL);
}
#endif /* FUNCTION_ARG_SCmode_WART */
- emit_jump_insn (gen_jump (lab_over));
- emit_barrier ();
- emit_label (lab_false);
+ tmp = build (GOTO_EXPR, void_type_node, lab_over);
+ gimplify_and_add (tmp, pre_p);
+
+ tmp = build (LABEL_EXPR, void_type_node, lab_false);
+ gimplify_and_add (tmp, pre_p);
tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
- r = expand_expr (tmp, addr_rtx, Pmode, EXPAND_NORMAL);
- if (r != addr_rtx)
- emit_move_insn (addr_rtx, r);
+ tmp = build (MODIFY_EXPR, void_type_node, addr, tmp);
+ gimplify_and_add (tmp, pre_p);
}
else
{
- tmp = build (PLUS_EXPR, ptr_type_node, next_o,
- build_int_2 (rsize, 0));
-
- emit_cmp_and_jump_insns (expand_expr (tmp, NULL_RTX, Pmode,
- EXPAND_NORMAL),
- expand_expr (next_o_limit, NULL_RTX,
- Pmode, EXPAND_NORMAL),
- GT, const1_rtx, Pmode, 1, lab_false);
+ tmp = fold_convert (ptr_type_node, size_int (rsize));
+ tmp = build (PLUS_EXPR, ptr_type_node, next_o, tmp);
+ tmp = build (GT_EXPR, boolean_type_node, tmp, next_o_limit);
+ tmp = build (COND_EXPR, void_type_node, tmp,
+ build (GOTO_EXPR, void_type_node, lab_false),
+ NULL);
+ gimplify_and_add (tmp, pre_p);
tmp = build1 (ADDR_EXPR, pptr_type_node, next_o);
- r = expand_expr (tmp, addr_rtx, Pmode, EXPAND_NORMAL);
- if (r != addr_rtx)
- emit_move_insn (addr_rtx, r);
+ tmp = build (MODIFY_EXPR, void_type_node, addr, tmp);
+ gimplify_and_add (tmp, pre_p);
- emit_jump_insn (gen_jump (lab_over));
- emit_barrier ();
- emit_label (lab_false);
+ tmp = build (GOTO_EXPR, void_type_node, lab_over);
+ gimplify_and_add (tmp, pre_p);
+
+ tmp = build (LABEL_EXPR, void_type_node, lab_false);
+ gimplify_and_add (tmp, pre_p);
if (size > 4 && ! TARGET_SH4)
{
tmp = build (MODIFY_EXPR, ptr_type_node, next_o, next_o_limit);
- TREE_SIDE_EFFECTS (tmp) = 1;
- expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ gimplify_and_add (tmp, pre_p);
}
tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
- r = expand_expr (tmp, addr_rtx, Pmode, EXPAND_NORMAL);
- if (r != addr_rtx)
- emit_move_insn (addr_rtx, r);
+ tmp = build (MODIFY_EXPR, void_type_node, addr, tmp);
+ gimplify_and_add (tmp, pre_p);
}
- if (! result)
- emit_label (lab_over);
+ if (!result)
+ {
+ tmp = build (LABEL_EXPR, void_type_node, lab_over);
+ gimplify_and_add (tmp, pre_p);
+ }
}
/* ??? In va-sh.h, there had been code to make values larger than
size 8 indirect. This does not match the FUNCTION_ARG macros. */
- result_ptr = std_expand_builtin_va_arg (valist, type);
+ tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
if (result)
{
- emit_move_insn (result, result_ptr);
- emit_label (lab_over);
+ tmp = build (MODIFY_EXPR, void_type_node, result, tmp);
+ gimplify_and_add (tmp, pre_p);
+
+ tmp = build (LABEL_EXPR, void_type_node, lab_over);
+ gimplify_and_add (tmp, pre_p);
}
else
- result = result_ptr;
+ result = tmp;
if (pass_by_ref)
- {
-#ifdef POINTERS_EXTEND_UNSIGNED
- if (GET_MODE (addr) != Pmode)
- addr = convert_memory_address (Pmode, result);
-#endif
- result = gen_rtx_MEM (ptr_mode, force_reg (Pmode, result));
- set_mem_alias_set (result, get_varargs_alias_set ());
- }
- /* ??? expand_builtin_va_arg will also set the alias set of the dereferenced
- argument to the varargs alias set. */
+ result = build_fold_indirect_ref (result);
+
return result;
}
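/* Editor's sketch, not part of the patch: for a float argument under
   the SH2E/SH4 va_list layout above, the GIMPLE emitted corresponds
   roughly to this C, with names taken from the va_list fields:

       if (ap->next_fp >= ap->next_fp_limit)
         goto lab_false;
       addr = &ap->next_fp;
       goto lab_over;
     lab_false:
       addr = &ap->next_stack;
     lab_over:
       (std_gimplify_va_arg_expr then fetches the value through *addr)  */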
-static bool
+bool
sh_promote_prototypes (tree type)
{
if (TARGET_HITACHI)
return ! sh_attr_renesas_p (type);
}
+/* Whether an argument must be passed by reference.  On SHcompact, we
+ pretend that arguments wider than 32 bits that would have been passed
+ in registers are passed by reference, so that an SHmedia trampoline
+ loads them into the full 64-bit registers. */
+
+static int
+shcompact_byref (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, bool named)
+{
+ unsigned HOST_WIDE_INT size;
+
+ if (type)
+ size = int_size_in_bytes (type);
+ else
+ size = GET_MODE_SIZE (mode);
+
+ if (cum->arg_count[SH_ARG_INT] < NPARM_REGS (SImode)
+ && (!named
+ || GET_SH_ARG_CLASS (mode) == SH_ARG_INT
+ || (GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT
+ && cum->arg_count[SH_ARG_FLOAT] >= NPARM_REGS (SFmode)))
+ && size > 4
+ && !SHCOMPACT_FORCE_ON_STACK (mode, type)
+ && !SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
+ return size;
+ else
+ return 0;
+}
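+/* Editor's note, an illustration rather than part of the patch: with
+ a long long argument (size 8) and integer argument registers still
+ free, shcompact_byref returns 8, so sh_pass_by_reference below reports
+ pass-by-reference and stashes the size in cum->byref; an SImode
+ argument (size 4) yields 0 and is passed by value as usual. */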
+
+static bool
+sh_pass_by_reference (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, bool named)
+{
+ if (targetm.calls.must_pass_in_stack (mode, type))
+ return true;
+
+ /* ??? std_gimplify_va_arg_expr passes NULL for cum. That function
+ wants to know about pass-by-reference semantics for incoming
+ arguments. */
+ if (! cum)
+ return false;
+
+ if (TARGET_SHCOMPACT)
+ {
+ cum->byref = shcompact_byref (cum, mode, type, named);
+ return cum->byref != 0;
+ }
+
+ return false;
+}
+
+static bool
+sh_callee_copies (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, bool named ATTRIBUTE_UNUSED)
+{
+ /* ??? How can it possibly be correct to return true only on the
+ caller side of the equation? Is there someplace else in the
+ sh backend that's magically producing the copies? */
+ return (cum->outgoing
+ && ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
+ % SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
+}
+
/* Define where to put the arguments to a function.
Value is zero to push the argument on the stack,
or a hard register in which to store the argument.
BASE_ARG_REG (mode)
+ (ROUND_REG (*ca, mode) ^ 1)),
const0_rtx);
- rtx r2 = gen_rtx_EXPR_LIST(VOIDmode,
- gen_rtx_REG (SFmode,
- BASE_ARG_REG (mode)
- + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
- GEN_INT (4));
+ rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SFmode,
+ BASE_ARG_REG (mode)
+ + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
+ GEN_INT (4));
return gen_rtx_PARALLEL(SCmode, gen_rtvec(2, r1, r2));
}
return gen_rtx_REG (mode, regno);
}
-
+
if (TARGET_SH5)
{
if (mode == VOIDmode && TARGET_SHCOMPACT)
return 0;
}
-
+
/* Update the data in CUM to advance over an argument
of mode MODE and data type TYPE.
(TYPE is null for libcalls where that information may not be
sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
tree type, int named)
{
- if (ca->force_mem)
- ca->force_mem = 0;
- else if (TARGET_SH5)
- {
- tree type2 = (ca->byref && type
- ? TREE_TYPE (type)
- : type);
- enum machine_mode mode2 = (ca->byref && type
- ? TYPE_MODE (type2)
- : mode);
- int dwords = ((ca->byref
- ? ca->byref
- : mode2 == BLKmode
- ? int_size_in_bytes (type2)
- : GET_MODE_SIZE (mode2)) + 7) / 8;
- int numregs = MIN (dwords, NPARM_REGS (SImode)
- - ca->arg_count[(int) SH_ARG_INT]);
-
- if (numregs)
- {
- ca->arg_count[(int) SH_ARG_INT] += numregs;
- if (TARGET_SHCOMPACT
- && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
- {
- ca->call_cookie
- |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
- - numregs, 1);
- /* N.B. We want this also for outgoing. */
- ca->stack_regs += numregs;
- }
- else if (ca->byref)
- {
- if (! ca->outgoing)
- ca->stack_regs += numregs;
- ca->byref_regs += numregs;
- ca->byref = 0;
- do
- ca->call_cookie
- |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
- - numregs, 2);
- while (--numregs);
- ca->call_cookie
- |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
- - 1, 1);
- }
- else if (dwords > numregs)
- {
- int pushregs = numregs;
-
- if (TARGET_SHCOMPACT)
- ca->stack_regs += numregs;
- while (pushregs < NPARM_REGS (SImode) - 1
- && (CALL_COOKIE_INT_REG_GET
- (ca->call_cookie,
- NPARM_REGS (SImode) - pushregs)
- == 1))
- {
- ca->call_cookie
- &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
- - pushregs, 1);
- pushregs++;
- }
- if (numregs == NPARM_REGS (SImode))
- ca->call_cookie
- |= CALL_COOKIE_INT_REG (0, 1)
- | CALL_COOKIE_STACKSEQ (numregs - 1);
- else
- ca->call_cookie
- |= CALL_COOKIE_STACKSEQ (numregs);
- }
- }
- if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
- && (named || ! ca->prototype_p))
- {
- if (mode2 == SFmode && ca->free_single_fp_reg)
- ca->free_single_fp_reg = 0;
- else if (ca->arg_count[(int) SH_ARG_FLOAT]
- < NPARM_REGS (SFmode))
- {
- int numfpregs
- = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
- NPARM_REGS (SFmode)
- - ca->arg_count[(int) SH_ARG_FLOAT]);
-
- ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
-
- if (TARGET_SHCOMPACT && ! ca->prototype_p)
- {
- if (ca->outgoing && numregs > 0)
- do
- {
- ca->call_cookie
- |= (CALL_COOKIE_INT_REG
- (ca->arg_count[(int) SH_ARG_INT]
- - numregs + ((numfpregs - 2) / 2),
- 4 + (ca->arg_count[(int) SH_ARG_FLOAT]
- - numfpregs) / 2));
- }
- while (numfpregs -= 2);
- }
- else if (mode2 == SFmode && (named)
- && (ca->arg_count[(int) SH_ARG_FLOAT]
- < NPARM_REGS (SFmode)))
- ca->free_single_fp_reg
- = FIRST_FP_PARM_REG - numfpregs
- + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
- }
- }
- return;
- }
-
- if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
- {
- /* Note that we've used the skipped register. */
- if (mode == SFmode && ca->free_single_fp_reg)
- {
- ca->free_single_fp_reg = 0;
- return;
- }
- /* When we have a DF after an SF, there's an SF register that get
- skipped in order to align the DF value. We note this skipped
- register, because the next SF value will use it, and not the
- SF that follows the DF. */
- if (mode == DFmode
- && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
- {
- ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
- + BASE_ARG_REG (mode));
- }
- }
-
- if (! (TARGET_SH4 || ca->renesas_abi)
- || PASS_IN_REG_P (*ca, mode, type))
- (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
- = (ROUND_REG (*ca, mode)
- + (mode == BLKmode
- ? ROUND_ADVANCE (int_size_in_bytes (type))
- : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
-}
-
-/* If the structure value address is not passed in a register, define
- `STRUCT_VALUE' as an expression returning an RTX for the place
- where the address is passed. If it returns 0, the address is
- passed as an "invisible" first argument. */
+ if (ca->force_mem)
+ ca->force_mem = 0;
+ else if (TARGET_SH5)
+ {
+ tree type2 = (ca->byref && type
+ ? TREE_TYPE (type)
+ : type);
+ enum machine_mode mode2 = (ca->byref && type
+ ? TYPE_MODE (type2)
+ : mode);
+ int dwords = ((ca->byref
+ ? ca->byref
+ : mode2 == BLKmode
+ ? int_size_in_bytes (type2)
+ : GET_MODE_SIZE (mode2)) + 7) / 8;
+ int numregs = MIN (dwords, NPARM_REGS (SImode)
+ - ca->arg_count[(int) SH_ARG_INT]);
+
+ if (numregs)
+ {
+ ca->arg_count[(int) SH_ARG_INT] += numregs;
+ if (TARGET_SHCOMPACT
+ && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
+ {
+ ca->call_cookie
+ |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
+ - numregs, 1);
+ /* N.B. We want this also for outgoing. */
+ ca->stack_regs += numregs;
+ }
+ else if (ca->byref)
+ {
+ if (! ca->outgoing)
+ ca->stack_regs += numregs;
+ ca->byref_regs += numregs;
+ ca->byref = 0;
+ do
+ ca->call_cookie
+ |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
+ - numregs, 2);
+ while (--numregs);
+ ca->call_cookie
+ |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
+ - 1, 1);
+ }
+ else if (dwords > numregs)
+ {
+ int pushregs = numregs;
+
+ if (TARGET_SHCOMPACT)
+ ca->stack_regs += numregs;
+ while (pushregs < NPARM_REGS (SImode) - 1
+ && (CALL_COOKIE_INT_REG_GET
+ (ca->call_cookie,
+ NPARM_REGS (SImode) - pushregs)
+ == 1))
+ {
+ ca->call_cookie
+ &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
+ - pushregs, 1);
+ pushregs++;
+ }
+ if (numregs == NPARM_REGS (SImode))
+ ca->call_cookie
+ |= CALL_COOKIE_INT_REG (0, 1)
+ | CALL_COOKIE_STACKSEQ (numregs - 1);
+ else
+ ca->call_cookie
+ |= CALL_COOKIE_STACKSEQ (numregs);
+ }
+ }
+ if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
+ && (named || ! ca->prototype_p))
+ {
+ if (mode2 == SFmode && ca->free_single_fp_reg)
+ ca->free_single_fp_reg = 0;
+ else if (ca->arg_count[(int) SH_ARG_FLOAT]
+ < NPARM_REGS (SFmode))
+ {
+ int numfpregs
+ = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
+ NPARM_REGS (SFmode)
+ - ca->arg_count[(int) SH_ARG_FLOAT]);
+
+ ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
+
+ if (TARGET_SHCOMPACT && ! ca->prototype_p)
+ {
+ if (ca->outgoing && numregs > 0)
+ do
+ {
+ ca->call_cookie
+ |= (CALL_COOKIE_INT_REG
+ (ca->arg_count[(int) SH_ARG_INT]
+ - numregs + ((numfpregs - 2) / 2),
+ 4 + (ca->arg_count[(int) SH_ARG_FLOAT]
+ - numfpregs) / 2));
+ }
+ while (numfpregs -= 2);
+ }
+ else if (mode2 == SFmode && (named)
+ && (ca->arg_count[(int) SH_ARG_FLOAT]
+ < NPARM_REGS (SFmode)))
+ ca->free_single_fp_reg
+ = FIRST_FP_PARM_REG - numfpregs
+ + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
+ }
+ }
+ return;
+ }
+
+ if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
+ {
+ /* Note that we've used the skipped register. */
+ if (mode == SFmode && ca->free_single_fp_reg)
+ {
+ ca->free_single_fp_reg = 0;
+ return;
+ }
+ /* When we have a DF after an SF, there's an SF register that gets
+ skipped in order to align the DF value. We note this skipped
+ register, because the next SF value will use it, and not the
+ SF that follows the DF. */
+ if (mode == DFmode
+ && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
+ {
+ ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
+ + BASE_ARG_REG (mode));
+ }
+ }
+
+ if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
+ || PASS_IN_REG_P (*ca, mode, type))
+ (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
+ = (ROUND_REG (*ca, mode)
+ + (mode == BLKmode
+ ? ROUND_ADVANCE (int_size_in_bytes (type))
+ : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
+}
+
/* The Renesas calling convention doesn't quite fit into this scheme since
the address is passed like an invisible argument, but one that is always
passed in memory. */
return gen_rtx_REG (Pmode, 2);
}
+/* Worker function for TARGET_RETURN_IN_MEMORY. */
+
static bool
sh_return_in_memory (tree type, tree fndecl)
{
later. Fortunately, we already have two flags that are part of struct
function that tell if a function uses varargs or stdarg. */
static void
-sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
- tree type ATTRIBUTE_UNUSED,
- int *pretend_arg_size ATTRIBUTE_UNUSED,
+sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
+ enum machine_mode mode,
+ tree type,
+ int *pretend_arg_size,
int second_time ATTRIBUTE_UNUSED)
{
if (! current_function_stdarg)
abort ();
+ if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
+ {
+ int named_parm_regs, anon_parm_regs;
+
+ named_parm_regs = (ROUND_REG (*ca, mode)
+ + (mode == BLKmode
+ ? ROUND_ADVANCE (int_size_in_bytes (type))
+ : ROUND_ADVANCE (GET_MODE_SIZE (mode))));
+ anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
+ if (anon_parm_regs > 0)
+ *pretend_arg_size = anon_parm_regs * 4;
+ }
}
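/* Editor's worked example, not part of the patch: on the 32-bit ABIs
   NPARM_REGS (SImode) is 4 (r4..r7), so a stdarg function whose named
   arguments occupy one register gets anon_parm_regs == 3 and hence
   *pretend_arg_size == 12 bytes of pretend arguments to save.  */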
static bool
int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
save_schedule schedule;
save_entry *entry;
-
+
n += total_auto_space;
/* If it wasn't saved, there's not much we can do. */
return;
/* We are only interested in fields. */
- if (TREE_CODE_CLASS (TREE_CODE (node)) != 'd')
+ if (!DECL_P (node))
return;
/* Add a 'handle_interrupt' attribute. */
{ "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
{ "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
{ "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
+#ifdef SYMBIAN
+ /* Symbian support adds two new attributes:
+ dllexport - for exporting a function/variable that will live in a dll
+ dllimport - for importing a function/variable from a dll
+
+ Microsoft allows multiple declspecs in one __declspec, separating
+ them with spaces. We do NOT support this. Instead, use __declspec
+ multiple times. */
+ { "dllimport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
+ { "dllexport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
+#endif
{ NULL, 0, 0, false, false, false, NULL }
};
{
if (TREE_CODE (*node) != FUNCTION_DECL)
{
- warning ("`%s' attribute only applies to functions",
+ warning ("%qs attribute only applies to functions",
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
{
if (TREE_CODE (*node) != FUNCTION_DECL)
{
- warning ("`%s' attribute only applies to functions",
+ warning ("%qs attribute only applies to functions",
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
else if (!pragma_interrupt)
{
/* The sp_switch attribute only has meaning for interrupt functions. */
- warning ("`%s' attribute only applies to interrupt functions",
+ warning ("%qs attribute only applies to interrupt functions",
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
{
/* The argument must be a constant string. */
- warning ("`%s' attribute argument not a string constant",
+ warning ("%qs attribute argument not a string constant",
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
else
{
- sp_switch = gen_rtx_SYMBOL_REF (VOIDmode,
- TREE_STRING_POINTER (TREE_VALUE (args)));
+ const char *s = ggc_strdup (TREE_STRING_POINTER (TREE_VALUE (args)));
+ sp_switch = gen_rtx_SYMBOL_REF (VOIDmode, s);
}
return NULL_TREE;
{
if (TREE_CODE (*node) != FUNCTION_DECL)
{
- warning ("`%s' attribute only applies to functions",
+ warning ("%qs attribute only applies to functions",
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
else if (!pragma_interrupt)
{
/* The trap_exit attribute only has meaning for interrupt functions. */
- warning ("`%s' attribute only applies to interrupt functions",
+ warning ("%qs attribute only applies to interrupt functions",
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
{
/* The argument must be a constant integer. */
- warning ("`%s' attribute argument not an integer constant",
+ warning ("%qs attribute argument not an integer constant",
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
int flag_mask
= (SH1_BIT | SH2_BIT | SH3_BIT | SH_E_BIT | HARD_SH4_BIT | FPU_SINGLE_BIT
| SH4_BIT | HITACHI_BIT | LITTLE_ENDIAN_BIT);
-
+
/* -fpic and -fpie also usually make a PCH invalid. */
if (data[0] != flag_pic)
return _("created and used with different settings of -fpic");
}
data += sizeof (target_flags);
len -= sizeof (target_flags);
-
+
/* Check string options. */
#ifdef TARGET_OPTIONS
for (i = 0; i < ARRAY_SIZE (target_options); i++)
#endif
return NULL;
-
+
make_message:
{
char *r;
- asprintf (&r, _("created and used with differing settings of `-m%s'"),
+ asprintf (&r, _("created and used with differing settings of '-m%s'"),
flag_that_differs);
if (r == NULL)
return _("out of memory");
&& mode == DImode
&& GET_CODE (op) == CONST_INT
&& CONST_OK_FOR_J16 (INTVAL (op)))
- return 1;
+ return 1;
return 0;
}
int
fpscr_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
- return (GET_CODE (op) == REG && REGNO (op) == FPSCR_REG
+ return (GET_CODE (op) == REG
+ && (REGNO (op) == FPSCR_REG
+ || (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ && !(reload_in_progress || reload_completed)))
&& GET_MODE (op) == PSImode);
}
&& (GET_CODE (op) == EQ || GET_CODE (op) == NE));
}
-int greater_comparison_operator (rtx op, enum machine_mode mode)
+int
+greater_comparison_operator (rtx op, enum machine_mode mode)
{
if (mode != VOIDmode && GET_MODE (op) == mode)
return 0;
}
}
-int less_comparison_operator (rtx op, enum machine_mode mode)
+int
+less_comparison_operator (rtx op, enum machine_mode mode)
{
if (mode != VOIDmode && GET_MODE (op) == mode)
return 0;
if (GET_CODE (op) != CONST_INT)
return 0;
i = INTVAL (op);
- return i >= 1*8 && i <= 7*8 && (i & 7) == 0;
+ return i >= 1 * 8 && i <= 7 * 8 && (i & 7) == 0;
}
int
if (GET_MODE_UNIT_SIZE (mode) == 1)
{
y = XVECEXP (v, 0, i);
- for (i -= 2 ; i >= 0; i -= 2)
+ for (i -= 2; i >= 0; i -= 2)
if (! rtx_equal_p (XVECEXP (v, 0, i + 1), x)
|| ! rtx_equal_p (XVECEXP (v, 0, i), y))
return 0;
}
\f
/* Return the destination address of a branch. */
-
+
static int
branch_dest (rtx branch)
{
while ((insn = NEXT_INSN (insn)))
{
+ rtx set;
+ if (!INSN_P (insn))
+ continue;
+
code = GET_CODE (insn);
#if 0
else if (code == JUMP_INSN)
return 0;
}
- else if (GET_RTX_CLASS (code) == 'i')
- {
- rtx set = single_set (insn);
- if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
- return 0;
- if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
- return GET_CODE (SET_DEST (set)) != MEM;
- if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
- return 0;
- }
+ set = single_set (insn);
+ if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
+ return 0;
+ if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ return GET_CODE (SET_DEST (set)) != MEM;
+ if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
+ return 0;
- if (code == CALL_INSN && call_used_regs[REGNO (reg)])
+ if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
return 1;
}
return 1;
{
if (! fpscr_rtx)
{
- fpscr_rtx = gen_rtx (REG, PSImode, FPSCR_REG);
+ fpscr_rtx = gen_rtx_REG (PSImode, FPSCR_REG);
REG_USERVAR_P (fpscr_rtx) = 1;
mark_user_reg (fpscr_rtx);
}
expand_df_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
{
emit_df_insn ((*fun) (operands[0], operands[1], operands[2],
- get_fpscr_rtx ()));
+ get_fpscr_rtx ()));
}
\f
/* ??? gcc does flow analysis strictly after common subexpression
remove assignments that are dead due to a following assignment in the
same basic block. */
-static void
+static void
mark_use (rtx x, rtx *reg_set_block)
{
enum rtx_code code;
abort ();
}
-/* This function will set the fpscr from memory.
+/* This function will set the fpscr from memory.
MODE is the mode we are setting it to. */
void
fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
return 2;
/* sh-dsp parallel processing insn take four bytes instead of two. */
-
+
if (GET_CODE (insn) == INSN)
{
int sum = 0;
|| XINT (x, 1) == UNSPEC_GOTTPOFF
|| XINT (x, 1) == UNSPEC_DTPOFF
|| XINT (x, 1) == UNSPEC_PLT))
- return 0;
+ return 0;
fmt = GET_RTX_FORMAT (GET_CODE (x));
for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
/* Return true if it's possible to redirect BRANCH1 to the destination
of an unconditional jump BRANCH2. We only want to do this if the
resulting branch will have a short displacement. */
-int
+int
sh_can_redirect_branch (rtx branch1, rtx branch2)
{
if (flag_expensive_optimizations && simplejump_p (branch2))
rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
rtx insn;
int distance;
-
- for (distance = 0, insn = NEXT_INSN (branch1);
- insn && distance < 256;
+
+ for (distance = 0, insn = NEXT_INSN (branch1);
+ insn && distance < 256;
insn = PREV_INSN (insn))
{
- if (insn == dest)
+ if (insn == dest)
return 1;
else
distance += get_attr_length (insn);
}
- for (distance = 0, insn = NEXT_INSN (branch1);
- insn && distance < 256;
+ for (distance = 0, insn = NEXT_INSN (branch1);
+ insn && distance < 256;
insn = NEXT_INSN (insn))
{
- if (insn == dest)
+ if (insn == dest)
return 1;
else
distance += get_attr_length (insn);
sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
unsigned int new_reg)
{
-
-/* Interrupt functions can only use registers that have already been
- saved by the prologue, even if they would normally be
- call-clobbered. */
+ /* Interrupt functions can only use registers that have already been
+ saved by the prologue, even if they would normally be
+ call-clobbered. */
if (sh_cfun_interrupt_handler_p () && !regs_ever_live[new_reg])
- return 0;
+ return 0;
- return 1;
+ return 1;
}
/* Function to update the integer COST
&& get_attr_type (insn) == TYPE_DYN_SHIFT
&& get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
&& reg_overlap_mentioned_p (SET_DEST (PATTERN (dep_insn)),
- XEXP (SET_SRC (single_set(insn)),
+ XEXP (SET_SRC (single_set (insn)),
1)))
cost++;
/* When an LS group instruction with a latency of less than
return REG_N_SETS (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
}
-/* This Function returns nonzero if the DFA based scheduler interface
- is to be used. At present this is supported for the SH4 only. */
+/* This function returns "2" to indicate dual issue for the SH4
+ processor. To be used by the DFA pipeline description. */
static int
-sh_use_dfa_interface(void)
+sh_issue_rate (void)
{
- if (TARGET_HARD_SH4)
- return 1;
+ if (TARGET_SUPERSCALAR)
+ return 2;
else
- return 0;
+ return 1;
}
-/* This function returns "2" to indicate dual issue for the SH4
- processor. To be used by the DFA pipeline description. */
+/* Functions for ready queue reordering for sched1. */
+
+/* Get weight for mode for a set x. */
+static short
+find_set_regmode_weight (rtx x, enum machine_mode mode)
+{
+ if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
+ return 1;
+ if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
+ {
+ if (GET_CODE (SET_DEST (x)) == REG)
+ {
+ if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
+ return 1;
+ else
+ return 0;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/* Get regmode weight for insn. */
+static short
+find_insn_regmode_weight (rtx insn, enum machine_mode mode)
+{
+ short reg_weight = 0;
+ rtx x;
+
+ /* Increment weight for each register born here. */
+ x = PATTERN (insn);
+ reg_weight += find_set_regmode_weight (x, mode);
+ if (GET_CODE (x) == PARALLEL)
+ {
+ int j;
+ for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
+ {
+ x = XVECEXP (PATTERN (insn), 0, j);
+ reg_weight += find_set_regmode_weight (x, mode);
+ }
+ }
+ /* Decrement weight for each register that dies here. */
+ for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
+ {
+ if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
+ {
+ rtx note = XEXP (x, 0);
+ if (GET_CODE (note) == REG && GET_MODE (note) == mode)
+ reg_weight--;
+ }
+ }
+ return reg_weight;
+}
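+/* Editor's note, an illustration rather than part of the patch: an insn
+ like (set (reg:SI r1) (plus:SI (reg:SI r2) (const_int 4))) carrying a
+ REG_DEAD note for r2 scores +1 for the SImode register born in the SET
+ and -1 for the one dying in the note, so its net SImode weight is 0:
+ it leaves the number of live SImode registers unchanged. */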
+
+/* Calculate regmode weights for all insns of a basic block. */
+static void
+find_regmode_weight (int b, enum machine_mode mode)
+{
+ rtx insn, next_tail, head, tail;
+
+ get_block_head_tail (b, &head, &tail);
+ next_tail = NEXT_INSN (tail);
+
+ for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
+ {
+ /* Handle register life information. */
+ if (!INSN_P (insn))
+ continue;
+
+ if (mode == SFmode)
+ INSN_REGMODE_WEIGHT (insn, mode) =
+ find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DFmode);
+ else if (mode == SImode)
+ INSN_REGMODE_WEIGHT (insn, mode) =
+ find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DImode);
+ }
+}
+
+/* Comparison function for ready queue sorting. */
static int
-sh_issue_rate(void)
+rank_for_reorder (const void *x, const void *y)
{
- if (TARGET_SUPERSCALAR)
- return 2;
+ rtx tmp = *(const rtx *) y;
+ rtx tmp2 = *(const rtx *) x;
+
+ /* The insn in a schedule group should be issued first. */
+ if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
+ return SCHED_GROUP_P (tmp2) ? 1 : -1;
+
+ /* If insns are equally good, sort by INSN_LUID (original insn order);
+ this minimizes instruction movement, thus minimizing sched's effect on
+ register pressure. */
+ return INSN_LUID (tmp) - INSN_LUID (tmp2);
+}
+
+/* Resort the array A in which only the element at index N - 1 may be
+ out of order. */
+static void
+swap_reorder (rtx *a, int n)
+{
+ rtx insn = a[n - 1];
+ int i = n - 2;
+
+ while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
+ {
+ a[i + 1] = a[i];
+ i -= 1;
+ }
+ a[i + 1] = insn;
+}
+
+#define SCHED_REORDER(READY, N_READY) \
+ do \
+ { \
+ if ((N_READY) == 2) \
+ swap_reorder (READY, N_READY); \
+ else if ((N_READY) > 2) \
+ qsort (READY, N_READY, sizeof (rtx), rank_for_reorder); \
+ } \
+ while (0)
+
+/* Sort the ready list READY by ascending priority, using the SCHED_REORDER
+ macro. */
+static void
+ready_reorder (rtx *ready, int nready)
+{
+ SCHED_REORDER (ready, nready);
+}
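+/* Editor's note, an assumption about the haifa scheduler rather than
+ part of the patch: rank_for_reorder sorts larger LUIDs toward the
+ front, so the lowest-LUID (earliest in source order) ready insn ends
+ up at the tail of the array, which is where the scheduler issues from;
+ under pressure the reorder thus approximates source order. */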
+
+/* Calculate regmode weights for all insns of all basic blocks. */
+static void
+sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
+ int verbose ATTRIBUTE_UNUSED,
+ int old_max_uid)
+{
+ basic_block b;
+
+ regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
+ regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
+
+ FOR_EACH_BB_REVERSE (b)
+ {
+ find_regmode_weight (b->index, SImode);
+ find_regmode_weight (b->index, SFmode);
+ }
+
+ CURR_REGMODE_PRESSURE (SImode) = 0;
+ CURR_REGMODE_PRESSURE (SFmode) = 0;
+}
+
+/* Cleanup. */
+static void
+sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
+ int verbose ATTRIBUTE_UNUSED)
+{
+ if (regmode_weight[0])
+ {
+ free (regmode_weight[0]);
+ regmode_weight[0] = NULL;
+ }
+ if (regmode_weight[1])
+ {
+ free (regmode_weight[1]);
+ regmode_weight[1] = NULL;
+ }
+}
+
+/* Cache can_issue_more so that we can return it from reorder2. Also,
+ keep count of register pressure on SImode and SFmode registers. */
+static int
+sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
+ int sched_verbose ATTRIBUTE_UNUSED,
+ rtx insn,
+ int can_issue_more)
+{
+ if (GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER)
+ cached_can_issue_more = can_issue_more - 1;
else
+ cached_can_issue_more = can_issue_more;
+
+ if (reload_completed)
+ return cached_can_issue_more;
+
+ CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
+ CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);
+
+ return cached_can_issue_more;
+}
+
+static void
+sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
+ int verbose ATTRIBUTE_UNUSED,
+ int veclen ATTRIBUTE_UNUSED)
+{
+ CURR_REGMODE_PRESSURE (SImode) = 0;
+ CURR_REGMODE_PRESSURE (SFmode) = 0;
+}
+
+/* Some magic numbers. */
+/* Pressure on register r0 can lead to spill failures, so avoid sched1
+ for functions that already have high pressure on r0. */
+#define R0_MAX_LIFE_REGIONS 2
+#define R0_MAX_LIVE_LENGTH 12
+/* Register Pressure thresholds for SImode and SFmode registers. */
+#define SIMODE_MAX_WEIGHT 5
+#define SFMODE_MAX_WEIGHT 10
+
+/* Return true if the pressure is high for MODE. */
+static short
+high_pressure (enum machine_mode mode)
+{
+ /* Pressure on register r0 can lead to spill failures, so avoid sched1
+ for functions that already have high pressure on r0. */
+ if ((REG_N_SETS (0) - REG_N_DEATHS (0)) >= R0_MAX_LIFE_REGIONS
+ && REG_LIVE_LENGTH (0) >= R0_MAX_LIVE_LENGTH)
return 1;
+
+ if (mode == SFmode)
+ return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
+ else
+ return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
+}
+
+/* Reorder ready queue if register pressure is high. */
+static int
+sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
+ int sched_verbose ATTRIBUTE_UNUSED,
+ rtx *ready,
+ int *n_readyp,
+ int clock_var ATTRIBUTE_UNUSED)
+{
+ if (reload_completed)
+ return sh_issue_rate ();
+
+ if (high_pressure (SFmode) || high_pressure (SImode))
+ {
+ ready_reorder (ready, *n_readyp);
+ }
+
+ return sh_issue_rate ();
+}
+
+/* Skip cycles if the current register pressure is high. */
+static int
+sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
+ int sched_verbose ATTRIBUTE_UNUSED,
+ rtx *ready ATTRIBUTE_UNUSED,
+ int *n_readyp ATTRIBUTE_UNUSED,
+ int clock_var ATTRIBUTE_UNUSED)
+{
+ if (reload_completed)
+ return cached_can_issue_more;
+
+ if (high_pressure (SFmode) || high_pressure (SImode))
+ skip_cycles = 1;
+
+ return cached_can_issue_more;
+}
+
+/* Skip cycles without sorting the ready queue. This will move insns
+ from Q -> R. If this is the last cycle we are skipping, allow sorting
+ of the ready queue by sh_reorder. */
+
+/* Generally, skipping this many cycles is sufficient for all insns to
+ move from Q -> R. */
+#define MAX_SKIPS 8
+
+static int
+sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
+ int sched_verbose ATTRIBUTE_UNUSED,
+ rtx insn ATTRIBUTE_UNUSED,
+ int last_clock_var,
+ int clock_var,
+ int *sort_p)
+{
+ if (reload_completed)
+ return 0;
+
+ if (skip_cycles)
+ {
+ if ((clock_var - last_clock_var) < MAX_SKIPS)
+ {
+ *sort_p = 0;
+ return 1;
+ }
+ /* If this is the last cycle we are skipping, allow reordering of R. */
+ if ((clock_var - last_clock_var) == MAX_SKIPS)
+ {
+ *sort_p = 1;
+ return 1;
+ }
+ }
+
+ skip_cycles = 0;
+
+ return 0;
}
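/* Editor's timeline sketch, not part of the patch: once sh_reorder2
   sets skip_cycles, calls where clock_var - last_clock_var is below
   MAX_SKIPS return 1 with *sort_p = 0 (advance the cycle so queued
   insns move Q -> R without resorting); the call at exactly MAX_SKIPS
   returns 1 with *sort_p = 1 so the refilled ready list gets resorted
   by sh_reorder; any later call clears skip_cycles and issues
   normally.  */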
/* SHmedia requires registers for branches, so we can't generate new
}
static bool
-sh_ms_bitfield_layout_p (record_type)
- tree record_type ATTRIBUTE_UNUSED;
+sh_ms_bitfield_layout_p (tree record_type ATTRIBUTE_UNUSED)
{
return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
}
\f
-/*
+/*
On the SH1..SH4, the trampoline looks like
2 0002 D202 mov.l l2,r2
1 0000 D301 mov.l l1,r3
emit_move_insn (gen_rtx_MEM (Pmode,
plus_constant (tramp,
fixed_len
- + GET_MODE_SIZE (Pmode))),
+ + GET_MODE_SIZE (Pmode))),
cxt);
emit_insn (gen_ic_invalidate_line (tramp));
return;
movishori));
emit_insn (gen_rotrdi3_mextr (quad0, quad0,
GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
- emit_insn (gen_ashldi3_media (quad0, quad0, GEN_INT (2)));
+ emit_insn (gen_ashldi3_media (quad0, quad0, const2_rtx));
emit_move_insn (gen_rtx_MEM (DImode, tramp), quad0);
emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, cxtload, 0),
gen_rtx_SUBREG (V2HImode, cxt, 0),
movishori));
emit_insn (gen_rotrdi3_mextr (cxtload, cxtload,
GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
- emit_insn (gen_ashldi3_media (cxtload, cxtload, GEN_INT (2)));
+ emit_insn (gen_ashldi3_media (cxtload, cxtload, const2_rtx));
if (TARGET_LITTLE_ENDIAN)
{
emit_insn (gen_mshflo_l_di (quad1, ptabs, cxtload));
static bool
sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
- return (decl
+ return (decl
&& (! TARGET_SHCOMPACT
|| current_function_args_info.stack_regs == 0)
&& ! sh_cfun_interrupt_handler_p ());
if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
shared[signature] = type;
}
- builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
- NULL, NULL_TREE);
+ lang_hooks.builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
+ NULL, NULL_TREE);
}
}
+/* Implements target hook vector_mode_supported_p. */
+bool
+sh_vector_mode_supported_p (enum machine_mode mode)
+{
+ if (TARGET_FPU_ANY
+ && ((mode == V2SFmode)
+ || (mode == V4SFmode)
+ || (mode == V16SFmode)))
+ return true;
+
+ else if (TARGET_SHMEDIA
+ && ((mode == V8QImode)
+ || (mode == V2HImode)
+ || (mode == V4HImode)
+ || (mode == V2SImode)))
+ return true;
+
+ return false;
+}
+
+/* Implements target hook dwarf_calling_convention. Return an enum
+ dwarf_calling_convention value. */
+int
+sh_dwarf_calling_convention (tree func)
+{
+ if (sh_attr_renesas_p (func))
+ return DW_CC_GNU_renesas_sh;
+
+ return DW_CC_normal;
+}
+
static void
sh_init_builtins (void)
{
sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
enum reg_class class)
{
+ /* We want to enable the use of SUBREGs as a means to
+ VEC_SELECT a single element of a vector. */
+ if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
+ return (reg_classes_intersect_p (GENERAL_REGS, class));
+
if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
{
- if (TARGET_LITTLE_ENDIAN)
- {
- if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
- return reg_classes_intersect_p (DF_REGS, class);
- }
- else
- {
- if (GET_MODE_SIZE (from) < 8)
- return reg_classes_intersect_p (DF_HI_REGS, class);
- }
+ if (TARGET_LITTLE_ENDIAN)
+ {
+ if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
+ return reg_classes_intersect_p (DF_REGS, class);
+ }
+ else
+ {
+ if (GET_MODE_SIZE (from) < 8)
+ return reg_classes_intersect_p (DF_HI_REGS, class);
+ }
}
return 0;
}
return 4;
if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
- || (dstclass== MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
+ || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
return 9;
if ((REGCLASS_HAS_FP_REG (dstclass)
&& REGCLASS_HAS_GENERAL_REG (srcclass))
|| (REGCLASS_HAS_GENERAL_REG (dstclass)
&& REGCLASS_HAS_FP_REG (srcclass)))
- return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
- * ((GET_MODE_SIZE (mode) + 7) / 8U));
+ return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
+ * ((GET_MODE_SIZE (mode) + 7) / 8U));
if ((dstclass == FPUL_REGS
&& REGCLASS_HAS_GENERAL_REG (srcclass))
if ((dstclass == FPUL_REGS
&& (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
- || (srcclass == FPUL_REGS
+ || (srcclass == FPUL_REGS
&& (dstclass == PR_REGS || dstclass == MAC_REGS)))
return 7;
epilogue_completed = 1;
no_new_pseudos = 1;
current_function_uses_only_leaf_regs = 1;
+ reset_block_changes ();
emit_note (NOTE_INSN_PROLOGUE_END);
/* Find the "this" pointer. We have such a wide range of ABIs for the
SH that it's best to do this completely machine independently.
- "this" is passed as first argument, unless a structure return pointer
+ "this" is passed as first argument, unless a structure return pointer
comes first, in which case "this" comes second. */
- INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0);
+ INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
#ifndef PCC_STATIC_STRUCT_RETURN
if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
structure_value_byref = 1;
#endif /* not PCC_STATIC_STRUCT_RETURN */
if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
- {
+ {
tree ptype = build_pointer_type (TREE_TYPE (funtype));
FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
abort (); /* FIXME */
emit_load_ptr (scratch0, offset_addr);
- if (Pmode != ptr_mode)
+ if (Pmode != ptr_mode)
scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
emit_insn (gen_add2_insn (this, scratch0));
}
if (optimize > 0 && flag_schedule_insns_after_reload)
{
-
- find_basic_blocks (insns, max_reg_num (), rtl_dump_file);
- life_analysis (insns, rtl_dump_file, PROP_FINAL);
+ if (! basic_block_info)
+ init_flow ();
+ rtl_register_cfg_hooks ();
+ find_basic_blocks (insns, max_reg_num (), dump_file);
+ life_analysis (dump_file, PROP_FINAL);
split_all_insns (1);
- schedule_insns (rtl_dump_file);
+ schedule_insns (dump_file);
}
sh_reorg ();
if (optimize > 0 && flag_delayed_branch)
- dbr_schedule (insns, rtl_dump_file);
+ dbr_schedule (insns, dump_file);
shorten_branches (insns);
final_start_function (insns, file, 1);
final (insns, file, 1, 0);
if (optimize > 0 && flag_schedule_insns_after_reload)
{
/* Release all memory allocated by flow. */
- free_basic_block_vars (0);
-
- /* Release all memory held by regsets now. */
- regset_release_memory ();
+ free_basic_block_vars ();
}
reload_completed = 0;
{
emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
emit_insn (gen_subc (result, result, result));
- emit_insn (gen_addsi3 (result, result, GEN_INT (1)));
+ emit_insn (gen_addsi3 (result, result, const1_rtx));
}
else if (code == EQ || code == NE)
emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
break;
if (! INSN_P (insn))
continue;
-
+
if (GET_CODE (PATTERN (insn)) == SEQUENCE)
insn = XVECEXP (PATTERN (insn), 0, 0);
if (GET_CODE (PATTERN (insn)) != PARALLEL
abort ();
}
+/* Returns 1 if OP is a MEM that can be the source of a simple move
+ operation. */
+
+int
+unaligned_load_operand (rtx op, enum machine_mode mode)
+{
+ rtx inside;
+
+ if (GET_CODE (op) != MEM || GET_MODE (op) != mode)
+ return 0;
+
+ inside = XEXP (op, 0);
+
+ if (GET_CODE (inside) == POST_INC)
+ inside = XEXP (inside, 0);
+
+ if (GET_CODE (inside) == REG)
+ return 1;
+
+ return 0;
+}
+
+/* This function returns a constant rtx that represents 2**15 / pi in
+ SFmode. It's used to scale SFmode angles, in radians, to a
+ fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
+ maps to 0x10000). */
+
+static GTY(()) rtx sh_fsca_sf2int_rtx;
+
+rtx
+sh_fsca_sf2int (void)
+{
+ if (! sh_fsca_sf2int_rtx)
+ {
+ REAL_VALUE_TYPE rv;
+
+ real_from_string (&rv, "10430.378350470453");
+ sh_fsca_sf2int_rtx = const_double_from_real_value (rv, SFmode);
+ }
+
+ return sh_fsca_sf2int_rtx;
+}
+
+/* This function returns a constant rtx that represents 2**15 / pi in
+ DFmode. It's used to scale DFmode angles, in radians, to a
+ fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
+ maps to 0x10000). */
+
+static GTY(()) rtx sh_fsca_df2int_rtx;
+
+rtx
+sh_fsca_df2int (void)
+{
+ if (! sh_fsca_df2int_rtx)
+ {
+ REAL_VALUE_TYPE rv;
+
+ real_from_string (&rv, "10430.378350470453");
+ sh_fsca_df2int_rtx = const_double_from_real_value (rv, DFmode);
+ }
+
+ return sh_fsca_df2int_rtx;
+}
+
+/* This function returns a constant rtx that represents pi / 2**15 in
+ SFmode. It's used to scale a fixed-point signed 16.16-bit fraction
+ of a full circle back to an SFmode value (i.e., 0x10000 maps to
+ 2*pi). */
+
+static GTY(()) rtx sh_fsca_int2sf_rtx;
+
+rtx
+sh_fsca_int2sf (void)
+{
+ if (! sh_fsca_int2sf_rtx)
+ {
+ REAL_VALUE_TYPE rv;
+
+ real_from_string (&rv, "9.587379924285257e-5");
+ sh_fsca_int2sf_rtx = const_double_from_real_value (rv, SFmode);
+ }
+
+ return sh_fsca_int2sf_rtx;
+}
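+/* Editor's arithmetic check, not part of the patch:
+ 2**15 / pi = 32768 / 3.14159265... = 10430.378350470453, the
+ radians-to-fraction scale in sh_fsca_sf2int and sh_fsca_df2int, and
+ pi / 2**15 = 3.14159265... / 32768 = 9.587379924285257e-5, its
+ reciprocal in sh_fsca_int2sf; scaling by the first and then the
+ second round-trips an angle to its original value. */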
+
+/* Initialize the CUMULATIVE_ARGS structure. */
+
+void
+sh_init_cumulative_args (CUMULATIVE_ARGS * pcum,
+ tree fntype,
+ rtx libname ATTRIBUTE_UNUSED,
+ tree fndecl,
+ signed int n_named_args,
+ enum machine_mode mode)
+{
+ pcum->arg_count [(int) SH_ARG_FLOAT] = 0;
+ pcum->free_single_fp_reg = 0;
+ pcum->stack_regs = 0;
+ pcum->byref_regs = 0;
+ pcum->byref = 0;
+ pcum->outgoing = (n_named_args == -1) ? 0 : 1;
+
+ /* XXX - Should we check TARGET_HITACHI here ??? */
+ pcum->renesas_abi = sh_attr_renesas_p (fntype) ? 1 : 0;
+
+ if (fntype)
+ {
+ pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
+ && aggregate_value_p (TREE_TYPE (fntype), fndecl));
+ pcum->prototype_p = TYPE_ARG_TYPES (fntype) ? TRUE : FALSE;
+ pcum->arg_count [(int) SH_ARG_INT]
+ = TARGET_SH5 && aggregate_value_p (TREE_TYPE (fntype), fndecl);
+
+ pcum->call_cookie
+ = CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
+ && pcum->arg_count [(int) SH_ARG_INT] == 0
+ && (TYPE_MODE (TREE_TYPE (fntype)) == BLKmode
+ ? int_size_in_bytes (TREE_TYPE (fntype))
+ : GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (fntype)))) > 4
+ && (BASE_RETURN_VALUE_REG (TYPE_MODE (TREE_TYPE (fntype)))
+ == FIRST_RET_REG));
+ }
+ else
+ {
+ pcum->arg_count [(int) SH_ARG_INT] = 0;
+ pcum->prototype_p = FALSE;
+ if (mode != VOIDmode)
+ {
+ pcum->call_cookie =
+ CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
+ && GET_MODE_SIZE (mode) > 4
+ && BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG);
+
+ /* If the default ABI is the Renesas ABI then all library
+ calls must assume that the library will be using the
+ Renesas ABI. So if the function would return its result
+ in memory then we must force the address of this memory
+ block onto the stack. Ideally we would like to call
+ targetm.calls.return_in_memory() here but we do not have
+ the TYPE or the FNDECL available so we synthesize the
+ contents of that function as best we can. */
+ pcum->force_mem =
+ (TARGET_DEFAULT & HITACHI_BIT)
+ && (mode == BLKmode
+ || (GET_MODE_SIZE (mode) > 4
+ && !(mode == DFmode
+ && TARGET_FPU_DOUBLE)));
+ }
+ else
+ {
+ pcum->call_cookie = 0;
+ pcum->force_mem = FALSE;
+ }
+ }
+}
+
#include "gt-sh.h"