/* Output routines for GCC for Renesas / SuperH SH.
Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
- 2003, 2004, 2005 Free Software Foundation, Inc.
+ 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
Contributed by Steve Chamberlain (sac@cygnus.com).
Improved by Jim Wilson (wilson@cygnus.com).
#include "ggc.h"
#include "tree-gimple.h"
#include "cfgloop.h"
+#include "alloc-pool.h"
int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;
/* Set to 1 by expand_prologue() when the function is an interrupt handler. */
int current_function_interrupt;
-/* ??? The pragma interrupt support will not work for SH3. */
-/* This is set by #pragma interrupt and #pragma trapa, and causes gcc to
- output code for the next function appropriate for an interrupt handler. */
-int pragma_interrupt;
-
-/* This is set by the trap_exit attribute for functions. It specifies
- a trap number to be used in a trapa instruction at function exit
- (instead of an rte instruction). */
-int trap_exit;
-
-/* This is used by the sp_switch attribute for functions. It specifies
- a variable holding the address of the stack the interrupt function
- should switch to/from at entry/exit. */
-rtx sp_switch;
-
-/* This is set by #pragma trapa, and is similar to the above, except that
- the compiler doesn't emit code to preserve all registers. */
-static int pragma_trapa;
-
-/* This is set by #pragma nosave_low_regs. This is useful on the SH3,
- which has a separate set of low regs for User and Supervisor modes.
- This should only be used for the lowest level of interrupts. Higher levels
- of interrupts must save the registers in case they themselves are
- interrupted. */
-int pragma_nosave_low_regs;
-
-/* This is used for communication between TARGET_SETUP_INCOMING_VARARGS and
- sh_expand_prologue. */
-int current_function_anonymous_args;
+tree sh_deferred_function_attributes;
+tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
/* Global variables for machine-dependent things. */
#define TARGET_ADJUST_UNROLL_MAX sh_adjust_unroll_max
#endif
+#undef TARGET_SECONDARY_RELOAD
+#define TARGET_SECONDARY_RELOAD sh_secondary_reload
+
struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Implement TARGET_HANDLE_OPTION. */
switch (code)
{
+ tree trapa_attr;
+
case '.':
if (final_sequence
&& ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
break;
case '@':
- if (trap_exit)
- fprintf (stream, "trapa #%d", trap_exit);
+ trapa_attr = lookup_attribute ("trap_exit",
+ DECL_ATTRIBUTES (current_function_decl));
+ if (trapa_attr)
+ fprintf (stream, "trapa #%ld",
+ (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr))));
else if (sh_cfun_interrupt_handler_p ())
fprintf (stream, "rte");
else
case CONST:
if (TARGET_SHMEDIA
- && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+ && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
+ || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
&& (GET_MODE (XEXP (x, 0)) == DImode
|| GET_MODE (XEXP (x, 0)) == SImode)
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == TRUNCATE
&& GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode)
{
rtx val = XEXP (XEXP (XEXP (x, 0), 0), 0);
+ rtx val2 = val;
+ bool nested_expr = false;
fputc ('(', stream);
if (GET_CODE (val) == ASHIFTRT)
{
fputc ('(', stream);
- if (GET_CODE (XEXP (val, 0)) == CONST)
- fputc ('(', stream);
- output_addr_const (stream, XEXP (val, 0));
- if (GET_CODE (XEXP (val, 0)) == CONST)
- fputc (')', stream);
+ val2 = XEXP (val, 0);
+ }
+ if (GET_CODE (val2) == CONST
+ || GET_RTX_CLASS (GET_CODE (val2)) != RTX_OBJ)
+ {
+ fputc ('(', stream);
+ nested_expr = true;
+ }
+ output_addr_const (stream, val2);
+ if (nested_expr)
+ fputc (')', stream);
+ if (GET_CODE (val) == ASHIFTRT)
+ {
fputs (" >> ", stream);
output_addr_const (stream, XEXP (val, 1));
fputc (')', stream);
}
- else
- {
- if (GET_CODE (val) == CONST)
- fputc ('(', stream);
- output_addr_const (stream, val);
- if (GET_CODE (val) == CONST)
- fputc (')', stream);
- }
fputs (" & 65535)", stream);
break;
}
if (mode == Pmode || mode == ptr_mode)
{
- rtx op0, op1;
+ rtx op0, op1, opc;
enum tls_model tls_kind;
op0 = operands[0];
op1 = operands[1];
+ if (GET_CODE (op1) == CONST
+ && GET_CODE (XEXP (op1, 0)) == PLUS
+ && tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode))
+ {
+ opc = XEXP (XEXP (op1, 0), 1);
+ op1 = XEXP (XEXP (op1, 0), 0);
+ }
+ else
+ opc = NULL_RTX;
+
if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
{
rtx tga_op1, tga_ret, tmp, tmp2;
default:
gcc_unreachable ();
}
+ if (opc)
+ emit_insn (gen_addsi3 (op1, op1, force_reg (SImode, opc)));
operands[1] = op1;
}
}
else
/* Switch to the data section so that the coffsem symbol
isn't in the text section. */
- data_section ();
+ switch_to_section (data_section);
if (TARGET_LITTLE_ENDIAN)
fputs ("\t.little\n", asm_out_file);
if (TARGET_SHMEDIA)
{
- if ((GET_CODE (XEXP (x, 1)) == CONST_INT
- && CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
- || EXTRA_CONSTRAINT_C16 (XEXP (x, 1)))
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (CONST_OK_FOR_I10 (INTVAL (XEXP (x, 1)))
+ || CONST_OK_FOR_J16 (INTVAL (XEXP (x, 1)))))
return 1;
else
- return 2;
+ return 1 + rtx_cost (XEXP (x, 1), AND);
}
/* These constants are single cycle extu.[bw] instructions. */
else if (CONST_OK_FOR_I16 (INTVAL (x) >> 16))
*total = COSTS_N_INSNS ((outer_code != SET) + 1);
else if (CONST_OK_FOR_I16 ((INTVAL (x) >> 16) >> 16))
- *total = COSTS_N_INSNS (3);
+ *total = COSTS_N_INSNS ((outer_code != SET) + 2);
else
- *total = COSTS_N_INSNS (4);
+ *total = COSTS_N_INSNS ((outer_code != SET) + 3);
return true;
}
if (CONST_OK_FOR_I08 (INTVAL (x)))
}
\f
+/* Pool from which label_ref_list_d nodes are allocated.  It is
+   created at the start of the machine-dependent reorg pass and
+   freed again at its end (see the create_alloc_pool /
+   free_alloc_pool calls below).  */
+static alloc_pool label_ref_list_pool;
+
+/* A singly-linked list of labels.  Used for the WEND chains of
+   constant-pool entries, replacing the earlier chain of LABEL_REF
+   rtxes so no garbage-collected rtl is needed for this bookkeeping.  */
+typedef struct label_ref_list_d
+{
+  rtx label;
+  struct label_ref_list_d *next;
+} *label_ref_list_t;
+
/* The SH cannot load a large constant into a register, constants have to
come from a pc relative load. The reference of a pc relative load
instruction must be less than 1k in front of the instruction. This
{
rtx value; /* Value in table. */
rtx label; /* Label of value. */
- rtx wend; /* End of window. */
+ label_ref_list_t wend; /* End of window. */
enum machine_mode mode; /* Mode of value. */
/* True if this constant is accessed as part of a post-increment
add_constant (rtx x, enum machine_mode mode, rtx last_value)
{
int i;
- rtx lab, new, ref, newref;
+ rtx lab, new;
+ label_ref_list_t ref, newref;
/* First see if we've already got it. */
for (i = 0; i < pool_size; i++)
}
if (lab && pool_window_label)
{
- newref = gen_rtx_LABEL_REF (VOIDmode, pool_window_label);
+ newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
+ newref->label = pool_window_label;
ref = pool_vector[pool_window_last].wend;
- LABEL_NEXTREF (newref) = ref;
+ newref->next = ref;
pool_vector[pool_window_last].wend = newref;
}
if (new)
lab = gen_label_rtx ();
pool_vector[pool_size].mode = mode;
pool_vector[pool_size].label = lab;
- pool_vector[pool_size].wend = NULL_RTX;
+ pool_vector[pool_size].wend = NULL;
pool_vector[pool_size].part_of_sequence_p = (lab == 0);
if (lab && pool_window_label)
{
- newref = gen_rtx_LABEL_REF (VOIDmode, pool_window_label);
+ newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
+ newref->label = pool_window_label;
ref = pool_vector[pool_window_last].wend;
- LABEL_NEXTREF (newref) = ref;
+ newref->next = ref;
pool_vector[pool_window_last].wend = newref;
}
if (lab)
rtx scan = barrier;
int i;
int need_align = 1;
- rtx lab, ref;
+ rtx lab;
+ label_ref_list_t ref;
int have_df = 0;
/* Do two passes, first time dump out the HI sized constants. */
scan = emit_label_after (lab, scan);
scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
scan);
- for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
+ for (ref = p->wend; ref; ref = ref->next)
{
- lab = XEXP (ref, 0);
+ lab = ref->label;
scan = emit_insn_after (gen_consttable_window_end (lab), scan);
}
}
emit_label_before (lab, align_insn);
emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
align_insn);
- for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
+ for (ref = p->wend; ref; ref = ref->next)
{
- lab = XEXP (ref, 0);
+ lab = ref->label;
emit_insn_before (gen_consttable_window_end (lab),
align_insn);
}
if (p->mode != HImode)
{
- for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
+ for (ref = p->wend; ref; ref = ref->next)
{
- lab = XEXP (ref, 0);
+ lab = ref->label;
scan = emit_insn_after (gen_consttable_window_end (lab),
scan);
}
if (p->mode != HImode)
{
- for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
+ for (ref = p->wend; ref; ref = ref->next)
{
- lab = XEXP (ref, 0);
+ lab = ref->label;
scan = emit_insn_after (gen_consttable_window_end (lab), scan);
}
}
gcc_assert (worker
&& GET_CODE (worker) != CODE_LABEL
&& GET_CODE (worker) != JUMP_INSN);
- } while (recog_memoized (worker) != CODE_FOR_casesi_worker_1);
+ } while (GET_CODE (worker) == NOTE
+ || recog_memoized (worker) != CODE_FOR_casesi_worker_1);
wpat = PATTERN (worker);
wpat0 = XVECEXP (wpat, 0, 0);
wpat1 = XVECEXP (wpat, 0, 1);
mdep_reorg_phase = SH_SHORTEN_BRANCHES0;
shorten_branches (first);
}
+
/* Scan the function looking for move instructions which have to be
changed to pc-relative loads and insert the literal tables. */
-
+ label_ref_list_pool = create_alloc_pool ("label references list",
+ sizeof (struct label_ref_list_d),
+ 30);
mdep_reorg_phase = SH_FIXUP_PCLOAD;
for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
{
insn = barrier;
}
}
-
+ free_alloc_pool (label_ref_list_pool);
+
mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
INSN_ADDRESSES_FREE ();
split_branches (first);
{
unsigned int reg;
int count;
- int interrupt_handler;
+ tree attrs;
+ bool interrupt_or_trapa_handler, trapa_handler, interrupt_handler;
+ bool nosave_low_regs;
int pr_live, has_call;
- interrupt_handler = sh_cfun_interrupt_handler_p ();
+ attrs = DECL_ATTRIBUTES (current_function_decl);
+ interrupt_or_trapa_handler = sh_cfun_interrupt_handler_p ();
+ trapa_handler = lookup_attribute ("trapa_handler", attrs) != NULL_TREE;
+ interrupt_handler = interrupt_or_trapa_handler && ! trapa_handler;
+ nosave_low_regs = lookup_attribute ("nosave_low_regs", attrs) != NULL_TREE;
CLEAR_HARD_REG_SET (*live_regs_mask);
if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && interrupt_handler
for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
if (regs_ever_live[reg] && regs_ever_live[reg+1]
&& (! call_really_used_regs[reg]
- || (interrupt_handler && ! pragma_trapa))
+ || interrupt_handler)
&& ++count > 2)
{
target_flags &= ~MASK_FPU_SINGLE;
{
if (reg == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
? pr_live
- : (interrupt_handler && ! pragma_trapa)
+ : interrupt_handler
? (/* Need to save all the regs ever live. */
(regs_ever_live[reg]
|| (call_really_used_regs[reg]
&& (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
|| reg == PIC_OFFSET_TABLE_REGNUM)
&& has_call)
- || (has_call && REGISTER_NATURAL_MODE (reg) == SImode
+ || (TARGET_SHMEDIA && has_call
+ && REGISTER_NATURAL_MODE (reg) == SImode
&& (GENERAL_REGISTER_P (reg) || TARGET_REGISTER_P (reg))))
&& reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
&& reg != RETURN_ADDRESS_POINTER_REGNUM
&& flag_pic
&& current_function_args_info.call_cookie
&& reg == PIC_OFFSET_TABLE_REGNUM)
- || (regs_ever_live[reg] && ! call_really_used_regs[reg])
+ || (regs_ever_live[reg]
+ && (!call_really_used_regs[reg]
+ || (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
|| (current_function_calls_eh_return
&& (reg == EH_RETURN_DATA_REGNO (0)
|| reg == EH_RETURN_DATA_REGNO (1)
}
}
}
+ if (nosave_low_regs && reg == R8_REG)
+ break;
}
/* If we have a target register optimization pass after prologue / epilogue
threading, we need to assume all target registers will be live even if
int d_rounding = 0;
int save_flags = target_flags;
int pretend_args;
+ tree sp_switch_attr
+ = lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl));
current_function_interrupt = sh_cfun_interrupt_handler_p ();
}
/* If we're supposed to switch stacks at function entry, do so now. */
- if (sp_switch)
- emit_insn (gen_sp_switch_1 ());
+ if (sp_switch_attr)
+ {
+ /* The argument specifies a variable holding the address of the
+ stack the interrupt function should switch to/from at entry/exit. */
+ const char *s
+ = ggc_strdup (TREE_STRING_POINTER (TREE_VALUE (sp_switch_attr)));
+ rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);
+
+ emit_insn (gen_sp_switch_1 (sp_switch));
+ }
d = calc_live_regs (&live_regs_mask);
/* ??? Maybe we could save some switching if we can move a mode switch
EH_RETURN_STACKADJ_RTX));
/* Switch back to the normal stack if necessary. */
- if (sp_switch)
+ if (lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl)))
emit_insn (gen_sp_switch_2 ());
/* Tell flow the insn that pops PR isn't dead. */
sh_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
- trap_exit = pragma_interrupt = pragma_trapa = pragma_nosave_low_regs = 0;
sh_need_epilogue_known = 0;
- sp_switch = NULL_RTX;
}
static rtx
f_next_fp_limit = TREE_CHAIN (f_next_fp);
f_next_stack = TREE_CHAIN (f_next_fp_limit);
- next_o = build (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
- NULL_TREE);
- next_o_limit = build (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
- valist, f_next_o_limit, NULL_TREE);
- next_fp = build (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
+ next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
NULL_TREE);
- next_fp_limit = build (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
- valist, f_next_fp_limit, NULL_TREE);
- next_stack = build (COMPONENT_REF, TREE_TYPE (f_next_stack),
- valist, f_next_stack, NULL_TREE);
+ next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
+ valist, f_next_o_limit, NULL_TREE);
+ next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
+ NULL_TREE);
+ next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
+ valist, f_next_fp_limit, NULL_TREE);
+ next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
+ valist, f_next_stack, NULL_TREE);
/* Call __builtin_saveregs. */
u = make_tree (ptr_type_node, expand_builtin_saveregs ());
- t = build (MODIFY_EXPR, ptr_type_node, next_fp, u);
+ t = build2 (MODIFY_EXPR, ptr_type_node, next_fp, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
nfp = 8 - nfp;
else
nfp = 0;
- u = fold (build (PLUS_EXPR, ptr_type_node, u,
- build_int_cst (NULL_TREE, UNITS_PER_WORD * nfp)));
- t = build (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
+ u = fold_build2 (PLUS_EXPR, ptr_type_node, u,
+ build_int_cst (NULL_TREE, UNITS_PER_WORD * nfp));
+ t = build2 (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
- t = build (MODIFY_EXPR, ptr_type_node, next_o, u);
+ t = build2 (MODIFY_EXPR, ptr_type_node, next_o, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
nint = 4 - nint;
else
nint = 0;
- u = fold (build (PLUS_EXPR, ptr_type_node, u,
- build_int_cst (NULL_TREE, UNITS_PER_WORD * nint)));
- t = build (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
+ u = fold_build2 (PLUS_EXPR, ptr_type_node, u,
+ build_int_cst (NULL_TREE, UNITS_PER_WORD * nint));
+ t = build2 (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
u = make_tree (ptr_type_node, nextarg);
- t = build (MODIFY_EXPR, ptr_type_node, next_stack, u);
+ t = build2 (MODIFY_EXPR, ptr_type_node, next_stack, u);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
tree tmp, pptr_type_node;
tree addr, lab_over = NULL, result = NULL;
int pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
+ tree eff_type;
if (pass_by_ref)
type = build_pointer_type (type);
f_next_fp_limit = TREE_CHAIN (f_next_fp);
f_next_stack = TREE_CHAIN (f_next_fp_limit);
- next_o = build (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
- NULL_TREE);
- next_o_limit = build (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
- valist, f_next_o_limit, NULL_TREE);
- next_fp = build (COMPONENT_REF, TREE_TYPE (f_next_fp),
- valist, f_next_fp, NULL_TREE);
- next_fp_limit = build (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
- valist, f_next_fp_limit, NULL_TREE);
- next_stack = build (COMPONENT_REF, TREE_TYPE (f_next_stack),
- valist, f_next_stack, NULL_TREE);
+ next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
+ NULL_TREE);
+ next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
+ valist, f_next_o_limit, NULL_TREE);
+ next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp),
+ valist, f_next_fp, NULL_TREE);
+ next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
+ valist, f_next_fp_limit, NULL_TREE);
+ next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
+ valist, f_next_stack, NULL_TREE);
/* Structures with a single member with a distinct mode are passed
like their member. This is relevant if the latter has a REAL_TYPE
or COMPLEX_TYPE type. */
- while (TREE_CODE (type) == RECORD_TYPE
- && (member = find_sole_member (type))
+ eff_type = type;
+ while (TREE_CODE (eff_type) == RECORD_TYPE
+ && (member = find_sole_member (eff_type))
&& (TREE_CODE (TREE_TYPE (member)) == REAL_TYPE
|| TREE_CODE (TREE_TYPE (member)) == COMPLEX_TYPE
|| TREE_CODE (TREE_TYPE (member)) == RECORD_TYPE))
{
tree field_type = TREE_TYPE (member);
- if (TYPE_MODE (type) == TYPE_MODE (field_type))
- type = field_type;
+ if (TYPE_MODE (eff_type) == TYPE_MODE (field_type))
+ eff_type = field_type;
else
{
- gcc_assert ((TYPE_ALIGN (type)
+ gcc_assert ((TYPE_ALIGN (eff_type)
< GET_MODE_ALIGNMENT (TYPE_MODE (field_type)))
- || (TYPE_ALIGN (type)
+ || (TYPE_ALIGN (eff_type)
> GET_MODE_BITSIZE (TYPE_MODE (field_type))));
break;
}
if (TARGET_SH4)
{
- pass_as_float = ((TREE_CODE (type) == REAL_TYPE && size <= 8)
- || (TREE_CODE (type) == COMPLEX_TYPE
- && TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
+ pass_as_float = ((TREE_CODE (eff_type) == REAL_TYPE && size <= 8)
+ || (TREE_CODE (eff_type) == COMPLEX_TYPE
+ && TREE_CODE (TREE_TYPE (eff_type)) == REAL_TYPE
&& size <= 16));
}
else
{
- pass_as_float = (TREE_CODE (type) == REAL_TYPE && size == 4);
+ pass_as_float = (TREE_CODE (eff_type) == REAL_TYPE && size == 4);
}
addr = create_tmp_var (pptr_type_node, NULL);
{
tree next_fp_tmp = create_tmp_var (TREE_TYPE (f_next_fp), NULL);
tree cmp;
- bool is_double = size == 8 && TREE_CODE (type) == REAL_TYPE;
+ bool is_double = size == 8 && TREE_CODE (eff_type) == REAL_TYPE;
tmp = build1 (ADDR_EXPR, pptr_type_node, next_fp);
tmp = build2 (MODIFY_EXPR, void_type_node, addr, tmp);
if (size > 4 && !is_double)
tmp = build2 (PLUS_EXPR, TREE_TYPE (tmp), tmp,
fold_convert (TREE_TYPE (tmp), size_int (4 - size)));
- tmp = build (GE_EXPR, boolean_type_node, next_fp_tmp, tmp);
- cmp = build (COND_EXPR, void_type_node, tmp,
- build (GOTO_EXPR, void_type_node, lab_false),
- NULL);
+ tmp = build2 (GE_EXPR, boolean_type_node, next_fp_tmp, tmp);
+ cmp = build3 (COND_EXPR, void_type_node, tmp,
+ build1 (GOTO_EXPR, void_type_node, lab_false),
+ NULL_TREE);
if (!is_double)
gimplify_and_add (cmp, pre_p);
- if (TYPE_ALIGN (type) > BITS_PER_WORD || (is_double || size == 16))
+ if (TYPE_ALIGN (eff_type) > BITS_PER_WORD
+ || (is_double || size == 16))
{
tmp = fold_convert (ptr_type_node, size_int (UNITS_PER_WORD));
- tmp = build (BIT_AND_EXPR, ptr_type_node, next_fp_tmp, tmp);
- tmp = build (PLUS_EXPR, ptr_type_node, next_fp_tmp, tmp);
- tmp = build (MODIFY_EXPR, ptr_type_node, next_fp_tmp, tmp);
+ tmp = build2 (BIT_AND_EXPR, ptr_type_node, next_fp_tmp, tmp);
+ tmp = build2 (PLUS_EXPR, ptr_type_node, next_fp_tmp, tmp);
+ tmp = build2 (MODIFY_EXPR, ptr_type_node, next_fp_tmp, tmp);
gimplify_and_add (tmp, pre_p);
}
if (is_double)
gimplify_and_add (cmp, pre_p);
#ifdef FUNCTION_ARG_SCmode_WART
- if (TYPE_MODE (type) == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
+ if (TYPE_MODE (eff_type) == SCmode
+ && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
{
- tree subtype = TREE_TYPE (type);
+ tree subtype = TREE_TYPE (eff_type);
tree real, imag;
imag
= std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
real = get_initialized_tmp_var (real, pre_p, NULL);
- result = build (COMPLEX_EXPR, type, real, imag);
+ result = build2 (COMPLEX_EXPR, type, real, imag);
result = get_initialized_tmp_var (result, pre_p, NULL);
}
#endif /* FUNCTION_ARG_SCmode_WART */
- tmp = build (GOTO_EXPR, void_type_node, lab_over);
+ tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
gimplify_and_add (tmp, pre_p);
- tmp = build (LABEL_EXPR, void_type_node, lab_false);
+ tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
gimplify_and_add (tmp, pre_p);
tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
- tmp = build (MODIFY_EXPR, void_type_node, addr, tmp);
+ tmp = build2 (MODIFY_EXPR, void_type_node, addr, tmp);
gimplify_and_add (tmp, pre_p);
tmp = build2 (MODIFY_EXPR, ptr_type_node, next_fp_tmp, valist);
gimplify_and_add (tmp, pre_p);
else
{
tmp = fold_convert (ptr_type_node, size_int (rsize));
- tmp = build (PLUS_EXPR, ptr_type_node, next_o, tmp);
- tmp = build (GT_EXPR, boolean_type_node, tmp, next_o_limit);
- tmp = build (COND_EXPR, void_type_node, tmp,
- build (GOTO_EXPR, void_type_node, lab_false),
- NULL);
+ tmp = build2 (PLUS_EXPR, ptr_type_node, next_o, tmp);
+ tmp = build2 (GT_EXPR, boolean_type_node, tmp, next_o_limit);
+ tmp = build3 (COND_EXPR, void_type_node, tmp,
+ build1 (GOTO_EXPR, void_type_node, lab_false),
+ NULL_TREE);
gimplify_and_add (tmp, pre_p);
tmp = build1 (ADDR_EXPR, pptr_type_node, next_o);
- tmp = build (MODIFY_EXPR, void_type_node, addr, tmp);
+ tmp = build2 (MODIFY_EXPR, void_type_node, addr, tmp);
gimplify_and_add (tmp, pre_p);
- tmp = build (GOTO_EXPR, void_type_node, lab_over);
+ tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
gimplify_and_add (tmp, pre_p);
- tmp = build (LABEL_EXPR, void_type_node, lab_false);
+ tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
gimplify_and_add (tmp, pre_p);
if (size > 4 && ! TARGET_SH4)
{
- tmp = build (MODIFY_EXPR, ptr_type_node, next_o, next_o_limit);
+ tmp = build2 (MODIFY_EXPR, ptr_type_node, next_o, next_o_limit);
gimplify_and_add (tmp, pre_p);
}
tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
- tmp = build (MODIFY_EXPR, void_type_node, addr, tmp);
+ tmp = build2 (MODIFY_EXPR, void_type_node, addr, tmp);
gimplify_and_add (tmp, pre_p);
}
if (!result)
{
- tmp = build (LABEL_EXPR, void_type_node, lab_over);
+ tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
gimplify_and_add (tmp, pre_p);
}
}
tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
if (result)
{
- tmp = build (MODIFY_EXPR, void_type_node, result, tmp);
+ tmp = build2 (MODIFY_EXPR, void_type_node, result, tmp);
gimplify_and_add (tmp, pre_p);
- tmp = build (LABEL_EXPR, void_type_node, lab_over);
+ tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
gimplify_and_add (tmp, pre_p);
}
else
return total_auto_space;
}
\f
-/* Handle machine specific pragmas to be semi-compatible with Renesas
- compiler. */
-
-void
-sh_pr_interrupt (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
-{
- pragma_interrupt = 1;
-}
-
-void
-sh_pr_trapa (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
-{
- pragma_interrupt = pragma_trapa = 1;
-}
-
-void
-sh_pr_nosave_low_regs (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
-{
- pragma_nosave_low_regs = 1;
-}
-
-/* Generate 'handle_interrupt' attribute for decls */
-
+/* Insert any deferred function attributes from earlier pragmas. */
static void
sh_insert_attributes (tree node, tree *attributes)
{
- if (! pragma_interrupt
- || TREE_CODE (node) != FUNCTION_DECL)
+ tree attrs;
+
+ if (TREE_CODE (node) != FUNCTION_DECL)
return;
/* We are only interested in fields. */
if (!DECL_P (node))
return;
- /* Add a 'handle_interrupt' attribute. */
- * attributes = tree_cons (get_identifier ("interrupt_handler"), NULL, * attributes);
+ /* Append the attributes to the deferred attributes. */
+ *sh_deferred_function_attributes_tail = *attributes;
+ attrs = sh_deferred_function_attributes;
+ if (!attrs)
+ return;
+
+ /* Some attributes imply or require the interrupt attribute. */
+ if (!lookup_attribute ("interrupt_handler", attrs)
+ && !lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (node)))
+ {
+ /* If we have a trapa_handler, but no interrupt_handler attribute,
+ insert an interrupt_handler attribute. */
+ if (lookup_attribute ("trapa_handler", attrs) != NULL_TREE)
+ /* We can't use sh_pr_interrupt here because that's not in the
+ java frontend. */
+ attrs
+ = tree_cons (get_identifier("interrupt_handler"), NULL_TREE, attrs);
+ /* However, for sp_switch, trap_exit and nosave_low_regs, if the
+ interrupt attribute is missing, we ignore the attribute and warn. */
+ else if (lookup_attribute ("sp_switch", attrs)
+ || lookup_attribute ("trap_exit", attrs)
+ || lookup_attribute ("nosave_low_regs", attrs))
+ {
+ tree *tail;
+
+ for (tail = attributes; attrs; attrs = TREE_CHAIN (attrs))
+ {
+ if (is_attribute_p ("sp_switch", TREE_PURPOSE (attrs))
+ || is_attribute_p ("trap_exit", TREE_PURPOSE (attrs))
+ || is_attribute_p ("nosave_low_regs", TREE_PURPOSE (attrs)))
+ warning (OPT_Wattributes,
+ "%qs attribute only applies to interrupt functions",
+ IDENTIFIER_POINTER (TREE_PURPOSE (attrs)));
+ else
+ {
+ *tail = tree_cons (TREE_PURPOSE (attrs), NULL_TREE,
+ NULL_TREE);
+ tail = &TREE_CHAIN (*tail);
+ }
+ }
+ attrs = *attributes;
+ }
+ }
+
+ /* Install the processed list. */
+ *attributes = attrs;
+
+ /* Clear deferred attributes. */
+ sh_deferred_function_attributes = NULL_TREE;
+ sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
return;
}
interrupt_handler -- specifies this function is an interrupt handler.
+ trapa_handler - like above, but don't save all registers.
+
sp_switch -- specifies an alternate stack for an interrupt handler
to run on.
trap_exit -- use a trapa to exit an interrupt function instead of
an rte instruction.
+ nosave_low_regs - don't save r0..r7 in an interrupt handler.
+ This is useful on the SH3 and upwards,
+ which has a separate set of low regs for User and Supervisor modes.
+ This should only be used for the lowest level of interrupts. Higher levels
+ of interrupts must save the registers in case they themselves are
+ interrupted.
+
renesas -- use Renesas calling/layout conventions (functions and
structures).
{ "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
{ "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
{ "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
+ { "trapa_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
+ { "nosave_low_regs", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
#ifdef SYMBIAN
/* Symbian support adds three new attributes:
dllexport - for exporting a function/variable that will live in a dll
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
- else if (!pragma_interrupt)
- {
- /* The sp_switch attribute only has meaning for interrupt functions. */
- warning (OPT_Wattributes, "%qs attribute only applies to "
- "interrupt functions", IDENTIFIER_POINTER (name));
- *no_add_attrs = true;
- }
else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
{
/* The argument must be a constant string. */
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
- else
- {
- const char *s = ggc_strdup (TREE_STRING_POINTER (TREE_VALUE (args)));
- sp_switch = gen_rtx_SYMBOL_REF (VOIDmode, s);
- }
return NULL_TREE;
}
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
- else if (!pragma_interrupt)
- {
- /* The trap_exit attribute only has meaning for interrupt functions. */
- warning (OPT_Wattributes, "%qs attribute only applies to "
- "interrupt functions", IDENTIFIER_POINTER (name));
- *no_add_attrs = true;
- }
+ /* The argument specifies a trap number to be used in a trapa instruction
+ at function exit (instead of an rte instruction). */
else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
{
/* The argument must be a constant integer. */
"integer constant", IDENTIFIER_POINTER (name));
*no_add_attrs = true;
}
- else
- {
- trap_exit = TREE_INT_CST_LOW (TREE_VALUE (args));
- }
return NULL_TREE;
}
DECL_IGNORED_P (t) = 1;
DECL_EXTERNAL (t) = 1;
TREE_STATIC (t) = 1;
+ TREE_PUBLIC (t) = 1;
TREE_USED (t) = 1;
fpscr_values = t;
if (flag_schedule_insns_after_reload)
{
- life_analysis (dump_file, PROP_FINAL);
+ life_analysis (PROP_FINAL);
split_all_insns (1);
- schedule_insns (dump_file);
+ schedule_insns ();
}
/* We must split jmp insn in PIC case. */
else if (flag_pic)
sh_reorg ();
if (optimize > 0 && flag_delayed_branch)
- dbr_schedule (insns, dump_file);
+ dbr_schedule (insns);
shorten_branches (insns);
final_start_function (insns, file, 1);
if (TREE_CODE (type) != ARRAY_TYPE
|| ! TYPE_SIZE (type) || ! TYPE_SIZE_UNIT (type))
break;
- size_tree = fold (build (TRUNC_DIV_EXPR,
+ size_tree = fold_build2 (TRUNC_DIV_EXPR,
bitsizetype,
TYPE_SIZE (type),
- TYPE_SIZE_UNIT (type)));
+ TYPE_SIZE_UNIT (type));
if (TREE_CODE (size_tree) == INTEGER_CST
&& ! TREE_INT_CST_HIGH (size_tree)
&& TREE_INT_CST_LOW (size_tree) < max_iterations)
return fnaddr;
}
+/* Implement TARGET_SECONDARY_RELOAD (hooked up via the
+   TARGET_SECONDARY_RELOAD define above).  Return the register class
+   needed as an intermediate when reloading X into a register of
+   CLASS in MODE, or NO_REGS if none is needed.  When NO_REGS is
+   returned, a custom reload pattern may instead be requested by
+   setting SRI->icode.  IN_P is true for input (load) reloads.  */
+enum reg_class
+sh_secondary_reload (bool in_p, rtx x, enum reg_class class,
+		     enum machine_mode mode, secondary_reload_info *sri)
+{
+  if (in_p)
+    {
+      /* Loading a non-trivial immediate into an FP register: use a
+	 custom reload pattern for SFmode/DFmode, or go through FPUL
+	 for SImode.  fldi-loadable 0.0/1.0 SFmode constants need no
+	 help.  */
+      if (REGCLASS_HAS_FP_REG (class)
+	  && ! TARGET_SHMEDIA
+	  && immediate_operand ((x), mode)
+	  && ! ((fp_zero_operand (x) || fp_one_operand (x))
+		&& mode == SFmode && fldi_ok ()))
+	switch (mode)
+	  {
+	  case SFmode:
+	    sri->icode = CODE_FOR_reload_insf__frn;
+	    return NO_REGS;
+	  case DFmode:
+	    sri->icode = CODE_FOR_reload_indf__frn;
+	    return NO_REGS;
+	  case SImode:
+	    /* ??? If we knew that we are in the appropriate mode -
+	       single precision - we could use a reload pattern directly.  */
+	    return FPUL_REGS;
+	  default:
+	    abort ();
+	  }
+      /* MACL/MACH/T and reg+reg addresses cannot be moved into FPUL
+	 directly; bounce through a general register.  */
+      if (class == FPUL_REGS
+          && ((GET_CODE (x) == REG
+               && (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
+                   || REGNO (x) == T_REG))
+              || GET_CODE (x) == PLUS))
+        return GENERAL_REGS;
+      if (class == FPUL_REGS && immediate_operand (x, mode))
+	{
+	  /* Small (i08) constants fit a mov into a general register;
+	     anything else needs the dedicated reload pattern.  */
+	  if (GET_CODE (x) == CONST_INT && CONST_OK_FOR_I08 (INTVAL (x)))
+	    return GENERAL_REGS;
+	  sri->icode = CODE_FOR_reload_insi__i_fpul;
+	  return NO_REGS;
+	}
+      if (class == FPSCR_REGS
+          && ((GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
+              || (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == PLUS)))
+        return GENERAL_REGS;
+      /* On SHmedia, most immediates cannot be loaded straight into an
+	 FP register (zero and V4SFmode are the exceptions).  */
+      if (REGCLASS_HAS_FP_REG (class)
+          && TARGET_SHMEDIA
+          && immediate_operand (x, mode)
+          && x != CONST0_RTX (GET_MODE (x))
+          && GET_MODE (x) != V4SFmode)
+        return GENERAL_REGS;
+      if ((mode == QImode || mode == HImode)
+          && TARGET_SHMEDIA && inqhi_operand (x, mode))
+	{
+	  sri->icode = ((mode == QImode)
+			? CODE_FOR_reload_inqi : CODE_FOR_reload_inhi);
+	  return NO_REGS;
+	}
+      if (TARGET_SHMEDIA && class == GENERAL_REGS
+          && (GET_CODE (x) == LABEL_REF || PIC_DIRECT_ADDR_P (x)))
+        return TARGET_REGS;
+    } /* end of input-only processing.  */
+
+  /* The remaining cases apply to both input and output reloads.  */
+
+  /* General <-> FP register moves in SFmode/SImode go through FPUL.  */
+  if (((REGCLASS_HAS_FP_REG (class)
+	&& (GET_CODE (x) == REG
+	    && (GENERAL_OR_AP_REGISTER_P (REGNO (x))
+		|| (FP_REGISTER_P (REGNO (x)) && mode == SImode
+		    && TARGET_FMOVD))))
+       || (REGCLASS_HAS_GENERAL_REG (class)
+	   && GET_CODE (x) == REG
+	   && FP_REGISTER_P (REGNO (x))))
+      && ! TARGET_SHMEDIA
+      && (mode == SFmode || mode == SImode))
+    return FPUL_REGS;
+  if ((class == FPUL_REGS
+       || (REGCLASS_HAS_FP_REG (class)
+           && ! TARGET_SHMEDIA && mode == SImode))
+      && (GET_CODE (x) == MEM
+          || (GET_CODE (x) == REG
+              && (REGNO (x) >= FIRST_PSEUDO_REGISTER
+                  || REGNO (x) == T_REG
+                  || system_reg_operand (x, VOIDmode)))))
+    {
+      if (class == FPUL_REGS)
+	return GENERAL_REGS;
+      return FPUL_REGS;
+    }
+  /* Target registers only take Csy operands or general registers
+     directly; everything else bounces through GENERAL_REGS.  */
+  if ((class == TARGET_REGS
+       || (TARGET_SHMEDIA && class == SIBCALL_REGS))
+      && !EXTRA_CONSTRAINT_Csy (x)
+      && (GET_CODE (x) != REG || ! GENERAL_REGISTER_P (REGNO (x))))
+    return GENERAL_REGS;
+  if ((class == MAC_REGS || class == PR_REGS)
+      && GET_CODE (x) == REG && ! GENERAL_REGISTER_P (REGNO (x))
+      && class != REGNO_REG_CLASS (REGNO (x)))
+    return GENERAL_REGS;
+  if (class != GENERAL_REGS && GET_CODE (x) == REG
+      && TARGET_REGISTER_P (REGNO (x)))
+    return GENERAL_REGS;
+  return NO_REGS;
+}
+
+
enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;
/* This defines the storage for the variable part of a -mboard= option.