static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (const_tree);
static bool arm_must_pass_in_stack (enum machine_mode, const_tree);
+static bool arm_return_in_memory (const_tree, const_tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
static bool arm_tls_symbol_p (rtx x);
static int arm_issue_rate (void);
static void arm_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
+static bool arm_allocate_stack_slots_for_args (void);
\f
/* Initialize the GCC target structure. */
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
+#undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
+#define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS arm_allocate_stack_slots_for_args
+
#undef TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY arm_return_in_memory
+
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack
return cfun->machine->func_type;
}
+
+/* Implement TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS.  Must be `static' to
+   match the forward declaration emitted above.  */
+static bool
+arm_allocate_stack_slots_for_args (void)
+{
+  /* Naked functions should not allocate stack slots for arguments.  */
+  return !IS_NAKED (arm_current_func_type ());
+}
+
\f
/* Return 1 if it is possible to return using a single instruction.
If SIBLING is non-null, this is a test for a return before a sibling
if (crtl->args.pretend_args_size
|| cfun->machine->uses_anonymous_args
/* Or if the function calls __builtin_eh_return () */
- || current_function_calls_eh_return
+ || crtl->calls_eh_return
/* Or if the function calls alloca */
- || current_function_calls_alloca
+ || cfun->calls_alloca
/* Or if there is a stack adjustment. However, if the stack pointer
is saved on the stack, we can use a pre-incrementing stack load. */
|| !(stack_adjust == 0 || (TARGET_APCS_FRAME && frame_pointer_needed
}
/* Decide whether a type should be returned in memory (true)
- or in a register (false). This is called by the macro
- RETURN_IN_MEMORY. */
-int
-arm_return_in_memory (const_tree type)
+ or in a register (false). This is called as the target hook
+ TARGET_RETURN_IN_MEMORY. */
+static bool
+arm_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
HOST_WIDE_INT size;
/* ... Aggregates that are not themselves valid for returning in
a register are not allowed. */
- if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ if (arm_return_in_memory (TREE_TYPE (field), NULL_TREE))
return 1;
/* Now check the remaining fields, if any. Only bitfields are allowed,
if (FLOAT_TYPE_P (TREE_TYPE (field)))
return 1;
- if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ if (arm_return_in_memory (TREE_TYPE (field), NULL_TREE))
return 1;
}
We don't want those calls to affect any assumptions about the real
function; and further, we can't call entry_of_function() until we
start the real expansion process. */
- if (!current_function_uses_pic_offset_table)
+ if (!crtl->uses_pic_offset_table)
{
gcc_assert (can_create_pseudo_p ());
if (arm_pic_register != INVALID_REGNUM)
if we are being called as part of the cost-estimation
process. */
if (current_ir_type () != IR_GIMPLE)
- current_function_uses_pic_offset_table = 1;
+ crtl->uses_pic_offset_table = 1;
}
else
{
process. */
if (current_ir_type () != IR_GIMPLE)
{
- current_function_uses_pic_offset_table = 1;
+ crtl->uses_pic_offset_table = 1;
start_sequence ();
arm_load_pic_register (0UL);
rtx l1, labelno, pic_tmp, pic_tmp2, pic_rtx, pic_reg;
rtx global_offset_table;
- if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
+ if (crtl->uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
return;
gcc_assert (flag_pic);
/* Need to emit this whether or not we obey regdecls,
since setjmp/longjmp can cause life info to screw up. */
- emit_insn (gen_rtx_USE (VOIDmode, pic_reg));
+ emit_use (pic_reg);
}
if (flag_pic
&& !TARGET_SINGLE_PIC_BASE
&& arm_pic_register != INVALID_REGNUM
- && current_function_uses_pic_offset_table)
+ && crtl->uses_pic_offset_table)
save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
}
else
&& !TARGET_SINGLE_PIC_BASE
&& arm_pic_register != INVALID_REGNUM
&& (df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM)
- || current_function_uses_pic_offset_table))
+ || crtl->uses_pic_offset_table))
save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
/* The prologue will copy SP into R0, so save it. */
}
/* Save registers so the exception handler can modify them. */
- if (current_function_calls_eh_return)
+ if (crtl->calls_eh_return)
{
unsigned int i;
|| (save_reg_mask
&& optimize_size
&& ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
- && !current_function_calls_eh_return))
+ && !crtl->calls_eh_return))
save_reg_mask |= 1 << LR_REGNUM;
if (cfun->machine->lr_save_eliminated)
if (flag_pic
&& !TARGET_SINGLE_PIC_BASE
&& arm_pic_register != INVALID_REGNUM
- && current_function_uses_pic_offset_table)
+ && crtl->uses_pic_offset_table)
mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
/* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
return "";
}
- gcc_assert (!current_function_calls_alloca || really_return);
+ gcc_assert (!cfun->calls_alloca || really_return);
sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
if (cfun->machine->lr_save_eliminated)
asm_fprintf (f, "\t%@ link register save eliminated.\n");
- if (current_function_calls_eh_return)
+ if (crtl->calls_eh_return)
asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
return_used_this_function = 0;
/* If we are throwing an exception, then we really must be doing a
return, so we can't tail-call. */
- gcc_assert (!current_function_calls_eh_return || really_return);
+ gcc_assert (!crtl->calls_eh_return || really_return);
offsets = arm_get_frame_offsets ();
saved_regs_mask = offsets->saved_regs_mask;
special function exit sequence, or we are not really returning. */
if (really_return
&& ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
- && !current_function_calls_eh_return)
+ && !crtl->calls_eh_return)
/* Delete the LR from the register mask, so that the LR on
the stack is loaded into the PC in the register mask. */
saved_regs_mask &= ~ (1 << LR_REGNUM);
occur. If the stack pointer already points at the right
place, then omit the subtraction. */
if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
- || current_function_calls_alloca)
+ || cfun->calls_alloca)
asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
4 * bit_count (saved_regs_mask));
print_multi_reg (f, "ldmfd\t%r, ", SP_REGNUM, saved_regs_mask, 0);
count = offsets->saved_regs - offsets->saved_args;
if (optimize_size
&& count != 0
- && !current_function_calls_eh_return
+ && !crtl->calls_eh_return
&& bit_count(saved_regs_mask) * 4 == count
&& !IS_INTERRUPT (func_type)
- && !cfun->tail_call_emit)
+ && !crtl->tail_call_emit)
{
unsigned long mask;
mask = (1 << (arm_size_return_regs() / 4)) - 1;
&& really_return
&& crtl->args.pretend_args_size == 0
&& saved_regs_mask & (1 << LR_REGNUM)
- && !current_function_calls_eh_return)
+ && !crtl->calls_eh_return)
{
saved_regs_mask &= ~ (1 << LR_REGNUM);
saved_regs_mask |= (1 << PC_REGNUM);
return "";
/* Stack adjustment for exception handler. */
- if (current_function_calls_eh_return)
+ if (crtl->calls_eh_return)
asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
ARM_EH_STACKADJ_REGNUM);
}
if (reg == -1 && arm_size_return_regs () <= 12
- && !cfun->tail_call_emit)
+ && !crtl->tail_call_emit)
{
/* Push/pop an argument register (r3) if all callee saved
registers are already being pushed. */
scheduling in the prolog. Similarly if we want non-call exceptions
using the EABI unwinder, to prevent faulting instructions from being
swapped with a stack adjustment. */
- if (current_function_profile || !TARGET_SCHED_PROLOG
+ if (crtl->profile || !TARGET_SCHED_PROLOG
|| (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
emit_insn (gen_blockage ());
{
unsigned int i, fcode = ARM_BUILTIN_NEON_BASE;
+ tree neon_intQI_type_node;
+ tree neon_intHI_type_node;
+ tree neon_polyQI_type_node;
+ tree neon_polyHI_type_node;
+ tree neon_intSI_type_node;
+ tree neon_intDI_type_node;
+ tree neon_float_type_node;
+
+ tree intQI_pointer_node;
+ tree intHI_pointer_node;
+ tree intSI_pointer_node;
+ tree intDI_pointer_node;
+ tree float_pointer_node;
+
+ tree const_intQI_node;
+ tree const_intHI_node;
+ tree const_intSI_node;
+ tree const_intDI_node;
+ tree const_float_node;
+
+ tree const_intQI_pointer_node;
+ tree const_intHI_pointer_node;
+ tree const_intSI_pointer_node;
+ tree const_intDI_pointer_node;
+ tree const_float_pointer_node;
+
+ tree V8QI_type_node;
+ tree V4HI_type_node;
+ tree V2SI_type_node;
+ tree V2SF_type_node;
+ tree V16QI_type_node;
+ tree V8HI_type_node;
+ tree V4SI_type_node;
+ tree V4SF_type_node;
+ tree V2DI_type_node;
+
+ tree intUQI_type_node;
+ tree intUHI_type_node;
+ tree intUSI_type_node;
+ tree intUDI_type_node;
+
+ tree intEI_type_node;
+ tree intOI_type_node;
+ tree intCI_type_node;
+ tree intXI_type_node;
+
+ tree V8QI_pointer_node;
+ tree V4HI_pointer_node;
+ tree V2SI_pointer_node;
+ tree V2SF_pointer_node;
+ tree V16QI_pointer_node;
+ tree V8HI_pointer_node;
+ tree V4SI_pointer_node;
+ tree V4SF_pointer_node;
+ tree V2DI_pointer_node;
+
+ tree void_ftype_pv8qi_v8qi_v8qi;
+ tree void_ftype_pv4hi_v4hi_v4hi;
+ tree void_ftype_pv2si_v2si_v2si;
+ tree void_ftype_pv2sf_v2sf_v2sf;
+ tree void_ftype_pdi_di_di;
+ tree void_ftype_pv16qi_v16qi_v16qi;
+ tree void_ftype_pv8hi_v8hi_v8hi;
+ tree void_ftype_pv4si_v4si_v4si;
+ tree void_ftype_pv4sf_v4sf_v4sf;
+ tree void_ftype_pv2di_v2di_v2di;
+
+ tree reinterp_ftype_dreg[5][5];
+ tree reinterp_ftype_qreg[5][5];
+ tree dreg_types[5], qreg_types[5];
+
/* Create distinguished type nodes for NEON vector element types,
and pointers to values of such types, so we can detect them later. */
- tree neon_intQI_type_node = make_signed_type (GET_MODE_PRECISION (QImode));
- tree neon_intHI_type_node = make_signed_type (GET_MODE_PRECISION (HImode));
- tree neon_polyQI_type_node = make_signed_type (GET_MODE_PRECISION (QImode));
- tree neon_polyHI_type_node = make_signed_type (GET_MODE_PRECISION (HImode));
- tree neon_intSI_type_node = make_signed_type (GET_MODE_PRECISION (SImode));
- tree neon_intDI_type_node = make_signed_type (GET_MODE_PRECISION (DImode));
- tree neon_float_type_node = make_node (REAL_TYPE);
-
- tree intQI_pointer_node = build_pointer_type (neon_intQI_type_node);
- tree intHI_pointer_node = build_pointer_type (neon_intHI_type_node);
- tree intSI_pointer_node = build_pointer_type (neon_intSI_type_node);
- tree intDI_pointer_node = build_pointer_type (neon_intDI_type_node);
- tree float_pointer_node = build_pointer_type (neon_float_type_node);
+ neon_intQI_type_node = make_signed_type (GET_MODE_PRECISION (QImode));
+ neon_intHI_type_node = make_signed_type (GET_MODE_PRECISION (HImode));
+ neon_polyQI_type_node = make_signed_type (GET_MODE_PRECISION (QImode));
+ neon_polyHI_type_node = make_signed_type (GET_MODE_PRECISION (HImode));
+ neon_intSI_type_node = make_signed_type (GET_MODE_PRECISION (SImode));
+ neon_intDI_type_node = make_signed_type (GET_MODE_PRECISION (DImode));
+ neon_float_type_node = make_node (REAL_TYPE);
+ TYPE_PRECISION (neon_float_type_node) = FLOAT_TYPE_SIZE;
+ layout_type (neon_float_type_node);
+
+ /* Define typedefs which exactly correspond to the modes we are basing vector
+ types on. If you change these names you'll need to change
+ the table used by arm_mangle_type too. */
+ (*lang_hooks.types.register_builtin_type) (neon_intQI_type_node,
+ "__builtin_neon_qi");
+ (*lang_hooks.types.register_builtin_type) (neon_intHI_type_node,
+ "__builtin_neon_hi");
+ (*lang_hooks.types.register_builtin_type) (neon_intSI_type_node,
+ "__builtin_neon_si");
+ (*lang_hooks.types.register_builtin_type) (neon_float_type_node,
+ "__builtin_neon_sf");
+ (*lang_hooks.types.register_builtin_type) (neon_intDI_type_node,
+ "__builtin_neon_di");
+ (*lang_hooks.types.register_builtin_type) (neon_polyQI_type_node,
+ "__builtin_neon_poly8");
+ (*lang_hooks.types.register_builtin_type) (neon_polyHI_type_node,
+ "__builtin_neon_poly16");
+
+ intQI_pointer_node = build_pointer_type (neon_intQI_type_node);
+ intHI_pointer_node = build_pointer_type (neon_intHI_type_node);
+ intSI_pointer_node = build_pointer_type (neon_intSI_type_node);
+ intDI_pointer_node = build_pointer_type (neon_intDI_type_node);
+ float_pointer_node = build_pointer_type (neon_float_type_node);
/* Next create constant-qualified versions of the above types. */
- tree const_intQI_node = build_qualified_type (neon_intQI_type_node,
- TYPE_QUAL_CONST);
- tree const_intHI_node = build_qualified_type (neon_intHI_type_node,
- TYPE_QUAL_CONST);
- tree const_intSI_node = build_qualified_type (neon_intSI_type_node,
- TYPE_QUAL_CONST);
- tree const_intDI_node = build_qualified_type (neon_intDI_type_node,
- TYPE_QUAL_CONST);
- tree const_float_node = build_qualified_type (neon_float_type_node,
- TYPE_QUAL_CONST);
-
- tree const_intQI_pointer_node = build_pointer_type (const_intQI_node);
- tree const_intHI_pointer_node = build_pointer_type (const_intHI_node);
- tree const_intSI_pointer_node = build_pointer_type (const_intSI_node);
- tree const_intDI_pointer_node = build_pointer_type (const_intDI_node);
- tree const_float_pointer_node = build_pointer_type (const_float_node);
+ const_intQI_node = build_qualified_type (neon_intQI_type_node,
+ TYPE_QUAL_CONST);
+ const_intHI_node = build_qualified_type (neon_intHI_type_node,
+ TYPE_QUAL_CONST);
+ const_intSI_node = build_qualified_type (neon_intSI_type_node,
+ TYPE_QUAL_CONST);
+ const_intDI_node = build_qualified_type (neon_intDI_type_node,
+ TYPE_QUAL_CONST);
+ const_float_node = build_qualified_type (neon_float_type_node,
+ TYPE_QUAL_CONST);
+
+ const_intQI_pointer_node = build_pointer_type (const_intQI_node);
+ const_intHI_pointer_node = build_pointer_type (const_intHI_node);
+ const_intSI_pointer_node = build_pointer_type (const_intSI_node);
+ const_intDI_pointer_node = build_pointer_type (const_intDI_node);
+ const_float_pointer_node = build_pointer_type (const_float_node);
/* Now create vector types based on our NEON element types. */
/* 64-bit vectors. */
- tree V8QI_type_node =
+ V8QI_type_node =
build_vector_type_for_mode (neon_intQI_type_node, V8QImode);
- tree V4HI_type_node =
+ V4HI_type_node =
build_vector_type_for_mode (neon_intHI_type_node, V4HImode);
- tree V2SI_type_node =
+ V2SI_type_node =
build_vector_type_for_mode (neon_intSI_type_node, V2SImode);
- tree V2SF_type_node =
+ V2SF_type_node =
build_vector_type_for_mode (neon_float_type_node, V2SFmode);
/* 128-bit vectors. */
- tree V16QI_type_node =
+ V16QI_type_node =
build_vector_type_for_mode (neon_intQI_type_node, V16QImode);
- tree V8HI_type_node =
+ V8HI_type_node =
build_vector_type_for_mode (neon_intHI_type_node, V8HImode);
- tree V4SI_type_node =
+ V4SI_type_node =
build_vector_type_for_mode (neon_intSI_type_node, V4SImode);
- tree V4SF_type_node =
+ V4SF_type_node =
build_vector_type_for_mode (neon_float_type_node, V4SFmode);
- tree V2DI_type_node =
+ V2DI_type_node =
build_vector_type_for_mode (neon_intDI_type_node, V2DImode);
/* Unsigned integer types for various mode sizes. */
- tree intUQI_type_node = make_unsigned_type (GET_MODE_PRECISION (QImode));
- tree intUHI_type_node = make_unsigned_type (GET_MODE_PRECISION (HImode));
- tree intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode));
- tree intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode));
+ intUQI_type_node = make_unsigned_type (GET_MODE_PRECISION (QImode));
+ intUHI_type_node = make_unsigned_type (GET_MODE_PRECISION (HImode));
+ intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode));
+ intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode));
+
+ (*lang_hooks.types.register_builtin_type) (intUQI_type_node,
+ "__builtin_neon_uqi");
+ (*lang_hooks.types.register_builtin_type) (intUHI_type_node,
+ "__builtin_neon_uhi");
+ (*lang_hooks.types.register_builtin_type) (intUSI_type_node,
+ "__builtin_neon_usi");
+ (*lang_hooks.types.register_builtin_type) (intUDI_type_node,
+ "__builtin_neon_udi");
/* Opaque integer types for structures of vectors. */
- tree intEI_type_node = make_signed_type (GET_MODE_PRECISION (EImode));
- tree intOI_type_node = make_signed_type (GET_MODE_PRECISION (OImode));
- tree intCI_type_node = make_signed_type (GET_MODE_PRECISION (CImode));
- tree intXI_type_node = make_signed_type (GET_MODE_PRECISION (XImode));
+ intEI_type_node = make_signed_type (GET_MODE_PRECISION (EImode));
+ intOI_type_node = make_signed_type (GET_MODE_PRECISION (OImode));
+ intCI_type_node = make_signed_type (GET_MODE_PRECISION (CImode));
+ intXI_type_node = make_signed_type (GET_MODE_PRECISION (XImode));
+
+ (*lang_hooks.types.register_builtin_type) (intTI_type_node,
+ "__builtin_neon_ti");
+ (*lang_hooks.types.register_builtin_type) (intEI_type_node,
+ "__builtin_neon_ei");
+ (*lang_hooks.types.register_builtin_type) (intOI_type_node,
+ "__builtin_neon_oi");
+ (*lang_hooks.types.register_builtin_type) (intCI_type_node,
+ "__builtin_neon_ci");
+ (*lang_hooks.types.register_builtin_type) (intXI_type_node,
+ "__builtin_neon_xi");
/* Pointers to vector types. */
- tree V8QI_pointer_node = build_pointer_type (V8QI_type_node);
- tree V4HI_pointer_node = build_pointer_type (V4HI_type_node);
- tree V2SI_pointer_node = build_pointer_type (V2SI_type_node);
- tree V2SF_pointer_node = build_pointer_type (V2SF_type_node);
- tree V16QI_pointer_node = build_pointer_type (V16QI_type_node);
- tree V8HI_pointer_node = build_pointer_type (V8HI_type_node);
- tree V4SI_pointer_node = build_pointer_type (V4SI_type_node);
- tree V4SF_pointer_node = build_pointer_type (V4SF_type_node);
- tree V2DI_pointer_node = build_pointer_type (V2DI_type_node);
+ V8QI_pointer_node = build_pointer_type (V8QI_type_node);
+ V4HI_pointer_node = build_pointer_type (V4HI_type_node);
+ V2SI_pointer_node = build_pointer_type (V2SI_type_node);
+ V2SF_pointer_node = build_pointer_type (V2SF_type_node);
+ V16QI_pointer_node = build_pointer_type (V16QI_type_node);
+ V8HI_pointer_node = build_pointer_type (V8HI_type_node);
+ V4SI_pointer_node = build_pointer_type (V4SI_type_node);
+ V4SF_pointer_node = build_pointer_type (V4SF_type_node);
+ V2DI_pointer_node = build_pointer_type (V2DI_type_node);
/* Operations which return results as pairs. */
- tree void_ftype_pv8qi_v8qi_v8qi =
+ void_ftype_pv8qi_v8qi_v8qi =
build_function_type_list (void_type_node, V8QI_pointer_node, V8QI_type_node,
V8QI_type_node, NULL);
- tree void_ftype_pv4hi_v4hi_v4hi =
+ void_ftype_pv4hi_v4hi_v4hi =
build_function_type_list (void_type_node, V4HI_pointer_node, V4HI_type_node,
V4HI_type_node, NULL);
- tree void_ftype_pv2si_v2si_v2si =
+ void_ftype_pv2si_v2si_v2si =
build_function_type_list (void_type_node, V2SI_pointer_node, V2SI_type_node,
V2SI_type_node, NULL);
- tree void_ftype_pv2sf_v2sf_v2sf =
+ void_ftype_pv2sf_v2sf_v2sf =
build_function_type_list (void_type_node, V2SF_pointer_node, V2SF_type_node,
V2SF_type_node, NULL);
- tree void_ftype_pdi_di_di =
+ void_ftype_pdi_di_di =
build_function_type_list (void_type_node, intDI_pointer_node,
neon_intDI_type_node, neon_intDI_type_node, NULL);
- tree void_ftype_pv16qi_v16qi_v16qi =
+ void_ftype_pv16qi_v16qi_v16qi =
build_function_type_list (void_type_node, V16QI_pointer_node,
V16QI_type_node, V16QI_type_node, NULL);
- tree void_ftype_pv8hi_v8hi_v8hi =
+ void_ftype_pv8hi_v8hi_v8hi =
build_function_type_list (void_type_node, V8HI_pointer_node, V8HI_type_node,
V8HI_type_node, NULL);
- tree void_ftype_pv4si_v4si_v4si =
+ void_ftype_pv4si_v4si_v4si =
build_function_type_list (void_type_node, V4SI_pointer_node, V4SI_type_node,
V4SI_type_node, NULL);
- tree void_ftype_pv4sf_v4sf_v4sf =
+ void_ftype_pv4sf_v4sf_v4sf =
build_function_type_list (void_type_node, V4SF_pointer_node, V4SF_type_node,
V4SF_type_node, NULL);
- tree void_ftype_pv2di_v2di_v2di =
+ void_ftype_pv2di_v2di_v2di =
build_function_type_list (void_type_node, V2DI_pointer_node, V2DI_type_node,
V2DI_type_node, NULL);
- tree reinterp_ftype_dreg[5][5];
- tree reinterp_ftype_qreg[5][5];
- tree dreg_types[5], qreg_types[5];
-
- TYPE_PRECISION (neon_float_type_node) = FLOAT_TYPE_SIZE;
- layout_type (neon_float_type_node);
-
- /* Define typedefs which exactly correspond to the modes we are basing vector
- types on. If you change these names you'll need to change
- the table used by arm_mangle_type too. */
- (*lang_hooks.types.register_builtin_type) (neon_intQI_type_node,
- "__builtin_neon_qi");
- (*lang_hooks.types.register_builtin_type) (neon_intHI_type_node,
- "__builtin_neon_hi");
- (*lang_hooks.types.register_builtin_type) (neon_intSI_type_node,
- "__builtin_neon_si");
- (*lang_hooks.types.register_builtin_type) (neon_float_type_node,
- "__builtin_neon_sf");
- (*lang_hooks.types.register_builtin_type) (neon_intDI_type_node,
- "__builtin_neon_di");
-
- (*lang_hooks.types.register_builtin_type) (neon_polyQI_type_node,
- "__builtin_neon_poly8");
- (*lang_hooks.types.register_builtin_type) (neon_polyHI_type_node,
- "__builtin_neon_poly16");
- (*lang_hooks.types.register_builtin_type) (intUQI_type_node,
- "__builtin_neon_uqi");
- (*lang_hooks.types.register_builtin_type) (intUHI_type_node,
- "__builtin_neon_uhi");
- (*lang_hooks.types.register_builtin_type) (intUSI_type_node,
- "__builtin_neon_usi");
- (*lang_hooks.types.register_builtin_type) (intUDI_type_node,
- "__builtin_neon_udi");
-
- (*lang_hooks.types.register_builtin_type) (intTI_type_node,
- "__builtin_neon_ti");
- (*lang_hooks.types.register_builtin_type) (intEI_type_node,
- "__builtin_neon_ei");
- (*lang_hooks.types.register_builtin_type) (intOI_type_node,
- "__builtin_neon_oi");
- (*lang_hooks.types.register_builtin_type) (intCI_type_node,
- "__builtin_neon_ci");
- (*lang_hooks.types.register_builtin_type) (intXI_type_node,
- "__builtin_neon_xi");
-
dreg_types[0] = V8QI_type_node;
dreg_types[1] = V4HI_type_node;
dreg_types[2] = V2SI_type_node;
static int
neon_builtin_compare (const void *a, const void *b)
{
- const neon_builtin_datum *key = a;
- const neon_builtin_datum *memb = b;
+ const neon_builtin_datum *const key = (const neon_builtin_datum *) a;
+ const neon_builtin_datum *const memb = (const neon_builtin_datum *) b;
unsigned int soughtcode = key->base_fcode;
if (soughtcode >= memb->base_fcode
int idx;
key.base_fcode = fcode;
- found = bsearch (&key, &neon_builtin_data[0], ARRAY_SIZE (neon_builtin_data),
+ found = (neon_builtin_datum *)
+ bsearch (&key, &neon_builtin_data[0], ARRAY_SIZE (neon_builtin_data),
sizeof (neon_builtin_data[0]), neon_builtin_compare);
gcc_assert (found);
idx = fcode - (int) found->base_fcode;
{
/* Catch popping the PC. */
if (TARGET_INTERWORK || TARGET_BACKTRACE
- || current_function_calls_eh_return)
+ || crtl->calls_eh_return)
{
/* The PC is never poped directly, instead
it is popped into r3 and then BX is used. */
return. */
if (pops_needed == 0)
{
- if (current_function_calls_eh_return)
+ if (crtl->calls_eh_return)
asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
else if (!TARGET_INTERWORK
&& !TARGET_BACKTRACE
&& !is_called_in_ARM_mode (current_function_decl)
- && !current_function_calls_eh_return)
+ && !crtl->calls_eh_return)
{
asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
return;
/* If returning via __builtin_eh_return, the bottom three registers
all contain information needed for the return. */
- if (current_function_calls_eh_return)
+ if (crtl->calls_eh_return)
size = 12;
else
{
asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
}
- if (current_function_calls_eh_return)
+ if (crtl->calls_eh_return)
asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
/* Return to caller. */
scheduling in the prolog. Similarly if we want non-call exceptions
using the EABI unwinder, to prevent faulting instructions from being
swapped with a stack adjustment. */
- if (current_function_profile || !TARGET_SCHED_PROLOG
+ if (crtl->profile || !TARGET_SCHED_PROLOG
|| (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
emit_insn (gen_blockage ());
the stack adjustment will not be deleted. */
emit_insn (gen_prologue_use (stack_pointer_rtx));
- if (current_function_profile || !TARGET_SCHED_PROLOG)
+ if (crtl->profile || !TARGET_SCHED_PROLOG)
emit_insn (gen_blockage ());
/* Emit a clobber for each insn that will be restored in the epilogue,
so that flow2 will get register lifetimes correct. */
for (regno = 0; regno < 13; regno++)
if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
- emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
+ emit_clobber (gen_rtx_REG (SImode, regno));
if (! df_regs_ever_live_p (LR_REGNUM))
- emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
+ emit_use (gen_rtx_REG (SImode, LR_REGNUM));
}
static void
static void
arm_cxx_determine_class_data_visibility (tree decl)
{
- if (!TARGET_AAPCS_BASED)
+ if (!TARGET_AAPCS_BASED
+ || !TARGET_DLLIMPORT_DECL_ATTRIBUTES)
return;
/* In general, \S 3.2.5.5 of the ARM EABI requires that class data
rtx addr;
unsigned long mask;
- emit_insn (gen_rtx_USE (VOIDmode, source));
+ emit_use (source);
offsets = arm_get_frame_offsets ();
mask = offsets->saved_regs_mask;
if (!ARM_EABI_UNWIND_TABLES)
return;
- if (!(flag_unwind_tables || cfun->uses_eh_lsda)
+ if (!(flag_unwind_tables || crtl->uses_eh_lsda)
&& (TREE_NOTHROW (current_function_decl)
- || cfun->all_throwers_are_sibcalls))
+ || crtl->all_throwers_are_sibcalls))
return;
if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
/* If this function will never be unwound, then mark it as such.
The came condition is used in arm_unwind_emit to suppress
the frame annotations. */
- if (!(flag_unwind_tables || cfun->uses_eh_lsda)
+ if (!(flag_unwind_tables || crtl->uses_eh_lsda)
&& (TREE_NOTHROW (current_function_decl)
- || cfun->all_throwers_are_sibcalls))
+ || crtl->all_throwers_are_sibcalls))
fputs("\t.cantunwind\n", f);
fputs ("\t.fnend\n", f);