align = BITS_PER_UNIT;
/* If we're padding upward, we know that the alignment of the slot
- is FUNCTION_ARG_BOUNDARY. If we're using slot_offset, we're
+ is TARGET_FUNCTION_ARG_BOUNDARY. If we're using slot_offset, we're
intentionally forcing upward padding. Otherwise we have to come
up with a guess at the alignment based on OFFSET_RTX. */
if (data->locate.where_pad != downward || data->entry_parm)
/* Estimate stack alignment from parameter alignment. */
if (SUPPORTS_STACK_ALIGNMENT)
{
- unsigned int align = FUNCTION_ARG_BOUNDARY (data.promoted_mode,
- data.passed_type);
+ unsigned int align
+ = targetm.calls.function_arg_boundary (data.promoted_mode,
+ data.passed_type);
align = MINIMUM_ALIGNMENT (data.passed_type, data.promoted_mode,
align);
if (TYPE_ALIGN (data.nominal_type) > align)
&& compare_tree_int (DECL_SIZE_UNIT (parm),
STACK_CHECK_MAX_VAR_SIZE) > 0))
{
- local = create_tmp_var (type, get_name (parm));
+ local = create_tmp_reg (type, get_name (parm));
DECL_IGNORED_P (local) = 0;
/* If PARM was addressable, move that flag over
to the local copy, as its address will be taken,
tree ptr_type, addr;
ptr_type = build_pointer_type (type);
- addr = create_tmp_var (ptr_type, get_name (parm));
+ addr = create_tmp_reg (ptr_type, get_name (parm));
DECL_IGNORED_P (addr) = 0;
local = build_fold_indirect_ref (addr);
FNDECL is the function in which the argument was defined.
There are two types of rounding that are done. The first, controlled by
- FUNCTION_ARG_BOUNDARY, forces the offset from the start of the argument
- list to be aligned to the specific boundary (in bits). This rounding
- affects the initial and starting offsets, but not the argument size.
+ TARGET_FUNCTION_ARG_BOUNDARY, forces the offset from the start of the
+ argument list to be aligned to the specific boundary (in bits). This
+ rounding affects the initial and starting offsets, but not the argument
+ size.
The second, controlled by FUNCTION_ARG_PADDING and PARM_BOUNDARY,
optionally rounds the size of the parm to PARM_BOUNDARY. The
sizetree
= type ? size_in_bytes (type) : size_int (GET_MODE_SIZE (passed_mode));
where_pad = FUNCTION_ARG_PADDING (passed_mode, type);
- boundary = FUNCTION_ARG_BOUNDARY (passed_mode, type);
+ boundary = targetm.calls.function_arg_boundary (passed_mode, type);
locate->where_pad = where_pad;
/* Alignment can't exceed MAX_SUPPORTED_STACK_ALIGNMENT. */
probe_stack_range (STACK_OLD_CHECK_PROTECT, max_frame_size);
seq = get_insns ();
end_sequence ();
+ set_insn_locators (seq, prologue_locator);
emit_insn_before (seq, stack_check_probe_note);
break;
}
/* Output the label for the actual return from the function. */
emit_label (return_label);
- if (targetm.except_unwind_info () == UI_SJLJ)
+ if (targetm.except_unwind_info (&global_options) == UI_SJLJ)
{
/* Let except.c know where it should emit the call to unregister
the function context for sjlj exceptions. */
may trap are not moved into the epilogue by scheduling, because
we don't always emit unwind information for the epilogue. */
if (cfun->can_throw_non_call_exceptions
- && targetm.except_unwind_info () != UI_SJLJ)
+ && targetm.except_unwind_info (&global_options) != UI_SJLJ)
emit_insn (gen_blockage ());
/* If stack protection is enabled for this function, check the guard. */
}
}
-/* INSN has been duplicated as COPY, as part of duping a basic block.
- If INSN is an epilogue insn, then record COPY as epilogue as well. */
+/* INSN has been duplicated or replaced as COPY, perhaps by duplicating a
basic block, by splitting, or by peepholes.  If INSN is a prologue or
epilogue insn, then record COPY as well.  */
void
-maybe_copy_epilogue_insn (rtx insn, rtx copy)
+maybe_copy_prologue_epilogue_insn (rtx insn, rtx copy)
{
+ htab_t hash;
void **slot;
- if (epilogue_insn_hash == NULL
- || htab_find (epilogue_insn_hash, insn) == NULL)
- return;
+ hash = epilogue_insn_hash;
+ if (!hash || !htab_find (hash, insn))
+ {
+ hash = prologue_insn_hash;
+ if (!hash || !htab_find (hash, insn))
+ return;
+ }
- slot = htab_find_slot (epilogue_insn_hash, copy, INSERT);
+ slot = htab_find_slot (hash, copy, INSERT);
gcc_assert (*slot == NULL);
*slot = copy;
}
thread_prologue_and_epilogue_insns (void)
{
bool inserted;
- rtx seq, epilogue_end;
- edge entry_edge;
+ rtx seq ATTRIBUTE_UNUSED, epilogue_end ATTRIBUTE_UNUSED;
+ edge entry_edge ATTRIBUTE_UNUSED;
edge e;
edge_iterator ei;
basic_block last;
rtx label;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
- if (e->flags & EDGE_FALLTHRU)
- break;
+ e = find_fallthru_edge (EXIT_BLOCK_PTR->preds);
if (e == NULL)
goto epilogue_done;
last = e->src;
There really shouldn't be a mixture -- either all should have
been converted or none, however... */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
- if (e->flags & EDGE_FALLTHRU)
- break;
+ e = find_fallthru_edge (EXIT_BLOCK_PTR->preds);
if (e == NULL)
goto epilogue_done;