#endif
offset = cfa_offset;
}
+ else if (x == virtual_preferred_stack_boundary_rtx)
+ {
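+ /* The preferred stack boundary is tracked in bits; expose it to RTL
+ as a byte count, with no offset to apply. */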
+ new_rtx = GEN_INT (crtl->preferred_stack_boundary / BITS_PER_UNIT);
+ offset = 0;
+ }
else
return NULL_RTX;
/* Indicate that, from now on, assign_stack_local should use
frame_pointer_rtx. */
virtuals_instantiated = 1;
+
+ /* See allocate_dynamic_stack_space for the rationale. */
+#ifdef SETJMP_VIA_SAVE_AREA
+ if (flag_stack_usage && cfun->calls_setjmp)
+ {
+ int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
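+ /* Round dynamic_offset up to the next multiple of align; e.g. with
+ align == 16, offsets 1 through 16 all round up to 16. */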
+ dynamic_offset = (dynamic_offset + align - 1) / align * align;
+ current_function_dynamic_stack_size
+ += current_function_dynamic_alloc_count * dynamic_offset;
+ }
+#endif
+
return 0;
}
align = BITS_PER_UNIT;
/* If we're padding upward, we know that the alignment of the slot
- is FUNCTION_ARG_BOUNDARY. If we're using slot_offset, we're
+ is TARGET_FUNCTION_ARG_BOUNDARY. If we're using slot_offset, we're
intentionally forcing upward padding. Otherwise we have to come
up with a guess at the alignment based on OFFSET_RTX. */
if (data->locate.where_pad != downward || data->entry_parm)
/* Estimate stack alignment from parameter alignment. */
if (SUPPORTS_STACK_ALIGNMENT)
{
- unsigned int align = FUNCTION_ARG_BOUNDARY (data.promoted_mode,
- data.passed_type);
+ unsigned int align
+ = targetm.calls.function_arg_boundary (data.promoted_mode,
+ data.passed_type);
align = MINIMUM_ALIGNMENT (data.passed_type, data.promoted_mode,
align);
if (TYPE_ALIGN (data.nominal_type) > align)
&& compare_tree_int (DECL_SIZE_UNIT (parm),
STACK_CHECK_MAX_VAR_SIZE) > 0))
{
- local = create_tmp_var (type, get_name (parm));
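+ /* Unlike create_tmp_var, create_tmp_reg also marks complex and
+ vector temporaries as GIMPLE registers. */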
+ local = create_tmp_reg (type, get_name (parm));
DECL_IGNORED_P (local) = 0;
/* If PARM was addressable, move that flag over
to the local copy, as its address will be taken,
tree ptr_type, addr;
ptr_type = build_pointer_type (type);
- addr = create_tmp_var (ptr_type, get_name (parm));
+ addr = create_tmp_reg (ptr_type, get_name (parm));
DECL_IGNORED_P (addr) = 0;
local = build_fold_indirect_ref (addr);
t = built_in_decls[BUILT_IN_ALLOCA];
t = build_call_expr (t, 1, DECL_SIZE_UNIT (parm));
+ /* The call has been built for a variable-sized object. */
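+ /* The flag lets later passes distinguish it from an explicit
+ alloca call written by the user. */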
+ ALLOCA_FOR_VAR_P (t) = 1;
t = fold_convert (ptr_type, t);
t = build2 (MODIFY_EXPR, TREE_TYPE (addr), addr, t);
gimplify_and_add (t, &stmts);
FNDECL is the function in which the argument was defined.
There are two types of rounding that are done. The first, controlled by
- FUNCTION_ARG_BOUNDARY, forces the offset from the start of the argument
- list to be aligned to the specific boundary (in bits). This rounding
- affects the initial and starting offsets, but not the argument size.
+ TARGET_FUNCTION_ARG_BOUNDARY, forces the offset from the start of the
+ argument list to be aligned to the specific boundary (in bits). This
+ rounding affects the initial and starting offsets, but not the argument
+ size.
The second, controlled by FUNCTION_ARG_PADDING and PARM_BOUNDARY,
optionally rounds the size of the parm to PARM_BOUNDARY. The
sizetree
= type ? size_in_bytes (type) : size_int (GET_MODE_SIZE (passed_mode));
where_pad = FUNCTION_ARG_PADDING (passed_mode, type);
- boundary = FUNCTION_ARG_BOUNDARY (passed_mode, type);
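+ /* Like the FUNCTION_ARG_BOUNDARY macro it replaces, the hook
+ returns the required alignment in bits. */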
+ boundary = targetm.calls.function_arg_boundary (passed_mode, type);
locate->where_pad = where_pad;
/* Alignment can't exceed MAX_SUPPORTED_STACK_ALIGNMENT. */
if (optimization_current_node != opts)
{
optimization_current_node = opts;
- cl_optimization_restore (TREE_OPTIMIZATION (opts));
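+ /* The option state to restore into is now passed explicitly; here
+ it is the global option state. */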
+ cl_optimization_restore (&global_options, TREE_OPTIMIZATION (opts));
}
targetm.set_current_function (fndecl);
init_expr ();
default_rtl_profile ();
+ if (flag_stack_usage)
+ {
+ cfun->su = ggc_alloc_cleared_stack_usage ();
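+ /* -1 means that the static stack size has not been computed yet. */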
+ cfun->su->static_stack_size = -1;
+ }
+
cse_not_expected = ! optimize;
/* Caller save not needed yet. */
probe_stack_range (STACK_OLD_CHECK_PROTECT, max_frame_size);
seq = get_insns ();
end_sequence ();
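+ /* Give the probe insns the location of the prologue. */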
+ set_insn_locators (seq, prologue_locator);
emit_insn_before (seq, stack_check_probe_note);
break;
}
/* Output the label for the actual return from the function. */
emit_label (return_label);
- if (USING_SJLJ_EXCEPTIONS)
+ if (targetm.except_unwind_info (&global_options) == UI_SJLJ)
{
/* Let except.c know where it should emit the call to unregister
the function context for sjlj exceptions. */
/* @@@ This is a kludge. We want to ensure that instructions that
may trap are not moved into the epilogue by scheduling, because
we don't always emit unwind information for the epilogue. */
- if (!USING_SJLJ_EXCEPTIONS && cfun->can_throw_non_call_exceptions)
+ if (cfun->can_throw_non_call_exceptions
+ && targetm.except_unwind_info (&global_options) != UI_SJLJ)
emit_insn (gen_blockage ());
/* If stack protection is enabled for this function, check the guard. */
push_topmost_sequence ();
emit_insn_after (seq, entry_of_function ());
pop_topmost_sequence ();
+
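+ /* Note that the save area is now initialized so that this is done
+ only once. */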
+ crtl->arg_pointer_save_area_init = true;
}
return ret;
}
}
-/* INSN has been duplicated as COPY, as part of duping a basic block.
- If INSN is an epilogue insn, then record COPY as epilogue as well. */
+/* INSN has been duplicated or replaced by COPY, perhaps by duplicating a
+ basic block, by splitting, or by a peephole optimization. If INSN is a
+ prologue or epilogue insn, then record COPY as well. */
void
-maybe_copy_epilogue_insn (rtx insn, rtx copy)
+maybe_copy_prologue_epilogue_insn (rtx insn, rtx copy)
{
+ htab_t hash;
void **slot;
- if (epilogue_insn_hash == NULL
- || htab_find (epilogue_insn_hash, insn) == NULL)
- return;
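+ /* Find which of the two hash tables, if either, INSN is recorded
+ in. */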
+ hash = epilogue_insn_hash;
+ if (!hash || !htab_find (hash, insn))
+ {
+ hash = prologue_insn_hash;
+ if (!hash || !htab_find (hash, insn))
+ return;
+ }
- slot = htab_find_slot (epilogue_insn_hash, copy, INSERT);
+ slot = htab_find_slot (hash, copy, INSERT);
gcc_assert (*slot == NULL);
*slot = copy;
}
static void
thread_prologue_and_epilogue_insns (void)
{
- int inserted = 0;
+ bool inserted;
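+ /* Marked ATTRIBUTE_UNUSED because some of them are only read inside
+ target-conditional code below. */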
+ rtx seq ATTRIBUTE_UNUSED, epilogue_end ATTRIBUTE_UNUSED;
+ edge entry_edge ATTRIBUTE_UNUSED;
edge e;
-#if defined (HAVE_sibcall_epilogue) || defined (HAVE_epilogue) || defined (HAVE_return) || defined (HAVE_prologue)
- rtx seq;
-#endif
-#if defined (HAVE_epilogue) || defined(HAVE_return)
- rtx epilogue_end = NULL_RTX;
-#endif
edge_iterator ei;
rtl_profile_for_bb (ENTRY_BLOCK_PTR);
+
+ inserted = false;
+ seq = NULL_RTX;
+ epilogue_end = NULL_RTX;
+
+ /* We cannot deal with multiple successors of the entry block at the
+ moment. A function should always have at least one entry point. */
+ gcc_assert (single_succ_p (ENTRY_BLOCK_PTR));
+ entry_edge = single_succ_edge (ENTRY_BLOCK_PTR);
+
+ if (flag_split_stack
+ && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
+ == NULL))
+ {
+#ifndef HAVE_split_stack_prologue
+ gcc_unreachable ();
+#else
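+ /* HAVE_split_stack_prologue may expand to a run-time condition, so
+ also check it here. */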
+ gcc_assert (HAVE_split_stack_prologue);
+
+ start_sequence ();
+ emit_insn (gen_split_stack_prologue ());
+ seq = get_insns ();
+ end_sequence ();
+
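+ /* Record these as prologue insns so that other passes can recognize
+ them as such; see maybe_copy_prologue_epilogue_insn above. */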
+ record_insns (seq, NULL, &prologue_insn_hash);
+ set_insn_locators (seq, prologue_locator);
+
+ /* This relies on the fact that committing the edge insertion
+ will look for basic blocks within the inserted instructions,
+ which in turn relies on the fact that we are not in CFG
+ layout mode here. */
+ insert_insn_on_edge (seq, entry_edge);
+ inserted = true;
+#endif
+ }
+
#ifdef HAVE_prologue
if (HAVE_prologue)
{
end_sequence ();
set_insn_locators (seq, prologue_locator);
- /* Can't deal with multiple successors of the entry block
- at the moment. Function should always have at least one
- entry point. */
- gcc_assert (single_succ_p (ENTRY_BLOCK_PTR));
-
- insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
- inserted = 1;
+ insert_insn_on_edge (seq, entry_edge);
+ inserted = true;
}
#endif
basic_block last;
rtx label;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
- if (e->flags & EDGE_FALLTHRU)
- break;
+ e = find_fallthru_edge (EXIT_BLOCK_PTR->preds);
if (e == NULL)
goto epilogue_done;
last = e->src;
There really shouldn't be a mixture -- either all should have
been converted or none, however... */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
- if (e->flags & EDGE_FALLTHRU)
- break;
+ e = find_fallthru_edge (EXIT_BLOCK_PTR->preds);
if (e == NULL)
goto epilogue_done;
end_sequence ();
insert_insn_on_edge (seq, e);
- inserted = 1;
+ inserted = true;
}
else
#endif
{
if (optimize)
cleanup_cfg (CLEANUP_EXPENSIVE);
+
/* On some machines, the prologue and epilogue code, or parts thereof,
can be represented as RTL. Doing so lets us schedule insns between
it and the rest of the code and also allows delayed branch
scheduling to operate in the epilogue. */
-
thread_prologue_and_epilogue_insns ();
+
+ /* The stack usage info is finalized during prologue expansion. */
+ if (flag_stack_usage)
+ output_stack_usage ();
+
return 0;
}