#endif
offset = cfa_offset;
}
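+ /* The preferred-stack-boundary virtual register instantiates to the
+ boundary itself, expressed in bytes; no frame offset is involved. */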
+ else if (x == virtual_preferred_stack_boundary_rtx)
+ {
+ new_rtx = GEN_INT (crtl->preferred_stack_boundary / BITS_PER_UNIT);
+ offset = 0;
+ }
else
return NULL_RTX;
/* Indicate that, from now on, assign_stack_local should use
frame_pointer_rtx. */
virtuals_instantiated = 1;
+
+ /* See allocate_dynamic_stack_space for the rationale. */
+#ifdef SETJMP_VIA_SAVE_AREA
+ if (flag_stack_usage && cfun->calls_setjmp)
+ {
+ int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
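+ /* Round dynamic_offset up to the preferred stack boundary and charge
+ it once for every dynamic allocation made by the function. */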
+ dynamic_offset = (dynamic_offset + align - 1) / align * align;
+ current_function_dynamic_stack_size
+ += current_function_dynamic_alloc_count * dynamic_offset;
+ }
+#endif
+
return 0;
}
unsigned i;
tree p;
- for (i = 0; VEC_iterate (tree, *args, i, p); ++i)
+ FOR_EACH_VEC_ELT (tree, *args, i, p)
{
tree type = TREE_TYPE (p);
if (TREE_CODE (type) == COMPLEX_TYPE
align = BITS_PER_UNIT;
/* If we're padding upward, we know that the alignment of the slot
- is FUNCTION_ARG_BOUNDARY. If we're using slot_offset, we're
+ is TARGET_FUNCTION_ARG_BOUNDARY. If we're using slot_offset, we're
intentionally forcing upward padding. Otherwise we have to come
up with a guess at the alignment based on OFFSET_RTX. */
if (data->locate.where_pad != downward || data->entry_parm)
|| promoted_nominal_mode != data->promoted_mode);
moved = false;
- if (need_conversion)
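+ /* Besides needing a conversion, require the nominal mode to be an
+ integer mode that matches both the passed mode and the mode of
+ the incoming value. */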
+ if (need_conversion
+ && GET_MODE_CLASS (data->nominal_mode) == MODE_INT
+ && data->nominal_mode == data->passed_mode
+ && data->nominal_mode == GET_MODE (data->entry_parm))
{
/* ENTRY_PARM has been converted to PROMOTED_MODE, its
mode, by the caller. We now have to convert it to
if (moved)
{
emit_insn (insns);
- equiv_stack_parm = gen_rtx_fmt_e (code, GET_MODE (parmreg),
- equiv_stack_parm);
+ if (equiv_stack_parm != NULL_RTX)
+ equiv_stack_parm = gen_rtx_fmt_e (code, GET_MODE (parmreg),
+ equiv_stack_parm);
}
}
}
assign_parms_initialize_all (&all);
fnargs = assign_parms_augmented_arg_list (&all);
- for (i = 0; VEC_iterate (tree, fnargs, i, parm); ++i)
+ FOR_EACH_VEC_ELT (tree, fnargs, i, parm)
{
struct assign_parm_data_one data;
/* Estimate stack alignment from parameter alignment. */
if (SUPPORTS_STACK_ALIGNMENT)
{
- unsigned int align = FUNCTION_ARG_BOUNDARY (data.promoted_mode,
- data.passed_type);
+ unsigned int align
+ = targetm.calls.function_arg_boundary (data.promoted_mode,
+ data.passed_type);
align = MINIMUM_ALIGNMENT (data.passed_type, data.promoted_mode,
align);
if (TYPE_ALIGN (data.nominal_type) > align)
assign_parms_initialize_all (&all);
fnargs = assign_parms_augmented_arg_list (&all);
- for (i = 0; VEC_iterate (tree, fnargs, i, parm); ++i)
+ FOR_EACH_VEC_ELT (tree, fnargs, i, parm)
{
struct assign_parm_data_one data;
&& compare_tree_int (DECL_SIZE_UNIT (parm),
STACK_CHECK_MAX_VAR_SIZE) > 0))
{
- local = create_tmp_var (type, get_name (parm));
+ local = create_tmp_reg (type, get_name (parm));
DECL_IGNORED_P (local) = 0;
/* If PARM was addressable, move that flag over
to the local copy, as its address will be taken,
- not the PARMs. */
+ not the PARM's. Keep the PARM's address-taken flag set,
+ as we'll query that flag during gimplification. */
if (TREE_ADDRESSABLE (parm))
- {
- TREE_ADDRESSABLE (parm) = 0;
- TREE_ADDRESSABLE (local) = 1;
- }
+ TREE_ADDRESSABLE (local) = 1;
}
else
{
tree ptr_type, addr;
ptr_type = build_pointer_type (type);
- addr = create_tmp_var (ptr_type, get_name (parm));
+ addr = create_tmp_reg (ptr_type, get_name (parm));
DECL_IGNORED_P (addr) = 0;
local = build_fold_indirect_ref (addr);
t = built_in_decls[BUILT_IN_ALLOCA];
t = build_call_expr (t, 1, DECL_SIZE_UNIT (parm));
+ /* The call has been built for a variable-sized object. */
+ ALLOCA_FOR_VAR_P (t) = 1;
t = fold_convert (ptr_type, t);
t = build2 (MODIFY_EXPR, TREE_TYPE (addr), addr, t);
gimplify_and_add (t, &stmts);
FNDECL is the function in which the argument was defined.
There are two types of rounding that are done. The first, controlled by
- FUNCTION_ARG_BOUNDARY, forces the offset from the start of the argument
- list to be aligned to the specific boundary (in bits). This rounding
- affects the initial and starting offsets, but not the argument size.
+ TARGET_FUNCTION_ARG_BOUNDARY, forces the offset from the start of the
+ argument list to be aligned to the specific boundary (in bits). This
+ rounding affects the initial and starting offsets, but not the argument
+ size.
The second, controlled by FUNCTION_ARG_PADDING and PARM_BOUNDARY,
optionally rounds the size of the parm to PARM_BOUNDARY. The
sizetree
= type ? size_in_bytes (type) : size_int (GET_MODE_SIZE (passed_mode));
where_pad = FUNCTION_ARG_PADDING (passed_mode, type);
- boundary = FUNCTION_ARG_BOUNDARY (passed_mode, type);
+ boundary = targetm.calls.function_arg_boundary (passed_mode, type);
locate->where_pad = where_pad;
/* Alignment can't exceed MAX_SUPPORTED_STACK_ALIGNMENT. */
}
\f
+/* Reverse the order of elements in the fragment chain T of blocks,
+ and return the new head of the chain (old last element). */
+
+static tree
+block_fragments_nreverse (tree t)
+{
+ tree prev = 0, block, next;
+ for (block = t; block; block = next)
+ {
+ next = BLOCK_FRAGMENT_CHAIN (block);
+ BLOCK_FRAGMENT_CHAIN (block) = prev;
+ prev = block;
+ }
+ return prev;
+}
+
+/* Reverse the order of elements in the chain T of blocks,
+ and return the new head of the chain (old last element).
+ Also do the same on subblocks and reverse the order of elements
+ in BLOCK_FRAGMENT_CHAIN as well. */
+
+static tree
+blocks_nreverse_all (tree t)
+{
+ tree prev = 0, block, next;
+ for (block = t; block; block = next)
+ {
+ next = BLOCK_CHAIN (block);
+ BLOCK_CHAIN (block) = prev;
+ BLOCK_SUBBLOCKS (block) = blocks_nreverse_all (BLOCK_SUBBLOCKS (block));
+ if (BLOCK_FRAGMENT_CHAIN (block)
+ && BLOCK_FRAGMENT_ORIGIN (block) == NULL_TREE)
+ BLOCK_FRAGMENT_CHAIN (block)
+ = block_fragments_nreverse (BLOCK_FRAGMENT_CHAIN (block));
+ prev = block;
+ }
+ return prev;
+}
+
+
/* Identify BLOCKs referenced by more than one NOTE_INSN_BLOCK_{BEG,END},
and create duplicate blocks. */
/* ??? Need an option to either create block fragments or to create
/* Recreate the block tree from the note nesting. */
reorder_blocks_1 (get_insns (), block, &block_stack);
- BLOCK_SUBBLOCKS (block) = blocks_nreverse (BLOCK_SUBBLOCKS (block));
+ BLOCK_SUBBLOCKS (block) = blocks_nreverse_all (BLOCK_SUBBLOCKS (block));
VEC_free (tree, heap, block_stack);
}
tree block = NOTE_BLOCK (insn);
tree origin;
- origin = (BLOCK_FRAGMENT_ORIGIN (block)
- ? BLOCK_FRAGMENT_ORIGIN (block)
- : block);
+ gcc_assert (BLOCK_FRAGMENT_ORIGIN (block) == NULL_TREE);
+ origin = block;
/* If we have seen this block before, that means it now
spans multiple address regions. Create a new fragment. */
else if (NOTE_KIND (insn) == NOTE_INSN_BLOCK_END)
{
NOTE_BLOCK (insn) = VEC_pop (tree, *p_block_stack);
- BLOCK_SUBBLOCKS (current_block)
- = blocks_nreverse (BLOCK_SUBBLOCKS (current_block));
current_block = BLOCK_SUPERCONTEXT (current_block);
}
}
tree
blocks_nreverse (tree t)
{
- tree prev = 0, decl, next;
- for (decl = t; decl; decl = next)
+ tree prev = 0, block, next;
+ for (block = t; block; block = next)
{
- next = BLOCK_CHAIN (decl);
- BLOCK_CHAIN (decl) = prev;
- prev = decl;
+ next = BLOCK_CHAIN (block);
+ BLOCK_CHAIN (block) = prev;
+ prev = block;
}
return prev;
}
if (optimization_current_node != opts)
{
optimization_current_node = opts;
- cl_optimization_restore (TREE_OPTIMIZATION (opts));
+ cl_optimization_restore (&global_options, TREE_OPTIMIZATION (opts));
}
targetm.set_current_function (fndecl);
cfun->returns_struct = 1;
}
- cfun->stdarg
- = (fntype
- && TYPE_ARG_TYPES (fntype) != 0
- && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
- != void_type_node));
+ cfun->stdarg = stdarg_p (fntype);
/* Assume all registers in stdarg functions need to be saved. */
cfun->va_list_gpr_size = VA_LIST_MAX_GPR_SIZE;
init_expr ();
default_rtl_profile ();
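+ /* Set up the structure that records the stack usage of the function;
+ the actual figures are only filled in once the prologue has been
+ expanded. */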
+ if (flag_stack_usage)
+ {
+ cfun->su = ggc_alloc_cleared_stack_usage ();
+ cfun->su->static_stack_size = -1;
+ }
+
cse_not_expected = ! optimize;
/* Caller save not needed yet. */
probe_stack_range (STACK_OLD_CHECK_PROTECT, max_frame_size);
seq = get_insns ();
end_sequence ();
+ set_insn_locators (seq, prologue_locator);
emit_insn_before (seq, stack_check_probe_note);
break;
}
/* Output the label for the actual return from the function. */
emit_label (return_label);
- if (USING_SJLJ_EXCEPTIONS)
+ if (targetm.except_unwind_info (&global_options) == UI_SJLJ)
{
/* Let except.c know where it should emit the call to unregister
the function context for sjlj exceptions. */
/* @@@ This is a kludge. We want to ensure that instructions that
may trap are not moved into the epilogue by scheduling, because
we don't always emit unwind information for the epilogue. */
- if (!USING_SJLJ_EXCEPTIONS && cfun->can_throw_non_call_exceptions)
+ if (cfun->can_throw_non_call_exceptions
+ && targetm.except_unwind_info (&global_options) != UI_SJLJ)
emit_insn (gen_blockage ());
/* If stack protection is enabled for this function, check the guard. */
push_topmost_sequence ();
emit_insn_after (seq, entry_of_function ());
pop_topmost_sequence ();
+
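+ /* Record that the arg pointer save area has now been initialized. */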
+ crtl->arg_pointer_save_area_init = true;
}
return ret;
}
}
-/* INSN has been duplicated as COPY, as part of duping a basic block.
- If INSN is an epilogue insn, then record COPY as epilogue as well. */
+/* INSN has been duplicated or replaced by COPY, perhaps while duplicating a
+ basic block, splitting, or running a peephole pass. If INSN is a prologue
+ or epilogue insn, then record COPY as well. */
void
-maybe_copy_epilogue_insn (rtx insn, rtx copy)
+maybe_copy_prologue_epilogue_insn (rtx insn, rtx copy)
{
+ htab_t hash;
void **slot;
- if (epilogue_insn_hash == NULL
- || htab_find (epilogue_insn_hash, insn) == NULL)
- return;
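+ /* Look INSN up in the epilogue table first, then in the prologue
+ table; if it is in neither, there is nothing to record. */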
+ hash = epilogue_insn_hash;
+ if (!hash || !htab_find (hash, insn))
+ {
+ hash = prologue_insn_hash;
+ if (!hash || !htab_find (hash, insn))
+ return;
+ }
- slot = htab_find_slot (epilogue_insn_hash, copy, INSERT);
+ slot = htab_find_slot (hash, copy, INSERT);
gcc_assert (*slot == NULL);
*slot = copy;
}
static void
thread_prologue_and_epilogue_insns (void)
{
- int inserted = 0;
+ bool inserted;
+ rtx seq ATTRIBUTE_UNUSED, epilogue_end ATTRIBUTE_UNUSED;
+ edge entry_edge ATTRIBUTE_UNUSED;
edge e;
-#if defined (HAVE_sibcall_epilogue) || defined (HAVE_epilogue) || defined (HAVE_return) || defined (HAVE_prologue)
- rtx seq;
-#endif
-#if defined (HAVE_epilogue) || defined(HAVE_return)
- rtx epilogue_end = NULL_RTX;
-#endif
edge_iterator ei;
rtl_profile_for_bb (ENTRY_BLOCK_PTR);
+
+ inserted = false;
+ seq = NULL_RTX;
+ epilogue_end = NULL_RTX;
+
+ /* Can't deal with multiple successors of the entry block at the
+ moment. A function should always have at least one entry
+ point. */
+ gcc_assert (single_succ_p (ENTRY_BLOCK_PTR));
+ entry_edge = single_succ_edge (ENTRY_BLOCK_PTR);
+
+ if (flag_split_stack
+ && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
+ == NULL))
+ {
+#ifndef HAVE_split_stack_prologue
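+ /* -fsplit-stack should have been rejected earlier if the target has
+ no split-stack prologue pattern. */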
+ gcc_unreachable ();
+#else
+ gcc_assert (HAVE_split_stack_prologue);
+
+ start_sequence ();
+ emit_insn (gen_split_stack_prologue ());
+ seq = get_insns ();
+ end_sequence ();
+
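+ /* Register these insns so that later passes recognize them as being
+ part of the prologue. */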
+ record_insns (seq, NULL, &prologue_insn_hash);
+ set_insn_locators (seq, prologue_locator);
+
+ /* This relies on the fact that committing the edge insertion
+ will look for basic blocks within the inserted instructions,
+ which in turn relies on the fact that we are not in CFG
+ layout mode here. */
+ insert_insn_on_edge (seq, entry_edge);
+ inserted = true;
+#endif
+ }
+
#ifdef HAVE_prologue
if (HAVE_prologue)
{
record_insns (seq, NULL, &prologue_insn_hash);
emit_note (NOTE_INSN_PROLOGUE_END);
-#ifndef PROFILE_BEFORE_PROLOGUE
/* Ensure that instructions are not moved into the prologue when
profiling is on. The call to the profiling routine can be
emitted within the live range of a call-clobbered register. */
- if (crtl->profile)
+ if (!targetm.profile_before_prologue () && crtl->profile)
emit_insn (gen_blockage ());
-#endif
seq = get_insns ();
end_sequence ();
set_insn_locators (seq, prologue_locator);
- /* Can't deal with multiple successors of the entry block
- at the moment. Function should always have at least one
- entry point. */
- gcc_assert (single_succ_p (ENTRY_BLOCK_PTR));
-
- insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
- inserted = 1;
+ insert_insn_on_edge (seq, entry_edge);
+ inserted = true;
}
#endif
basic_block last;
rtx label;
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
- if (e->flags & EDGE_FALLTHRU)
- break;
+ e = find_fallthru_edge (EXIT_BLOCK_PTR->preds);
if (e == NULL)
goto epilogue_done;
last = e->src;
There really shouldn't be a mixture -- either all should have
been converted or none, however... */
- FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
- if (e->flags & EDGE_FALLTHRU)
- break;
+ e = find_fallthru_edge (EXIT_BLOCK_PTR->preds);
if (e == NULL)
goto epilogue_done;
end_sequence ();
insert_insn_on_edge (seq, e);
- inserted = 1;
+ inserted = true;
}
else
#endif
{
if (optimize)
cleanup_cfg (CLEANUP_EXPENSIVE);
+
/* On some machines, the prologue and epilogue code, or parts thereof,
can be represented as RTL. Doing so lets us schedule insns between
it and the rest of the code and also allows delayed branch
scheduling to operate in the epilogue. */
-
thread_prologue_and_epilogue_insns ();
+
+ /* The stack usage info is finalized during prologue expansion. */
+ if (flag_stack_usage)
+ output_stack_usage ();
+
return 0;
}