/* A pass for lowering trees to RTL.
- Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009
+ Free Software Foundation, Inc.
This file is part of GCC.
{
unsigned int align;
- align = DECL_ALIGN (decl);
- align = LOCAL_ALIGNMENT (TREE_TYPE (decl), align);
+ align = LOCAL_DECL_ALIGNMENT (decl);
if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
align = MAX_SUPPORTED_STACK_ALIGNMENT;
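/* Editorial note: LOCAL_DECL_ALIGNMENT presumably defaults (in
   defaults.h of this era) to the same pair of calls it replaces here:

     #define LOCAL_DECL_ALIGNMENT(DECL) \
       LOCAL_ALIGNMENT (TREE_TYPE (DECL), DECL_ALIGN (DECL))

   folding both steps into a single, target-overridable hook.  */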
static void
expand_one_stack_var_at (tree decl, HOST_WIDE_INT offset)
{
- HOST_WIDE_INT align;
+ /* Alignment is unsigned. */
+ unsigned HOST_WIDE_INT align;
rtx x;
/* If this fails, we've overflowed the stack frame. Error nicely? */
offset -= frame_phase;
align = offset & -offset;
align *= BITS_PER_UNIT;
- if (align > STACK_BOUNDARY || align == 0)
+ if (align == 0)
align = STACK_BOUNDARY;
+ else if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
+ align = MAX_SUPPORTED_STACK_ALIGNMENT;
DECL_ALIGN (decl) = align;
DECL_USER_ALIGN (decl) = 0;
stack_vars_conflict_alloc = 0;
}
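/* Standalone sketch (editorial, not part of this patch): in two's
   complement, OFFSET & -OFFSET isolates the lowest set bit of OFFSET,
   i.e. the largest power of two dividing it -- exactly the alignment
   a variable placed at that frame offset is guaranteed.  Offset 0
   yields 0, which is why the code above special-cases align == 0.  */
#include <stdio.h>

int
main (void)
{
  long offsets[] = { 0, 8, 12, 24, 64 };
  size_t i;

  for (i = 0; i < sizeof offsets / sizeof offsets[0]; i++)
    {
      long off = offsets[i];
      /* E.g. 24 is 0b11000, so 24 & -24 == 8: a variable at offset 24
         can assume at most 8-byte alignment.  */
      printf ("offset %ld -> guaranteed byte alignment %ld\n",
              off, off & -off);
    }
  return 0;
}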
+/* Make a fair guess for the size of the stack frame of the current
+ function.  This doesn't have to be exact; the result is only used
+ by the inline heuristics, so we don't want to run the full stack
+ var packing algorithm (which is quadratic in the number of stack
+ vars).  Instead, we calculate the total size of all stack vars.
+ This turns out to be a pretty fair estimate -- packing of stack
+ vars doesn't happen very often. */
+
HOST_WIDE_INT
estimated_stack_frame_size (void)
{
HOST_WIDE_INT size = 0;
+ size_t i;
tree t, outer_block = DECL_INITIAL (current_function_decl);
init_vars_expansion ();
- /* At this point all variables on the local_decls with TREE_USED
- set are not associated with any block scope. Lay them out. */
for (t = cfun->local_decls; t; t = TREE_CHAIN (t))
{
tree var = TREE_VALUE (t);
TREE_USED (var) = 1;
}
size += account_used_vars_for_block (outer_block, true);
+
if (stack_vars_num > 0)
{
- /* Due to the way alias sets work, no variables with non-conflicting
- alias sets may be assigned the same address. Add conflicts to
- reflect this. */
- add_alias_set_conflicts ();
-
- /* If stack protection is enabled, we don't share space between
- vulnerable data and non-vulnerable data. */
- if (flag_stack_protect)
- add_stack_protection_conflicts ();
-
- /* Now that we have collected all stack variables, and have computed a
- minimal interference graph, attempt to save some stack space. */
- partition_stack_vars ();
- if (dump_file)
- dump_stack_var_partition ();
-
+ /* Fake sorting the stack vars for account_stack_vars (). */
+ stack_vars_sorted = XNEWVEC (size_t, stack_vars_num);
+ for (i = 0; i < stack_vars_num; ++i)
+ stack_vars_sorted[i] = i;
size += account_stack_vars ();
fini_vars_expansion ();
}
+
return size;
}
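/* Toy model (editorial, not GCC code): the heuristic above replaces
   the quadratic packing pass with a linear walk.  Filling
   stack_vars_sorted with the identity permutation satisfies
   account_stack_vars ()'s expectation of a permutation of the
   variable indices without paying for a real sort.  The hypothetical
   function below mirrors that shape on plain arrays.  */
#include <stdlib.h>

static long
toy_estimate_frame_size (const long *var_sizes, size_t n)
{
  long total = 0;
  size_t i;
  size_t *order = malloc (n * sizeof *order);

  if (order == NULL)
    return -1;

  /* "Fake sort": any order works because we only sum the sizes and
     never try to overlap variables.  */
  for (i = 0; i < n; i++)
    order[i] = i;

  for (i = 0; i < n; i++)
    total += var_sizes[order[i]];

  free (order);
  return total;
}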
add_reg_br_prob_note (last, true_edge->probability);
maybe_dump_rtl_for_gimple_stmt (stmt, last);
if (true_edge->goto_locus)
- set_curr_insn_source_location (true_edge->goto_locus);
+ {
+ set_curr_insn_source_location (true_edge->goto_locus);
+ set_curr_insn_block (true_edge->goto_block);
+ true_edge->goto_locus = curr_insn_locator ();
+ }
+ true_edge->goto_block = NULL;
false_edge->flags |= EDGE_FALLTHRU;
ggc_free (pred);
return NULL;
add_reg_br_prob_note (last, false_edge->probability);
maybe_dump_rtl_for_gimple_stmt (stmt, last);
if (false_edge->goto_locus)
- set_curr_insn_source_location (false_edge->goto_locus);
+ {
+ set_curr_insn_source_location (false_edge->goto_locus);
+ set_curr_insn_block (false_edge->goto_block);
+ false_edge->goto_locus = curr_insn_locator ();
+ }
+ false_edge->goto_block = NULL;
true_edge->flags |= EDGE_FALLTHRU;
ggc_free (pred);
return NULL;
jumpif (pred, label_rtx_for_bb (true_edge->dest));
add_reg_br_prob_note (last, true_edge->probability);
last = get_last_insn ();
+ if (false_edge->goto_locus)
+ {
+ set_curr_insn_source_location (false_edge->goto_locus);
+ set_curr_insn_block (false_edge->goto_block);
+ false_edge->goto_locus = curr_insn_locator ();
+ }
+ false_edge->goto_block = NULL;
emit_jump (label_rtx_for_bb (false_edge->dest));
BB_END (bb) = last;
maybe_dump_rtl_for_gimple_stmt (stmt, last2);
- if (false_edge->goto_locus)
- set_curr_insn_source_location (false_edge->goto_locus);
+ if (true_edge->goto_locus)
+ {
+ set_curr_insn_source_location (true_edge->goto_locus);
+ set_curr_insn_block (true_edge->goto_block);
+ true_edge->goto_locus = curr_insn_locator ();
+ }
+ true_edge->goto_block = NULL;
ggc_free (pred);
return new_bb;
return new_bb;
}
}
- else
+ else if (gimple_code (stmt) != GIMPLE_CHANGE_DYNAMIC_TYPE)
{
tree stmt_tree = gimple_to_tree (stmt);
last = get_last_insn ();
}
}
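/* Editorial note: GIMPLE_CHANGE_DYNAMIC_TYPE statements appear to be
   markers for the alias machinery (recording that the dynamic type of
   a memory location changed, e.g. via placement new) and expand to no
   RTL, which is presumably why they are now skipped here instead of
   being handed to gimple_to_tree ().  */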
- /* Expand implicit goto. */
+ /* Expand implicit goto and convert goto_locus. */
FOR_EACH_EDGE (e, ei, bb->succs)
{
- if (e->flags & EDGE_FALLTHRU)
- break;
- }
-
- if (e && e->dest != bb->next_bb)
- {
- emit_jump (label_rtx_for_bb (e->dest));
- if (e->goto_locus)
- set_curr_insn_source_location (e->goto_locus);
- e->flags &= ~EDGE_FALLTHRU;
+ if (e->goto_locus && e->goto_block)
+ {
+ set_curr_insn_source_location (e->goto_locus);
+ set_curr_insn_block (e->goto_block);
+ e->goto_locus = curr_insn_locator ();
+ }
+ e->goto_block = NULL;
+ if ((e->flags & EDGE_FALLTHRU) && e->dest != bb->next_bb)
+ {
+ emit_jump (label_rtx_for_bb (e->dest));
+ e->flags &= ~EDGE_FALLTHRU;
+ }
}
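/* Editorial note: the locus/block conversion now appears in four
   places (both conditional-edge arms, the forwarder case, and the
   loop above).  A hypothetical helper, not part of this patch, could
   factor out the shared shape:

     static void
     convert_edge_goto_locus (edge e)
     {
       if (e->goto_locus && e->goto_block)
         {
           set_curr_insn_source_location (e->goto_locus);
           set_curr_insn_block (e->goto_block);
           e->goto_locus = curr_insn_locator ();
         }
       e->goto_block = NULL;
     }

   The guards differ slightly -- the conditional-edge arms test only
   e->goto_locus -- so this is a sketch, not a drop-in replacement.  */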
do_pending_stack_adjust ();
expand_stack_alignment (void)
{
rtx drap_rtx;
- unsigned int preferred_stack_boundary, incoming_stack_boundary;
+ unsigned int preferred_stack_boundary;
if (! SUPPORTS_STACK_ALIGNMENT)
return;
gcc_assert (crtl->stack_alignment_needed
<= crtl->stack_alignment_estimated);
- /* Update stack boundary if needed. */
- if (targetm.calls.update_stack_boundary)
- targetm.calls.update_stack_boundary ();
-
/* Update crtl->stack_alignment_estimated and use it later to align
stack.  We check PREFERRED_STACK_BOUNDARY if there may be non-call
exceptions since callgraph doesn't collect incoming stack alignment
in this case.  */
if (preferred_stack_boundary > crtl->stack_alignment_needed)
crtl->stack_alignment_needed = preferred_stack_boundary;
- /* The incoming stack frame has to be aligned at least at
- parm_stack_boundary. */
- if (crtl->parm_stack_boundary > INCOMING_STACK_BOUNDARY)
- incoming_stack_boundary = crtl->parm_stack_boundary;
- else
- incoming_stack_boundary = INCOMING_STACK_BOUNDARY;
-
crtl->stack_realign_needed
- = incoming_stack_boundary < crtl->stack_alignment_estimated;
+ = INCOMING_STACK_BOUNDARY < crtl->stack_alignment_estimated;
crtl->stack_realign_tried = crtl->stack_realign_needed;
crtl->stack_realign_processed = true;
if (crtl->stack_protect_guard)
stack_protect_prologue ();
+ /* Update stack boundary if needed. */
+ if (SUPPORTS_STACK_ALIGNMENT)
+ {
+ /* Call update_stack_boundary here to update the incoming stack
+ boundary before TARGET_FUNCTION_OK_FOR_SIBCALL is called.
+ TARGET_FUNCTION_OK_FOR_SIBCALL needs the accurate incoming stack
+ alignment to decide whether sibcall optimization is safe, since
+ sibcall optimization only aligns the outgoing stack to the
+ incoming stack boundary. */
+ if (targetm.calls.update_stack_boundary)
+ targetm.calls.update_stack_boundary ();
+
+ /* The incoming stack frame has to be aligned at least at
+ parm_stack_boundary. */
+ gcc_assert (crtl->parm_stack_boundary <= INCOMING_STACK_BOUNDARY);
+ }
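/* Toy illustration (editorial; not the real target hook): a sibling
   call reuses the caller's frame, so the callee inherits whatever
   alignment the caller's incoming stack had.  The decision described
   in the comment above therefore reduces to a comparison like this.  */
static int
toy_sibcall_alignment_ok (unsigned int incoming_boundary,
                          unsigned int callee_needed_boundary)
{
  /* The outgoing stack is aligned only to the incoming boundary, so a
     sibcall is safe only if that already satisfies the callee.  */
  return incoming_boundary >= callee_needed_boundary;
}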
+
/* Register rtl specific functions for cfg. */
rtl_register_cfg_hooks ();