/* A pass for lowering trees to RTL.
- Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009
+ Free Software Foundation, Inc.
This file is part of GCC.
#include "tree-inline.h"
#include "value-prof.h"
#include "target.h"
+#include "ssaexpand.h"
+/* This variable holds information that helps the rewriting of SSA
+   trees into RTL.  */
+struct ssaexpand SA;
+
/* Return an expression tree corresponding to the RHS of GIMPLE
statement STMT. */
static tree
gimple_cond_pred_to_tree (gimple stmt)
{
+  /* We're sometimes presented with code such as:
+       D.123_1 = x < y;
+       if (D.123_1 != 0)
+         ...
+     This would expand to two comparisons, which combine might later
+     clean up.  But some pattern matchers like if-conversion work
+     better when there's only one compare, so make up for this here,
+     as a special exception, if TER would have made the same change.  */
+ tree lhs = gimple_cond_lhs (stmt);
+ if (SA.values
+ && TREE_CODE (lhs) == SSA_NAME
+ && bitmap_bit_p (SA.values, SSA_NAME_VERSION (lhs)))
+ lhs = gimple_assign_rhs_to_tree (SSA_NAME_DEF_STMT (lhs));
+
return build2 (gimple_cond_code (stmt), boolean_type_node,
- gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
+ lhs, gimple_cond_rhs (stmt));
}
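+/* An illustrative sketch of the special case above (the SSA names
+   are made up): assuming TER recorded D.123_1 in SA.values, the
+   GIMPLE pair
+
+     D.123_1 = x < y;
+     if (D.123_1 != 0) ...
+
+   is turned into the single tree (x < y) != 0, which the jump
+   expander can emit as one compare-and-branch; the defining
+   statement itself is skipped during expansion via the same
+   SA.values bitmap.  */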
/* Helper for gimple_to_tree. Set EXPR_LOCATION for every expression
#define STACK_ALIGNMENT_NEEDED 1
#endif
+#define SSAVAR(x) (TREE_CODE (x) == SSA_NAME ? SSA_NAME_VAR (x) : x)
+
+/* Associate declaration T with storage space X.  If T is not an
+   SSA name, this is exactly SET_DECL_RTL; otherwise associate
+   the partition of T with X.  */
+static inline void
+set_rtl (tree t, rtx x)
+{
+ if (TREE_CODE (t) == SSA_NAME)
+ {
+ SA.partition_to_pseudo[var_to_partition (SA.map, t)] = x;
+ if (x && !MEM_P (x))
+ set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (t), x);
+ }
+ else
+ SET_DECL_RTL (t, x);
+}
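+/* A minimal usage sketch for the helper above: both kinds of trees
+   go through the same entry point, e.g.
+
+     set_rtl (name, x);   with an SSA name, records X in
+                          SA.partition_to_pseudo[] for NAME's partition;
+     set_rtl (decl, x);   with a plain decl, is exactly
+                          SET_DECL_RTL (decl, x).
+
+   This uniformity is what lets add_stack_var below mark both decls
+   and SSA names with the same pc_rtx sentinel.  */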
/* This structure holds data relevant to one variable that will be
placed in a stack slot. */
{
unsigned int align;
- align = DECL_ALIGN (decl);
- align = LOCAL_ALIGNMENT (TREE_TYPE (decl), align);
+ align = LOCAL_DECL_ALIGNMENT (decl);
if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
align = MAX_SUPPORTED_STACK_ALIGNMENT;
}
stack_vars[stack_vars_num].decl = decl;
stack_vars[stack_vars_num].offset = 0;
- stack_vars[stack_vars_num].size = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
- stack_vars[stack_vars_num].alignb = get_decl_align_unit (decl);
+  stack_vars[stack_vars_num].size
+    = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (decl)), 1);
+ stack_vars[stack_vars_num].alignb = get_decl_align_unit (SSAVAR (decl));
/* All variables are initially in their own partition. */
stack_vars[stack_vars_num].representative = stack_vars_num;
stack_vars[stack_vars_num].next = EOC;
/* Ensure that this decl doesn't get put onto the list twice. */
- SET_DECL_RTL (decl, pc_rtx);
+ set_rtl (decl, pc_rtx);
stack_vars_num++;
}
}
/* A subroutine of partition_stack_vars. A comparison function for qsort,
- sorting an array of indices by the size of the object. */
+ sorting an array of indices by the size and type of the object. */
static int
stack_var_size_cmp (const void *a, const void *b)
{
HOST_WIDE_INT sa = stack_vars[*(const size_t *)a].size;
HOST_WIDE_INT sb = stack_vars[*(const size_t *)b].size;
- unsigned int uida = DECL_UID (stack_vars[*(const size_t *)a].decl);
- unsigned int uidb = DECL_UID (stack_vars[*(const size_t *)b].decl);
+ tree decla, declb;
+ unsigned int uida, uidb;
if (sa < sb)
return -1;
if (sa > sb)
return 1;
- /* For stack variables of the same size use the uid of the decl
- to make the sort stable. */
+ decla = stack_vars[*(const size_t *)a].decl;
+ declb = stack_vars[*(const size_t *)b].decl;
+  /* For stack variables of the same size, use an ID of the decls
+     to make the sort stable.  Two SSA names are compared by their
+     version, SSA names come before non-SSA names, and two normal
+     decls are compared by their DECL_UID.  */
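+  /* E.g., among equal-sized slots the resulting order is: SSA name
+     version 3, then SSA name version 8, then plain decls in
+     increasing DECL_UID order (versions and UIDs here are made up).  */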
+ if (TREE_CODE (decla) == SSA_NAME)
+ {
+ if (TREE_CODE (declb) == SSA_NAME)
+ uida = SSA_NAME_VERSION (decla), uidb = SSA_NAME_VERSION (declb);
+ else
+ return -1;
+ }
+ else if (TREE_CODE (declb) == SSA_NAME)
+ return 1;
+ else
+ uida = DECL_UID (decla), uidb = DECL_UID (declb);
if (uida < uidb)
return -1;
if (uida > uidb)
static void
expand_one_stack_var_at (tree decl, HOST_WIDE_INT offset)
{
- HOST_WIDE_INT align;
+ /* Alignment is unsigned. */
+ unsigned HOST_WIDE_INT align;
rtx x;
/* If this fails, we've overflowed the stack frame. Error nicely? */
gcc_assert (offset == trunc_int_for_mode (offset, Pmode));
x = plus_constant (virtual_stack_vars_rtx, offset);
- x = gen_rtx_MEM (DECL_MODE (decl), x);
-
- /* Set alignment we actually gave this decl. */
- offset -= frame_phase;
- align = offset & -offset;
- align *= BITS_PER_UNIT;
- if (align > STACK_BOUNDARY || align == 0)
- align = STACK_BOUNDARY;
- DECL_ALIGN (decl) = align;
- DECL_USER_ALIGN (decl) = 0;
-
- set_mem_attributes (x, decl, true);
- SET_DECL_RTL (decl, x);
+ x = gen_rtx_MEM (DECL_MODE (SSAVAR (decl)), x);
+
+ if (TREE_CODE (decl) != SSA_NAME)
+ {
+      /* Set the alignment we actually gave this decl if it isn't an
+         SSA name.  If it is one, we generate stack slots only
+         accidentally, so the alignment isn't as important; we'll
+         simply use the alignment that is already set.  */
+ offset -= frame_phase;
+ align = offset & -offset;
+ align *= BITS_PER_UNIT;
+ if (align == 0)
+ align = STACK_BOUNDARY;
+ else if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
+ align = MAX_SUPPORTED_STACK_ALIGNMENT;
+
+ DECL_ALIGN (decl) = align;
+ DECL_USER_ALIGN (decl) = 0;
+ }
+
+ set_mem_attributes (x, SSAVAR (decl), true);
+ set_rtl (decl, x);
}
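+/* A worked example of the alignment computation above, for the
+   non-SSA case: a variable placed at offset 24 (after subtracting
+   frame_phase) gives 24 & -24 == 8, the largest power of two
+   dividing the offset, so DECL_ALIGN becomes 8 * BITS_PER_UNIT,
+   i.e. 64 bits on a target with 8-bit units.  */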
/* A subroutine of expand_used_vars. Give each partition representative
/* Skip variables that have already had rtl assigned. See also
add_stack_var where we perpetrate this pc_rtx hack. */
- if (DECL_RTL (stack_vars[i].decl) != pc_rtx)
+ if ((TREE_CODE (stack_vars[i].decl) == SSA_NAME
+ ? SA.partition_to_pseudo[var_to_partition (SA.map, stack_vars[i].decl)]
+ : DECL_RTL (stack_vars[i].decl)) != pc_rtx)
continue;
/* Check the predicate to see whether this variable should be
size += stack_vars[i].size;
for (j = i; j != EOC; j = stack_vars[j].next)
- SET_DECL_RTL (stack_vars[j].decl, NULL);
+ set_rtl (stack_vars[j].decl, NULL);
}
return size;
}
{
HOST_WIDE_INT size, offset, align;
- size = tree_low_cst (DECL_SIZE_UNIT (var), 1);
- align = get_decl_align_unit (var);
+ size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (var)), 1);
+ align = get_decl_align_unit (SSAVAR (var));
offset = alloc_stack_frame_space (size, align);
expand_one_stack_var_at (var, offset);
static void
expand_one_register_var (tree var)
{
- tree type = TREE_TYPE (var);
+ tree decl = SSAVAR (var);
+ tree type = TREE_TYPE (decl);
int unsignedp = TYPE_UNSIGNED (type);
enum machine_mode reg_mode
- = promote_mode (type, DECL_MODE (var), &unsignedp, 0);
+ = promote_mode (type, DECL_MODE (decl), &unsignedp, 0);
rtx x = gen_reg_rtx (reg_mode);
- SET_DECL_RTL (var, x);
+ set_rtl (var, x);
/* Note if the object is a user variable. */
- if (!DECL_ARTIFICIAL (var))
- mark_user_reg (x);
+ if (!DECL_ARTIFICIAL (decl))
+ mark_user_reg (x);
if (POINTER_TYPE_P (type))
- mark_reg_pointer (x, TYPE_ALIGN (TREE_TYPE (TREE_TYPE (var))));
+ mark_reg_pointer (x, TYPE_ALIGN (TREE_TYPE (type)));
}
/* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL that
static HOST_WIDE_INT
expand_one_var (tree var, bool toplevel, bool really_expand)
{
+ tree origvar = var;
+ var = SSAVAR (var);
+
if (SUPPORTS_STACK_ALIGNMENT
&& TREE_TYPE (var) != error_mark_node
&& TREE_CODE (var) == VAR_DECL)
}
}
- if (TREE_CODE (var) != VAR_DECL)
+ if (TREE_CODE (origvar) == SSA_NAME)
+ {
+ gcc_assert (TREE_CODE (var) != VAR_DECL
+ || (!DECL_EXTERNAL (var)
+ && !DECL_HAS_VALUE_EXPR_P (var)
+ && !TREE_STATIC (var)
+ && !DECL_RTL_SET_P (var)
+ && TREE_TYPE (var) != error_mark_node
+ && !DECL_HARD_REGISTER (var)
+ && really_expand));
+ }
+ if (TREE_CODE (var) != VAR_DECL && TREE_CODE (origvar) != SSA_NAME)
;
else if (DECL_EXTERNAL (var))
;
if (really_expand)
expand_one_error_var (var);
}
- else if (DECL_HARD_REGISTER (var))
+ else if (TREE_CODE (var) == VAR_DECL && DECL_HARD_REGISTER (var))
{
if (really_expand)
expand_one_hard_reg_var (var);
else if (use_register_for_decl (var))
{
if (really_expand)
- expand_one_register_var (var);
+ expand_one_register_var (origvar);
}
else if (defer_stack_allocation (var, toplevel))
- add_stack_var (var);
+ add_stack_var (origvar);
else
{
if (really_expand)
- expand_one_stack_var (var);
+ expand_one_stack_var (origvar);
return tree_low_cst (DECL_SIZE_UNIT (var), 1);
}
return 0;
stack_vars_conflict_alloc = 0;
}
+/* Make a fair guess for the size of the stack frame of the current
+   function.  This doesn't have to be exact; the result is only used
+   in the inline heuristics.  So we don't want to run the full stack
+   var packing algorithm (which is quadratic in the number of stack
+   vars).  Instead, we calculate the total size of all stack vars.
+   This turns out to be a pretty fair estimate -- packing of stack
+   vars doesn't happen very often.  */
+
HOST_WIDE_INT
estimated_stack_frame_size (void)
{
HOST_WIDE_INT size = 0;
+ size_t i;
tree t, outer_block = DECL_INITIAL (current_function_decl);
init_vars_expansion ();
- /* At this point all variables on the local_decls with TREE_USED
- set are not associated with any block scope. Lay them out. */
for (t = cfun->local_decls; t; t = TREE_CHAIN (t))
{
tree var = TREE_VALUE (t);
TREE_USED (var) = 1;
}
size += account_used_vars_for_block (outer_block, true);
+
if (stack_vars_num > 0)
{
- /* Due to the way alias sets work, no variables with non-conflicting
- alias sets may be assigned the same address. Add conflicts to
- reflect this. */
- add_alias_set_conflicts ();
-
- /* If stack protection is enabled, we don't share space between
- vulnerable data and non-vulnerable data. */
- if (flag_stack_protect)
- add_stack_protection_conflicts ();
-
- /* Now that we have collected all stack variables, and have computed a
- minimal interference graph, attempt to save some stack space. */
- partition_stack_vars ();
- if (dump_file)
- dump_stack_var_partition ();
-
+ /* Fake sorting the stack vars for account_stack_vars (). */
+ stack_vars_sorted = XNEWVEC (size_t, stack_vars_num);
+ for (i = 0; i < stack_vars_num; ++i)
+ stack_vars_sorted[i] = i;
size += account_stack_vars ();
fini_vars_expansion ();
}
+
return size;
}
expand_used_vars (void)
{
tree t, next, outer_block = DECL_INITIAL (current_function_decl);
+ unsigned i;
/* Compute the phase of the stack frame for this function. */
{
init_vars_expansion ();
+ for (i = 0; i < SA.map->num_partitions; i++)
+ {
+ tree var = partition_to_var (SA.map, i);
+
+ gcc_assert (is_gimple_reg (var));
+ if (TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
+ expand_one_var (var, true, true);
+ else
+ {
+          /* This is a PARM_DECL or RESULT_DECL.  For those partitions
+             that contain the default def (representing the parm or
+             result itself) we don't do anything here.  But for those
+             which don't contain the default def (representing a
+             temporary based on the parm/result) we need to allocate
+             space just like for normal VAR_DECLs.  */
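+          /* E.g., for "int foo (int parm)" (names illustrative), the
+             partition holding the default def parm_1(D) is skipped
+             here; its RTL is filled in later from the DECL_RTL that
+             expand_function_start assigns.  A partition holding only
+             a temporary such as parm_3 gets storage allocated now.  */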
+ if (!bitmap_bit_p (SA.partition_has_default_def, i))
+ {
+ expand_one_var (var, true, true);
+ gcc_assert (SA.partition_to_pseudo[i]);
+ }
+ }
+ }
+
/* At this point all variables on the local_decls with TREE_USED
set are not associated with any block scope. Lay them out. */
t = cfun->local_decls;
next = TREE_CHAIN (t);
+ /* Expanded above already. */
+ if (is_gimple_reg (var))
+ ;
/* We didn't set a block for static or extern because it's hard
to tell the difference between a global variable (re)declared
in a local scope, and one that's really declared there to
begin with. And it doesn't really matter much, since we're
not giving them stack space. Expand them now. */
- if (TREE_STATIC (var) || DECL_EXTERNAL (var))
- expand_now = true;
-
- /* Any variable that could have been hoisted into an SSA_NAME
- will have been propagated anywhere the optimizers chose,
- i.e. not confined to their original block. Allocate them
- as if they were defined in the outermost scope. */
- else if (is_gimple_reg (var))
+ else if (TREE_STATIC (var) || DECL_EXTERNAL (var))
expand_now = true;
/* If the variable is not associated with any block, then it
}
+/* A subroutine of expand_gimple_cond. Given E, a fallthrough edge
+ of a basic block where we just expanded the conditional at the end,
+ possibly clean up the CFG and instruction sequence. */
+
+static void
+maybe_cleanup_end_of_block (edge e)
+{
+  /* Special case: when jumpif decides that the condition is
+     trivial it emits an unconditional jump (and the necessary
+     barrier).  But we still have two edges, and the fallthru one
+     is wrong.  purge_dead_edges would clean this up later.
+     Unfortunately we have to insert insns (and split edges) before
+     find_many_sub_basic_blocks and hence before purge_dead_edges.
+     But splitting edges might create new blocks which depend on the
+     fact that if there are two edges there's no barrier.  So the
+     barrier would get lost and verify_flow_info would ICE.  Instead
+     of auditing all edge splitters to care for the barrier (which
+     normally isn't there in a cleaned CFG), fix it here.  */
+ if (BARRIER_P (get_last_insn ()))
+ {
+ basic_block bb = e->src;
+ rtx insn;
+ remove_edge (e);
+      /* Now we have a single successor block.  If we have insns to
+         insert on the remaining edge, we potentially will insert
+         them at the end of this block (if the dest block isn't
+         feasible) in order to avoid splitting the edge.  This
+         insertion will take place in front of the last jump.  But we
+         might have emitted multiple jumps (conditional and one
+         unconditional) to the same destination.  Inserting in front
+         of the last one then is a problem.  See PR 40021.  We fix
+         this by deleting all jumps except the last unconditional
+         one.  */
+ insn = PREV_INSN (get_last_insn ());
+ /* Make sure we have an unconditional jump. Otherwise we're
+ confused. */
+ gcc_assert (JUMP_P (insn) && !any_condjump_p (insn));
+ for (insn = PREV_INSN (insn); insn != BB_HEAD (bb);)
+ {
+ insn = PREV_INSN (insn);
+ if (JUMP_P (NEXT_INSN (insn)))
+ delete_insn (NEXT_INSN (insn));
+ }
+ }
+}
+
+
/* A subroutine of expand_gimple_basic_block. Expand one GIMPLE_COND.
Returns a new basic block if we've terminated the current basic
block and created a new one. */
add_reg_br_prob_note (last, true_edge->probability);
maybe_dump_rtl_for_gimple_stmt (stmt, last);
if (true_edge->goto_locus)
- set_curr_insn_source_location (true_edge->goto_locus);
+ {
+ set_curr_insn_source_location (true_edge->goto_locus);
+ set_curr_insn_block (true_edge->goto_block);
+ true_edge->goto_locus = curr_insn_locator ();
+ }
+ true_edge->goto_block = NULL;
false_edge->flags |= EDGE_FALLTHRU;
ggc_free (pred);
+ maybe_cleanup_end_of_block (false_edge);
return NULL;
}
if (true_edge->dest == bb->next_bb)
add_reg_br_prob_note (last, false_edge->probability);
maybe_dump_rtl_for_gimple_stmt (stmt, last);
if (false_edge->goto_locus)
- set_curr_insn_source_location (false_edge->goto_locus);
+ {
+ set_curr_insn_source_location (false_edge->goto_locus);
+ set_curr_insn_block (false_edge->goto_block);
+ false_edge->goto_locus = curr_insn_locator ();
+ }
+ false_edge->goto_block = NULL;
true_edge->flags |= EDGE_FALLTHRU;
ggc_free (pred);
+ maybe_cleanup_end_of_block (true_edge);
return NULL;
}
jumpif (pred, label_rtx_for_bb (true_edge->dest));
add_reg_br_prob_note (last, true_edge->probability);
last = get_last_insn ();
+ if (false_edge->goto_locus)
+ {
+ set_curr_insn_source_location (false_edge->goto_locus);
+ set_curr_insn_block (false_edge->goto_block);
+ false_edge->goto_locus = curr_insn_locator ();
+ }
+ false_edge->goto_block = NULL;
emit_jump (label_rtx_for_bb (false_edge->dest));
BB_END (bb) = last;
maybe_dump_rtl_for_gimple_stmt (stmt, last2);
- if (false_edge->goto_locus)
- set_curr_insn_source_location (false_edge->goto_locus);
+ if (true_edge->goto_locus)
+ {
+ set_curr_insn_source_location (true_edge->goto_locus);
+ set_curr_insn_block (true_edge->goto_block);
+ true_edge->goto_locus = curr_insn_locator ();
+ }
+ true_edge->goto_block = NULL;
ggc_free (pred);
return new_bb;
NOTE_BASIC_BLOCK (note) = bb;
- for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
- {
- /* Clear EDGE_EXECUTABLE. This flag is never used in the backend. */
- e->flags &= ~EDGE_EXECUTABLE;
-
- /* At the moment not all abnormal edges match the RTL representation.
- It is safe to remove them here as find_many_sub_basic_blocks will
- rediscover them. In the future we should get this fixed properly. */
- if (e->flags & EDGE_ABNORMAL)
- remove_edge (e);
- else
- ei_next (&ei);
- }
-
for (; !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
}
else
{
- tree stmt_tree = gimple_to_tree (stmt);
+ def_operand_p def_p;
+ tree stmt_tree;
+ def_p = SINGLE_SSA_DEF_OPERAND (stmt, SSA_OP_DEF);
+
+ if (def_p != NULL)
+ {
+ /* Ignore this stmt if it is in the list of
+ replaceable expressions. */
+ if (SA.values
+ && bitmap_bit_p (SA.values,
+ SSA_NAME_VERSION (DEF_FROM_PTR (def_p))))
+ continue;
+ }
+ stmt_tree = gimple_to_tree (stmt);
last = get_last_insn ();
expand_expr_stmt (stmt_tree);
maybe_dump_rtl_for_gimple_stmt (stmt, last);
}
}
- /* Expand implicit goto. */
+ /* Expand implicit goto and convert goto_locus. */
FOR_EACH_EDGE (e, ei, bb->succs)
{
- if (e->flags & EDGE_FALLTHRU)
- break;
- }
-
- if (e && e->dest != bb->next_bb)
- {
- emit_jump (label_rtx_for_bb (e->dest));
- if (e->goto_locus)
- set_curr_insn_source_location (e->goto_locus);
- e->flags &= ~EDGE_FALLTHRU;
+ if (e->goto_locus && e->goto_block)
+ {
+ set_curr_insn_source_location (e->goto_locus);
+ set_curr_insn_block (e->goto_block);
+ e->goto_locus = curr_insn_locator ();
+ }
+ e->goto_block = NULL;
+ if ((e->flags & EDGE_FALLTHRU) && e->dest != bb->next_bb)
+ {
+ emit_jump (label_rtx_for_bb (e->dest));
+ e->flags &= ~EDGE_FALLTHRU;
+ }
}
do_pending_stack_adjust ();
expand_stack_alignment (void)
{
rtx drap_rtx;
- unsigned int preferred_stack_boundary, incoming_stack_boundary;
+ unsigned int preferred_stack_boundary;
if (! SUPPORTS_STACK_ALIGNMENT)
return;
gcc_assert (crtl->stack_alignment_needed
<= crtl->stack_alignment_estimated);
- /* Update stack boundary if needed. */
- if (targetm.calls.update_stack_boundary)
- targetm.calls.update_stack_boundary ();
-
/* Update crtl->stack_alignment_estimated and use it later to align
stack. We check PREFERRED_STACK_BOUNDARY if there may be non-call
exceptions since callgraph doesn't collect incoming stack alignment
if (preferred_stack_boundary > crtl->stack_alignment_needed)
crtl->stack_alignment_needed = preferred_stack_boundary;
- /* The incoming stack frame has to be aligned at least at
- parm_stack_boundary. */
- if (crtl->parm_stack_boundary > INCOMING_STACK_BOUNDARY)
- incoming_stack_boundary = crtl->parm_stack_boundary;
- else
- incoming_stack_boundary = INCOMING_STACK_BOUNDARY;
-
crtl->stack_realign_needed
- = incoming_stack_boundary < crtl->stack_alignment_estimated;
+ = INCOMING_STACK_BOUNDARY < crtl->stack_alignment_estimated;
crtl->stack_realign_tried = crtl->stack_realign_needed;
crtl->stack_realign_processed = true;
sbitmap blocks;
edge_iterator ei;
edge e;
+ unsigned i;
+
+ rewrite_out_of_ssa (&SA);
+ SA.partition_to_pseudo = (rtx *)xcalloc (SA.map->num_partitions,
+ sizeof (rtx));
/* Some backends want to know that we are expanding to RTL. */
currently_expanding_to_rtl = 1;
/* Set up parameters and prepare for return, for the function. */
expand_function_start (current_function_decl);
+ /* Now that we also have the parameter RTXs, copy them over to our
+ partitions. */
+ for (i = 0; i < SA.map->num_partitions; i++)
+ {
+ tree var = SSA_NAME_VAR (partition_to_var (SA.map, i));
+
+ if (TREE_CODE (var) != VAR_DECL
+ && !SA.partition_to_pseudo[i])
+ SA.partition_to_pseudo[i] = DECL_RTL_IF_SET (var);
+ gcc_assert (SA.partition_to_pseudo[i]);
+      /* Some RTL parts really want to look at DECL_RTL(x) when x
+         was a decl marked in REG_ATTR or MEM_ATTR.  We could use
+         SET_DECL_RTL here to make this available, but that would mean
+         selecting one of the potentially many RTLs for one DECL.
+         Instead of doing that we simply reset the MEM_EXPR of the RTL
+         in question, so nobody can get at it, and hence nobody can
+         call DECL_RTL on it.  */
+ if (!DECL_RTL_SET_P (var))
+ {
+ if (MEM_P (SA.partition_to_pseudo[i]))
+ set_mem_expr (SA.partition_to_pseudo[i], NULL);
+ }
+ }
+
/* If this function is `main', emit a call to `__main'
to run global initializers, etc. */
if (DECL_NAME (current_function_decl)
if (crtl->stack_protect_guard)
stack_protect_prologue ();
+ /* Update stack boundary if needed. */
+ if (SUPPORTS_STACK_ALIGNMENT)
+ {
+      /* Call update_stack_boundary here to update the incoming stack
+         boundary before TARGET_FUNCTION_OK_FOR_SIBCALL is called.
+         TARGET_FUNCTION_OK_FOR_SIBCALL needs to know the exact
+         incoming stack alignment to check whether it is OK to perform
+         sibcall optimization, since sibcall optimization will only
+         align the outgoing stack to the incoming stack boundary.  */
+ if (targetm.calls.update_stack_boundary)
+ targetm.calls.update_stack_boundary ();
+
+ /* The incoming stack frame has to be aligned at least at
+ parm_stack_boundary. */
+ gcc_assert (crtl->parm_stack_boundary <= INCOMING_STACK_BOUNDARY);
+ }
+
+ expand_phi_nodes (&SA);
+
/* Register rtl specific functions for cfg. */
rtl_register_cfg_hooks ();
init_block = construct_init_block ();
/* Clear EDGE_EXECUTABLE on the entry edge(s). It is cleaned from the
- remaining edges in expand_gimple_basic_block. */
+ remaining edges later. */
FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
e->flags &= ~EDGE_EXECUTABLE;
FOR_BB_BETWEEN (bb, init_block->next_bb, EXIT_BLOCK_PTR, next_bb)
bb = expand_gimple_basic_block (bb);
+ execute_free_datastructures ();
+ finish_out_of_ssa (&SA);
+
/* Expansion is used by optimization passes too, set maybe_hot_insn_p
conservatively to true until they are all profile aware. */
pointer_map_destroy (lab_rtx_for_bb);
set_curr_insn_block (DECL_INITIAL (current_function_decl));
insn_locators_finalize ();
- /* We're done expanding trees to RTL. */
- currently_expanding_to_rtl = 0;
-
/* Convert tree EH labels to RTL EH labels and zap the tree EH table. */
convert_from_eh_region_ranges ();
set_eh_throw_stmt_table (cfun, NULL);
rebuild_jump_labels (get_insns ());
find_exception_handler_labels ();
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ {
+ edge e;
+ edge_iterator ei;
+ for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
+ {
+ if (e->insns.r)
+ commit_one_edge_insertion (e);
+ else
+ ei_next (&ei);
+ }
+ }
+
+ /* We're done expanding trees to RTL. */
+ currently_expanding_to_rtl = 0;
+
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR, next_bb)
+ {
+ edge e;
+ edge_iterator ei;
+ for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
+ {
+ /* Clear EDGE_EXECUTABLE. This flag is never used in the backend. */
+ e->flags &= ~EDGE_EXECUTABLE;
+
+ /* At the moment not all abnormal edges match the RTL
+ representation. It is safe to remove them here as
+ find_many_sub_basic_blocks will rediscover them.
+ In the future we should get this fixed properly. */
+ if ((e->flags & EDGE_ABNORMAL)
+ && !(e->flags & EDGE_SIBCALL))
+ remove_edge (e);
+ else
+ ei_next (&ei);
+ }
+ }
+
blocks = sbitmap_alloc (last_basic_block);
sbitmap_ones (blocks);
find_many_sub_basic_blocks (blocks);
- purge_all_dead_edges ();
sbitmap_free (blocks);
+ purge_all_dead_edges ();
compact_blocks ();
NULL, /* next */
0, /* static_pass_number */
TV_EXPAND, /* tv_id */
- /* ??? If TER is enabled, we actually receive GENERIC. */
- PROP_gimple_leh | PROP_cfg, /* properties_required */
+ PROP_ssa | PROP_gimple_leh | PROP_cfg,/* properties_required */
PROP_rtl, /* properties_provided */
- PROP_trees, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_func, /* todo_flags_finish */
+ PROP_ssa | PROP_trees, /* properties_destroyed */
+ TODO_verify_ssa | TODO_verify_flow
+ | TODO_verify_stmts, /* todo_flags_start */
+ TODO_dump_func
+ | TODO_ggc_collect /* todo_flags_finish */
}
};