/* Control flow functions for trees.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
- Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
+ 2010 Free Software Foundation, Inc.
Contributed by Diego Novillo <dnovillo@redhat.com>
This file is part of GCC.
static struct pointer_map_t *edge_to_cases;
+/* If we record edge_to_cases, this bitmap will hold indexes
+ of basic blocks that end in a GIMPLE_SWITCH which we touched
+ due to edge manipulations. */
+
+static bitmap touched_switch_bbs;
+
/* CFG statistics. */
struct cfg_stats_d
{
static edge find_taken_edge_cond_expr (basic_block, tree);
static edge find_taken_edge_switch_expr (basic_block, tree);
static tree find_case_label_for_value (gimple, tree);
+static void group_case_labels_stmt (gimple);
void
init_empty_tree_cfg_for_function (struct function *fn)
label_to_block_map_for_function (fn),
initial_cfg_capacity);
- SET_BASIC_BLOCK_FOR_FUNCTION (fn, ENTRY_BLOCK,
+ SET_BASIC_BLOCK_FOR_FUNCTION (fn, ENTRY_BLOCK,
ENTRY_BLOCK_PTR_FOR_FUNCTION (fn));
- SET_BASIC_BLOCK_FOR_FUNCTION (fn, EXIT_BLOCK,
+ SET_BASIC_BLOCK_FOR_FUNCTION (fn, EXIT_BLOCK,
EXIT_BLOCK_PTR_FOR_FUNCTION (fn));
ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->next_bb
{
gcc_assert (edge_to_cases == NULL);
edge_to_cases = pointer_map_create ();
+ touched_switch_bbs = BITMAP_ALLOC (NULL);
}
/* Return nonzero if we are recording information for case labels. */
void
end_recording_case_labels (void)
{
+ bitmap_iterator bi;
+ unsigned i;
pointer_map_traverse (edge_to_cases, edge_to_cases_cleanup, NULL);
pointer_map_destroy (edge_to_cases);
edge_to_cases = NULL;
+ EXECUTE_IF_SET_IN_BITMAP (touched_switch_bbs, 0, i, bi)
+ {
+ basic_block bb = BASIC_BLOCK (i);
+ if (bb)
+ {
+ gimple stmt = last_stmt (bb);
+ if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
+ group_case_labels_stmt (stmt);
+ }
+ }
+ BITMAP_FREE (touched_switch_bbs);
}
/* If we are inside a {start,end}_recording_cases block, then return
free (label_for_bb);
}
-/* Look for blocks ending in a multiway branch (a SWITCH_EXPR in GIMPLE),
- and scan the sorted vector of cases. Combine the ones jumping to the
- same label.
+/* Scan the sorted vector of cases in STMT (a GIMPLE_SWITCH) and combine
+ the ones jumping to the same label.
Eg. three separate entries 1: 2: 3: become one entry 1..3: */
-void
-group_case_labels (void)
+static void
+group_case_labels_stmt (gimple stmt)
{
- basic_block bb;
+ int old_size = gimple_switch_num_labels (stmt);
+ int i, j, new_size = old_size;
+ tree default_case = NULL_TREE;
+ tree default_label = NULL_TREE;
+ bool has_default;
- FOR_EACH_BB (bb)
+ /* The default label is always the first case in a switch
+ statement after gimplification if it was not optimized
+ away.  */
+ if (!CASE_LOW (gimple_switch_default_label (stmt))
+ && !CASE_HIGH (gimple_switch_default_label (stmt)))
{
- gimple stmt = last_stmt (bb);
- if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
+ default_case = gimple_switch_default_label (stmt);
+ default_label = CASE_LABEL (default_case);
+ has_default = true;
+ }
+ else
+ has_default = false;
+
+ /* Look for possible opportunities to merge cases. */
+ if (has_default)
+ i = 1;
+ else
+ i = 0;
+ while (i < old_size)
+ {
+ tree base_case, base_label, base_high;
+ base_case = gimple_switch_label (stmt, i);
+
+ gcc_assert (base_case);
+ base_label = CASE_LABEL (base_case);
+
+ /* Discard cases that have the same destination as the
+ default case. */
+ if (base_label == default_label)
{
- int old_size = gimple_switch_num_labels (stmt);
- int i, j, new_size = old_size;
- tree default_case = NULL_TREE;
- tree default_label = NULL_TREE;
- bool has_default;
-
- /* The default label is always the first case in a switch
- statement after gimplification if it was not optimized
- away */
- if (!CASE_LOW (gimple_switch_default_label (stmt))
- && !CASE_HIGH (gimple_switch_default_label (stmt)))
+ gimple_switch_set_label (stmt, i, NULL_TREE);
+ i++;
+ new_size--;
+ continue;
+ }
+
+ base_high = CASE_HIGH (base_case)
+ ? CASE_HIGH (base_case)
+ : CASE_LOW (base_case);
+ i++;
+
+ /* Try to merge case labels. Break out when we reach the end
+ of the label vector or when we cannot merge the next case
+ label with the current one. */
+ while (i < old_size)
+ {
+ tree merge_case = gimple_switch_label (stmt, i);
+ tree merge_label = CASE_LABEL (merge_case);
+ tree t = int_const_binop (PLUS_EXPR, base_high,
+ integer_one_node, 1);
+
+ /* Merge the cases if they jump to the same place,
+ and their ranges are consecutive. */
+ if (merge_label == base_label
+ && tree_int_cst_equal (CASE_LOW (merge_case), t))
{
- default_case = gimple_switch_default_label (stmt);
- default_label = CASE_LABEL (default_case);
- has_default = true;
+ base_high = CASE_HIGH (merge_case) ?
+ CASE_HIGH (merge_case) : CASE_LOW (merge_case);
+ CASE_HIGH (base_case) = base_high;
+ gimple_switch_set_label (stmt, i, NULL_TREE);
+ new_size--;
+ i++;
}
else
- has_default = false;
-
- /* Look for possible opportunities to merge cases. */
- if (has_default)
- i = 1;
- else
- i = 0;
- while (i < old_size)
- {
- tree base_case, base_label, base_high;
- base_case = gimple_switch_label (stmt, i);
-
- gcc_assert (base_case);
- base_label = CASE_LABEL (base_case);
+ break;
+ }
+ }
- /* Discard cases that have the same destination as the
- default case. */
- if (base_label == default_label)
- {
- gimple_switch_set_label (stmt, i, NULL_TREE);
- i++;
- new_size--;
- continue;
- }
+ /* Compress the case labels in the label vector, and adjust the
+ length of the vector. */
+ for (i = 0, j = 0; i < new_size; i++)
+ {
+ while (! gimple_switch_label (stmt, j))
+ j++;
+ gimple_switch_set_label (stmt, i,
+ gimple_switch_label (stmt, j++));
+ }
- base_high = CASE_HIGH (base_case)
- ? CASE_HIGH (base_case)
- : CASE_LOW (base_case);
- i++;
+ gcc_assert (new_size <= old_size);
+ gimple_switch_set_num_labels (stmt, new_size);
+}
- /* Try to merge case labels. Break out when we reach the end
- of the label vector or when we cannot merge the next case
- label with the current one. */
- while (i < old_size)
- {
- tree merge_case = gimple_switch_label (stmt, i);
- tree merge_label = CASE_LABEL (merge_case);
- tree t = int_const_binop (PLUS_EXPR, base_high,
- integer_one_node, 1);
-
- /* Merge the cases if they jump to the same place,
- and their ranges are consecutive. */
- if (merge_label == base_label
- && tree_int_cst_equal (CASE_LOW (merge_case), t))
- {
- base_high = CASE_HIGH (merge_case) ?
- CASE_HIGH (merge_case) : CASE_LOW (merge_case);
- CASE_HIGH (base_case) = base_high;
- gimple_switch_set_label (stmt, i, NULL_TREE);
- new_size--;
- i++;
- }
- else
- break;
- }
- }
+/* Look for blocks ending in a multiway branch (a GIMPLE_SWITCH),
+ and scan the sorted vector of cases. Combine the ones jumping to the
+ same label. */
- /* Compress the case labels in the label vector, and adjust the
- length of the vector. */
- for (i = 0, j = 0; i < new_size; i++)
- {
- while (! gimple_switch_label (stmt, j))
- j++;
- gimple_switch_set_label (stmt, i,
- gimple_switch_label (stmt, j++));
- }
+void
+group_case_labels (void)
+{
+ basic_block bb;
- gcc_assert (new_size <= old_size);
- gimple_switch_set_num_labels (stmt, new_size);
- }
+ FOR_EACH_BB (bb)
+ {
+ gimple stmt = last_stmt (bb);
+ if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
+ group_case_labels_stmt (stmt);
}
}
return false;
/* It must be possible to eliminate all phi nodes in B. If ssa form
- is not up-to-date, we cannot eliminate any phis; however, if only
- some symbols as whole are marked for renaming, this is not a problem,
- as phi nodes for those symbols are irrelevant in updating anyway. */
+ is not up-to-date and a name-mapping is registered, we cannot eliminate
+ any phis. Symbols marked for renaming are never a problem though. */
phis = phi_nodes (b);
- if (!gimple_seq_empty_p (phis))
- {
- gimple_stmt_iterator i;
-
- if (name_mappings_registered_p ())
- return false;
-
- for (i = gsi_start (phis); !gsi_end_p (i); gsi_next (&i))
- {
- gimple phi = gsi_stmt (i);
-
- if (!is_gimple_reg (gimple_phi_result (phi))
- && !may_propagate_copy (gimple_phi_result (phi),
- gimple_phi_arg_def (phi, 0)))
- return false;
- }
- }
+ if (!gimple_seq_empty_p (phis)
+ && name_mappings_registered_p ())
+ return false;
return true;
}
FOR_EACH_IMM_USE_STMT (stmt, iter, def)
FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
SET_USE (use_p, use);
+
+ if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def))
+ SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use) = 1;
}
else
replace_uses_by (def, use);
remove_bb (basic_block bb)
{
gimple_stmt_iterator i;
- source_location loc = UNKNOWN_LOCATION;
if (dump_file)
{
i = gsi_last_bb (bb);
else
gsi_prev (&i);
-
- /* Don't warn for removed gotos. Gotos are often removed due to
- jump threading, thus resulting in bogus warnings. Not great,
- since this way we lose warnings for gotos in the original
- program that are indeed unreachable. */
- if (gimple_code (stmt) != GIMPLE_GOTO
- && gimple_has_location (stmt))
- loc = gimple_location (stmt);
}
}
- /* If requested, give a warning that the first statement in the
- block is unreachable. We walk statements backwards in the
- loop above, so the last statement we process is the first statement
- in the block. */
- if (loc > BUILTINS_LOCATION && LOCATION_LINE (loc) > 0)
- warning_at (loc, OPT_Wunreachable_code, "will never be executed");
-
remove_phi_nodes_and_edges_for_unreachable_block (bb);
bb->il.gimple = NULL;
}
return true;
/* A call also alters control flow if it does not return. */
- if (gimple_call_flags (t) & ECF_NORETURN)
+ if (flags & ECF_NORETURN)
return true;
}
break;
edge_var_map *vm;
int i;
gimple_stmt_iterator phis;
-
+
v = redirect_edge_var_map_vector (old_edge);
if (!v)
return;
-
+
for (i = 0, phis = gsi_start_phis (new_edge->dest);
VEC_iterate (edge_var_map, v, i, vm) && !gsi_end_p (phis);
i++, gsi_next (&phis))
gimple phi = gsi_stmt (phis);
tree result = redirect_edge_var_map_result (vm);
tree arg = redirect_edge_var_map_def (vm);
-
+
gcc_assert (result == gimple_phi_result (phi));
-
+
add_phi_arg (phi, arg, new_edge, redirect_edge_var_map_location (vm));
}
-
+
redirect_edge_var_map_clear (old_edge);
}
return true;
}
- /* For VIEW_CONVERT_EXPRs which are allowed here, too, there
- is nothing to verify. Gross mismatches at most invoke
- undefined behavior. */
- if (TREE_CODE (expr) == VIEW_CONVERT_EXPR
- && !handled_component_p (op))
- return false;
+ if (TREE_CODE (expr) == VIEW_CONVERT_EXPR)
+ {
+ /* For VIEW_CONVERT_EXPRs which are allowed here too, we only check
+ that their operand is not an SSA name or an invariant when
+ requiring an lvalue (this usually means there is a SRA or IPA-SRA
+ bug). Otherwise there is nothing to verify, gross mismatches at
+ most invoke undefined behavior. */
+ if (require_lvalue
+ && (TREE_CODE (op) == SSA_NAME
+ || is_gimple_min_invariant (op)))
+ {
+ error ("Conversion of an SSA_NAME on the left hand side.");
+ debug_generic_stmt (expr);
+ return true;
+ }
+ else if (!handled_component_p (op))
+ return false;
+ }
expr = op;
}
{
tree fn = gimple_call_fn (stmt);
tree fntype;
+ unsigned i;
+
+ if (TREE_CODE (fn) != OBJ_TYPE_REF
+ && !is_gimple_val (fn))
+ {
+ error ("invalid function in gimple call");
+ debug_generic_stmt (fn);
+ return true;
+ }
if (!POINTER_TYPE_P (TREE_TYPE (fn))
|| (TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != FUNCTION_TYPE
}
if (gimple_call_lhs (stmt)
- && !is_gimple_lvalue (gimple_call_lhs (stmt)))
+ && (!is_gimple_lvalue (gimple_call_lhs (stmt))
+ || verify_types_in_gimple_reference (gimple_call_lhs (stmt), true)))
{
error ("invalid LHS in gimple call");
return true;
}
+ if (gimple_call_lhs (stmt) && gimple_call_noreturn_p (stmt))
+ {
+ error ("LHS in noreturn call");
+ return true;
+ }
+
fntype = TREE_TYPE (TREE_TYPE (fn));
if (gimple_call_lhs (stmt)
&& !useless_type_conversion_p (TREE_TYPE (gimple_call_lhs (stmt)),
return true;
}
+ if (gimple_call_chain (stmt)
+ && !is_gimple_val (gimple_call_chain (stmt)))
+ {
+ error ("invalid static chain in gimple call");
+ debug_generic_stmt (gimple_call_chain (stmt));
+ return true;
+ }
+
/* If there is a static chain argument, this should not be an indirect
call, and the decl should have DECL_STATIC_CHAIN set. */
if (gimple_call_chain (stmt))
/* ??? The C frontend passes unpromoted arguments in case it
didn't see a function declaration before the call. So for now
- leave the call arguments unverified. Once we gimplify
+ leave the call arguments mostly unverified. Once we gimplify
unit-at-a-time we have a chance to fix this. */
+ for (i = 0; i < gimple_call_num_args (stmt); ++i)
+ {
+ tree arg = gimple_call_arg (stmt, i);
+ if (!is_gimple_operand (arg))
+ {
+ error ("invalid argument to gimple call");
+ debug_generic_expr (arg);
+ }
+ }
+
return false;
}
if ((!INTEGRAL_TYPE_P (rhs1_type)
&& !FIXED_POINT_TYPE_P (rhs1_type)
&& !(TREE_CODE (rhs1_type) == VECTOR_TYPE
- && TREE_CODE (TREE_TYPE (rhs1_type)) == INTEGER_TYPE))
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))))
|| (!INTEGRAL_TYPE_P (rhs2_type)
/* Vector shifts of vectors are also ok. */
&& !(TREE_CODE (rhs1_type) == VECTOR_TYPE
- && TREE_CODE (TREE_TYPE (rhs1_type)) == INTEGER_TYPE
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
&& TREE_CODE (rhs2_type) == VECTOR_TYPE
- && TREE_CODE (TREE_TYPE (rhs2_type)) == INTEGER_TYPE))
+ && INTEGRAL_TYPE_P (TREE_TYPE (rhs2_type))))
|| !useless_type_conversion_p (lhs_type, rhs1_type))
{
error ("type mismatch in shift expression");
}
return false;
- }
+ }
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
connected to the operand types. */
return verify_gimple_comparison (lhs_type, rhs1, rhs2);
- case WIDEN_SUM_EXPR:
case WIDEN_MULT_EXPR:
+ if (TREE_CODE (lhs_type) != INTEGER_TYPE)
+ return true;
+ return ((2 * TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (lhs_type))
+ || (TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (rhs2_type)));
+
+ case WIDEN_SUM_EXPR:
case VEC_WIDEN_MULT_HI_EXPR:
case VEC_WIDEN_MULT_LO_EXPR:
case VEC_PACK_TRUNC_EXPR:
return values from the original source. */
if (op == NULL)
return false;
-
+
if (!is_gimple_val (op)
&& TREE_CODE (op) != RESULT_DECL)
{
return verify_gimple_call (stmt);
case GIMPLE_COND:
+ if (TREE_CODE_CLASS (gimple_cond_code (stmt)) != tcc_comparison)
+ {
+ error ("invalid comparison code in gimple cond");
+ return true;
+ }
+ if (!(!gimple_cond_true_label (stmt)
+ || TREE_CODE (gimple_cond_true_label (stmt)) == LABEL_DECL)
+ || !(!gimple_cond_false_label (stmt)
+ || TREE_CODE (gimple_cond_false_label (stmt)) == LABEL_DECL))
+ {
+ error ("invalid labels in gimple cond");
+ return true;
+ }
+
return verify_gimple_comparison (boolean_type_node,
gimple_cond_lhs (stmt),
gimple_cond_rhs (stmt));
err = 1;
}
+ if (prev_stmt && EH_LANDING_PAD_NR (label) != 0)
+ {
+ error ("EH landing pad label ");
+ print_generic_expr (stderr, label, 0);
+ fprintf (stderr, " is not first in a sequence of labels in bb %d",
+ bb->index);
+ err = 1;
+ }
+
if (label_to_block (label) != bb)
{
error ("label ");
{
edge true_edge;
edge false_edge;
-
+
extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
if (!true_edge
for (gsi = gsi_start_phis (dummy); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi, new_phi;
-
+
phi = gsi_stmt (gsi);
var = gimple_phi_result (phi);
new_phi = create_phi_node (var, bb);
SSA_NAME_DEF_STMT (var) = new_phi;
gimple_phi_set_result (phi, make_ssa_name (SSA_NAME_VAR (var), phi));
- add_phi_arg (new_phi, gimple_phi_result (phi), fallthru,
+ add_phi_arg (new_phi, gimple_phi_result (phi), fallthru,
UNKNOWN_LOCATION);
}
TREE_CHAIN (last) = TREE_CHAIN (cases2);
TREE_CHAIN (cases2) = first;
}
+ bitmap_set_bit (touched_switch_bbs, gimple_bb (stmt)->index);
}
else
{
return new_bb;
/* Split the statement list - avoid re-creating new containers as this
- brings ugly quadratic memory consumption in the inliner.
+ brings ugly quadratic memory consumption in the inliner.
(We are still quadratic since we need to update stmt BB pointers,
sadly.) */
list = gsi_split_seq_before (&gsi);
return new_bb;
}
-/* Add phi arguments to the phi nodes in E_COPY->dest according to
- the phi arguments coming from the equivalent edge at
- the phi nodes of DEST. */
-
-static void
-add_phi_args_after_redirect (edge e_copy, edge orig_e)
-{
- gimple_stmt_iterator psi, psi_copy;
- gimple phi, phi_copy;
- tree def;
-
- for (psi = gsi_start_phis (orig_e->dest),
- psi_copy = gsi_start_phis (e_copy->dest);
- !gsi_end_p (psi);
- gsi_next (&psi), gsi_next (&psi_copy))
- {
-
- phi = gsi_stmt (psi);
- phi_copy = gsi_stmt (psi_copy);
- def = PHI_ARG_DEF_FROM_EDGE (phi, orig_e);
- add_phi_arg (phi_copy, def, e_copy,
- gimple_phi_arg_location_from_edge (phi, orig_e));
- }
-}
-
/* Adds phi node arguments for edge E_COPY after basic block duplication. */
static void
phi = gsi_stmt (psi);
phi_copy = gsi_stmt (psi_copy);
def = PHI_ARG_DEF_FROM_EDGE (phi, e);
- add_phi_arg (phi_copy, def, e_copy,
+ add_phi_arg (phi_copy, def, e_copy,
gimple_phi_arg_location_from_edge (phi, e));
}
}
is moved to ENTRY. Returns true if duplication succeeds, false
otherwise.
- For example,
-
+ For example,
+
some_code;
if (cond)
A;
edge exits[2], nexits[2], e;
gimple_stmt_iterator gsi,gsi1;
gimple cond_stmt;
- edge sorig, snew, orig_e;
+ edge sorig, snew;
basic_block exit_bb;
- edge_iterator ei;
- VEC (edge, heap) *redirect_edges;
- basic_block iters_bb, orig_src;
+ basic_block iters_bb;
tree new_rhs;
+ gimple_stmt_iterator psi;
+ gimple phi;
+ tree def;
gcc_assert (EDGE_COUNT (exit->src->succs) == 2);
exits[0] = exit;
if (!can_copy_bbs_p (region, n_region))
return false;
- /* Some sanity checking. Note that we do not check for all possible
- missuses of the functions. I.e. if you ask to copy something weird
- (e.g., in the example, if there is a jump from inside to the middle
- of some_code, or come_code defines some of the values used in cond)
- it will work, but the resulting code will not be correct. */
- for (i = 0; i < n_region; i++)
- {
- if (region[i] == orig_loop->latch)
- return false;
- }
-
initialize_original_copy_tables ();
set_loop_copy (orig_loop, loop);
duplicate_subloops (orig_loop, loop);
cond_stmt = last_stmt (exit->src);
gcc_assert (gimple_code (cond_stmt) == GIMPLE_COND);
cond_stmt = gimple_copy (cond_stmt);
-
- /* If the block consisting of the exit condition has the latch as
- successor, then the body of the loop is executed before
- the exit condition is tested. In such case, moving the
- condition to the entry, causes that the loop will iterate
- one less iteration (which is the wanted outcome, since we
- peel out the last iteration). If the body is executed after
- the condition, moving the condition to the entry requires
+
+ /* If the block consisting of the exit condition has the latch as
+ successor, then the body of the loop is executed before
+ the exit condition is tested. In such case, moving the
+ condition to the entry, causes that the loop will iterate
+ one less iteration (which is the wanted outcome, since we
+ peel out the last iteration). If the body is executed after
+ the condition, moving the condition to the entry requires
decrementing one iteration. */
if (exits[1]->dest == orig_loop->latch)
new_rhs = gimple_cond_rhs (cond_stmt);
else
{
new_rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (gimple_cond_rhs (cond_stmt)),
- gimple_cond_rhs (cond_stmt),
+ gimple_cond_rhs (cond_stmt),
build_int_cst (TREE_TYPE (gimple_cond_rhs (cond_stmt)), 1));
if (TREE_CODE (gimple_cond_rhs (cond_stmt)) == SSA_NAME)
for (gsi1 = gsi_start_bb (iters_bb); !gsi_end_p (gsi1); gsi_next (&gsi1))
if (gsi_stmt (gsi1) == SSA_NAME_DEF_STMT (gimple_cond_rhs (cond_stmt)))
break;
-
+
new_rhs = force_gimple_operand_gsi (&gsi1, new_rhs, true,
NULL_TREE,false,GSI_CONTINUE_LINKING);
}
- }
- gimple_cond_set_rhs (cond_stmt, unshare_expr (new_rhs));
+ }
+ gimple_cond_set_rhs (cond_stmt, unshare_expr (new_rhs));
gimple_cond_set_lhs (cond_stmt, unshare_expr (gimple_cond_lhs (cond_stmt)));
gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
/* Add the PHI node arguments. */
add_phi_args_after_copy (region_copy, n_region, snew);
-
+
/* Get rid of now superfluous conditions and associated edges (and phi node
arguments). */
exit_bb = exit->dest;
-
+
e = redirect_edge_and_branch (exits[0], exits[1]->dest);
PENDING_STMT (e) = NULL;
-
- /* If the block consisting of the exit condition has the latch as
- successor, then the body of the loop is executed before
- the exit condition is tested.
-
- { body }
- { cond } (exit[0]) -> { latch }
- |
- V (exit[1])
-
- { exit_bb }
-
-
- In such case, the equivalent copied edge nexits[1]
- (for the peeled iteration) needs to be redirected to exit_bb.
-
- Otherwise,
-
- { cond } (exit[0]) -> { body }
- |
- V (exit[1])
-
- { exit_bb }
-
-
- exit[0] is pointing to the body of the loop,
- and the equivalent nexits[0] needs to be redirected to
- the copied body (of the peeled iteration). */
-
- if (exits[1]->dest == orig_loop->latch)
- e = redirect_edge_and_branch (nexits[1], nexits[0]->dest);
- else
- e = redirect_edge_and_branch (nexits[0], nexits[1]->dest);
- PENDING_STMT (e) = NULL;
-
- redirect_edges = VEC_alloc (edge, heap, 10);
-
- for (i = 0; i < n_region; i++)
- region_copy[i]->flags |= BB_DUPLICATED;
-
- /* Iterate all incoming edges to latch. All those coming from
- copied bbs will be redirected to exit_bb. */
- FOR_EACH_EDGE (e, ei, orig_loop->latch->preds)
- {
- if (e->src->flags & BB_DUPLICATED)
- VEC_safe_push (edge, heap, redirect_edges, e);
- }
-
+
+ /* The latch of ORIG_LOOP was copied, and so was the backedge
+ to the original header. We redirect this backedge to EXIT_BB. */
for (i = 0; i < n_region; i++)
- region_copy[i]->flags &= ~BB_DUPLICATED;
-
- for (i = 0; VEC_iterate (edge, redirect_edges, i, e); ++i)
- {
- e = redirect_edge_and_branch (e, exit_bb);
- PENDING_STMT (e) = NULL;
- orig_src = get_bb_original (e->src);
- orig_e = find_edge (orig_src, orig_loop->latch);
- add_phi_args_after_redirect (e, orig_e);
- }
+ if (get_bb_original (region_copy[i]) == orig_loop->latch)
+ {
+ gcc_assert (single_succ_edge (region_copy[i]));
+ e = redirect_edge_and_branch (single_succ_edge (region_copy[i]), exit_bb);
+ PENDING_STMT (e) = NULL;
+ for (psi = gsi_start_phis (exit_bb);
+ !gsi_end_p (psi);
+ gsi_next (&psi))
+ {
+ phi = gsi_stmt (psi);
+ def = PHI_ARG_DEF (phi, nexits[0]->dest_idx);
+ add_phi_arg (phi, def, e, gimple_phi_arg_location_from_edge (phi, e));
+ }
+ }
+ e = redirect_edge_and_branch (nexits[0], nexits[1]->dest);
+ PENDING_STMT (e) = NULL;
- VEC_free (edge, heap, redirect_edges);
-
/* Anything that is outside of the region, but was dominated by something
inside needs to update dominance info. */
iterate_fix_dominators (CDI_DOMINATORS, doms, false);
VEC_free (basic_block, heap, doms);
-
/* Update the SSA web. */
update_ssa (TODO_update_ssa);
&& !is_global_var (t))
|| TREE_CODE (t) == CONST_DECL)
replace_by_duplicate_decl (tp, p->vars_map, p->to_context);
-
+
if (SSA_VAR_P (t)
&& gimple_in_ssa_p (cfun))
{
/* Print to FILE the basic block BB following the VERBOSITY level. */
-void
+void
print_loops_bb (FILE *file, basic_block bb, int indent, int verbosity)
{
char *s_indent = (char *) alloca ((size_t) indent + 1);
s_indent[indent] = '\0';
/* Print loop's header. */
- fprintf (file, "%sloop_%d (header = %d, latch = %d", s_indent,
+ fprintf (file, "%sloop_%d (header = %d, latch = %d", s_indent,
loop->num, loop->header->index, loop->latch->index);
fprintf (file, ", niter = ");
print_generic_expr (file, loop->nb_iterations, 0);
/* Update the dominance information. The immediate dominator may change only
for blocks whose immediate dominator belongs to DF_IDOM:
-
+
Suppose that idom(X) = Y before removal of E and idom(X) != Y after the
removal. Let Z the arbitrary block such that idom(Z) = Y and
Z dominates X after the removal. Before removal, there exists a path P
from Y to X that avoids Z. Let F be the last edge on P that is
removed, and let W = F->dest. Before removal, idom(W) = Y (since Y
dominates W, and because of P, Z does not dominate W), and W belongs to
- the dominance frontier of E. Therefore, Y belongs to DF_IDOM. */
+ the dominance frontier of E. Therefore, Y belongs to DF_IDOM. */
EXECUTE_IF_SET_IN_BITMAP (df_idom, 0, i, bi)
{
bb = BASIC_BLOCK (i);
{
basic_block bb = e->dest;
- if (phi_nodes (bb))
+ if (!gimple_seq_empty_p (phi_nodes (bb)))
reserve_phi_args_for_new_edge (bb);
}
static void
gimple_execute_on_shrinking_pred (edge e)
{
- if (phi_nodes (e->dest))
+ if (!gimple_seq_empty_p (phi_nodes (e->dest)))
remove_phi_args (e);
}
{
if (EDGE_CRITICAL_P (e) && !(e->flags & EDGE_ABNORMAL))
split_edge (e);
- /* PRE inserts statements to edges and expects that
+ /* PRE inserts statements to edges and expects that
since split_critical_edges was done beforehand, committing edge
insertions will not split more edges. In addition to critical
edges we must split edges that have multiple successors and
- end by control flow statements, such as RESX.
+ end by control flow statements, such as RESX.
Go ahead and split them too. This matches the logic in
gimple_find_edge_insert_loc. */
else if ((!single_pred_p (e->dest)