label_to_block_map_for_function (fn),
initial_cfg_capacity);
- SET_BASIC_BLOCK_FOR_FUNCTION (fn, ENTRY_BLOCK,
+ SET_BASIC_BLOCK_FOR_FUNCTION (fn, ENTRY_BLOCK,
ENTRY_BLOCK_PTR_FOR_FUNCTION (fn));
- SET_BASIC_BLOCK_FOR_FUNCTION (fn, EXIT_BLOCK,
+ SET_BASIC_BLOCK_FOR_FUNCTION (fn, EXIT_BLOCK,
EXIT_BLOCK_PTR_FOR_FUNCTION (fn));
ENTRY_BLOCK_PTR_FOR_FUNCTION (fn)->next_bb
return bb;
}
-
-/* Walk the function tree removing unnecessary statements.
-
- * Empty statement nodes are removed
-
- * Unnecessary TRY_FINALLY and TRY_CATCH blocks are removed
-
- * Unnecessary COND_EXPRs are removed
-
- * Some unnecessary BIND_EXPRs are removed
-
- * GOTO_EXPRs immediately preceding destination are removed.
-
- Clearly more work could be done. The trick is doing the analysis
- and removal fast enough to be a net improvement in compile times.
-
- Note that when we remove a control structure such as a COND_EXPR
- BIND_EXPR, or TRY block, we will need to repeat this optimization pass
- to ensure we eliminate all the useless code. */
-
-struct rus_data
-{
- bool repeat;
- bool may_throw;
- bool may_branch;
- bool has_label;
- bool last_was_goto;
- gimple_stmt_iterator last_goto_gsi;
-};
-
-
-static void remove_useless_stmts_1 (gimple_stmt_iterator *gsi, struct rus_data *);
-
-/* Given a statement sequence, find the first executable statement with
- location information, and warn that it is unreachable. When searching,
- descend into containers in execution order. */
-
-static bool
-remove_useless_stmts_warn_notreached (gimple_seq stmts)
-{
- gimple_stmt_iterator gsi;
-
- for (gsi = gsi_start (stmts); !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gimple stmt = gsi_stmt (gsi);
-
- if (gimple_no_warning_p (stmt)) return false;
-
- if (gimple_has_location (stmt))
- {
- location_t loc = gimple_location (stmt);
- if (LOCATION_LINE (loc) > 0)
- {
- warning_at (loc, OPT_Wunreachable_code, "will never be executed");
- return true;
- }
- }
-
- switch (gimple_code (stmt))
- {
- /* Unfortunately, we need the CFG now to detect unreachable
- branches in a conditional, so conditionals are not handled here. */
-
- case GIMPLE_TRY:
- if (remove_useless_stmts_warn_notreached (gimple_try_eval (stmt)))
- return true;
- if (remove_useless_stmts_warn_notreached (gimple_try_cleanup (stmt)))
- return true;
- break;
-
- case GIMPLE_CATCH:
- return remove_useless_stmts_warn_notreached (gimple_catch_handler (stmt));
-
- case GIMPLE_EH_FILTER:
- return remove_useless_stmts_warn_notreached (gimple_eh_filter_failure (stmt));
-
- case GIMPLE_BIND:
- return remove_useless_stmts_warn_notreached (gimple_bind_body (stmt));
-
- default:
- break;
- }
- }
-
- return false;
-}
-
-/* Helper for remove_useless_stmts_1. Handle GIMPLE_COND statements. */
-
-static void
-remove_useless_stmts_cond (gimple_stmt_iterator *gsi, struct rus_data *data)
-{
- gimple stmt = gsi_stmt (*gsi);
-
- /* The folded result must still be a conditional statement. */
- fold_stmt (gsi);
- gcc_assert (gsi_stmt (*gsi) == stmt);
-
- data->may_branch = true;
-
- /* Replace trivial conditionals with gotos. */
- if (gimple_cond_true_p (stmt))
- {
- /* Goto THEN label. */
- tree then_label = gimple_cond_true_label (stmt);
-
- gsi_replace (gsi, gimple_build_goto (then_label), false);
- data->last_goto_gsi = *gsi;
- data->last_was_goto = true;
- data->repeat = true;
- }
- else if (gimple_cond_false_p (stmt))
- {
- /* Goto ELSE label. */
- tree else_label = gimple_cond_false_label (stmt);
-
- gsi_replace (gsi, gimple_build_goto (else_label), false);
- data->last_goto_gsi = *gsi;
- data->last_was_goto = true;
- data->repeat = true;
- }
- else
- {
- tree then_label = gimple_cond_true_label (stmt);
- tree else_label = gimple_cond_false_label (stmt);
-
- if (then_label == else_label)
- {
- /* Goto common destination. */
- gsi_replace (gsi, gimple_build_goto (then_label), false);
- data->last_goto_gsi = *gsi;
- data->last_was_goto = true;
- data->repeat = true;
- }
- }
-
- gsi_next (gsi);
-
- data->last_was_goto = false;
-}
-
-/* Helper for remove_useless_stmts_1.
- Handle the try-finally case for GIMPLE_TRY statements. */
-
-static void
-remove_useless_stmts_tf (gimple_stmt_iterator *gsi, struct rus_data *data)
-{
- bool save_may_branch, save_may_throw;
- bool this_may_branch, this_may_throw;
-
- gimple_seq eval_seq, cleanup_seq;
- gimple_stmt_iterator eval_gsi, cleanup_gsi;
-
- gimple stmt = gsi_stmt (*gsi);
-
- /* Collect may_branch and may_throw information for the body only. */
- save_may_branch = data->may_branch;
- save_may_throw = data->may_throw;
- data->may_branch = false;
- data->may_throw = false;
- data->last_was_goto = false;
-
- eval_seq = gimple_try_eval (stmt);
- eval_gsi = gsi_start (eval_seq);
- remove_useless_stmts_1 (&eval_gsi, data);
-
- this_may_branch = data->may_branch;
- this_may_throw = data->may_throw;
- data->may_branch |= save_may_branch;
- data->may_throw |= save_may_throw;
- data->last_was_goto = false;
-
- cleanup_seq = gimple_try_cleanup (stmt);
- cleanup_gsi = gsi_start (cleanup_seq);
- remove_useless_stmts_1 (&cleanup_gsi, data);
-
- /* If the body is empty, then we can emit the FINALLY block without
- the enclosing TRY_FINALLY_EXPR. */
- if (gimple_seq_empty_p (eval_seq))
- {
- gsi_insert_seq_before (gsi, cleanup_seq, GSI_SAME_STMT);
- gsi_remove (gsi, false);
- data->repeat = true;
- }
-
- /* If the handler is empty, then we can emit the TRY block without
- the enclosing TRY_FINALLY_EXPR. */
- else if (gimple_seq_empty_p (cleanup_seq))
- {
- gsi_insert_seq_before (gsi, eval_seq, GSI_SAME_STMT);
- gsi_remove (gsi, false);
- data->repeat = true;
- }
-
- /* If the body neither throws, nor branches, then we can safely
- string the TRY and FINALLY blocks together. */
- else if (!this_may_branch && !this_may_throw)
- {
- gsi_insert_seq_before (gsi, eval_seq, GSI_SAME_STMT);
- gsi_insert_seq_before (gsi, cleanup_seq, GSI_SAME_STMT);
- gsi_remove (gsi, false);
- data->repeat = true;
- }
- else
- gsi_next (gsi);
-}
-
-/* Helper for remove_useless_stmts_1.
- Handle the try-catch case for GIMPLE_TRY statements. */
-
-static void
-remove_useless_stmts_tc (gimple_stmt_iterator *gsi, struct rus_data *data)
-{
- bool save_may_throw, this_may_throw;
-
- gimple_seq eval_seq, cleanup_seq, handler_seq, failure_seq;
- gimple_stmt_iterator eval_gsi, cleanup_gsi, handler_gsi, failure_gsi;
-
- gimple stmt = gsi_stmt (*gsi);
-
- /* Collect may_throw information for the body only. */
- save_may_throw = data->may_throw;
- data->may_throw = false;
- data->last_was_goto = false;
-
- eval_seq = gimple_try_eval (stmt);
- eval_gsi = gsi_start (eval_seq);
- remove_useless_stmts_1 (&eval_gsi, data);
-
- this_may_throw = data->may_throw;
- data->may_throw = save_may_throw;
-
- cleanup_seq = gimple_try_cleanup (stmt);
-
- /* If the body cannot throw, then we can drop the entire TRY_CATCH_EXPR. */
- if (!this_may_throw)
- {
- if (warn_notreached)
- {
- remove_useless_stmts_warn_notreached (cleanup_seq);
- }
- gsi_insert_seq_before (gsi, eval_seq, GSI_SAME_STMT);
- gsi_remove (gsi, false);
- data->repeat = true;
- return;
- }
-
- /* Process the catch clause specially. We may be able to tell that
- no exceptions propagate past this point. */
-
- this_may_throw = true;
- cleanup_gsi = gsi_start (cleanup_seq);
- stmt = gsi_stmt (cleanup_gsi);
- data->last_was_goto = false;
-
- switch (gimple_code (stmt))
- {
- case GIMPLE_CATCH:
- /* If the first element is a catch, they all must be. */
- while (!gsi_end_p (cleanup_gsi))
- {
- stmt = gsi_stmt (cleanup_gsi);
- /* If we catch all exceptions, then the body does not
- propagate exceptions past this point. */
- if (gimple_catch_types (stmt) == NULL)
- this_may_throw = false;
- data->last_was_goto = false;
- handler_seq = gimple_catch_handler (stmt);
- handler_gsi = gsi_start (handler_seq);
- remove_useless_stmts_1 (&handler_gsi, data);
- gsi_next (&cleanup_gsi);
- }
- gsi_next (gsi);
- break;
-
- case GIMPLE_EH_FILTER:
- if (gimple_eh_filter_types (stmt) == NULL)
- this_may_throw = false;
- failure_seq = gimple_eh_filter_failure (stmt);
- failure_gsi = gsi_start (failure_seq);
- remove_useless_stmts_1 (&failure_gsi, data);
- gsi_next (gsi);
- break;
-
- case GIMPLE_EH_MUST_NOT_THROW:
- this_may_throw = false;
- break;
-
- default:
- /* Otherwise this is a list of cleanup statements. */
- remove_useless_stmts_1 (&cleanup_gsi, data);
-
- /* If the cleanup is empty, then we can emit the TRY block without
- the enclosing TRY_CATCH_EXPR. */
- if (gimple_seq_empty_p (cleanup_seq))
- {
- gsi_insert_seq_before (gsi, eval_seq, GSI_SAME_STMT);
- gsi_remove(gsi, false);
- data->repeat = true;
- }
- else
- gsi_next (gsi);
- break;
- }
-
- data->may_throw |= this_may_throw;
-}
-
-/* Helper for remove_useless_stmts_1. Handle GIMPLE_BIND statements. */
-
-static void
-remove_useless_stmts_bind (gimple_stmt_iterator *gsi, struct rus_data *data ATTRIBUTE_UNUSED)
-{
- tree block;
- gimple_seq body_seq, fn_body_seq;
- gimple_stmt_iterator body_gsi;
-
- gimple stmt = gsi_stmt (*gsi);
-
- /* First remove anything underneath the BIND_EXPR. */
-
- body_seq = gimple_bind_body (stmt);
- body_gsi = gsi_start (body_seq);
- remove_useless_stmts_1 (&body_gsi, data);
-
- /* If the GIMPLE_BIND has no variables, then we can pull everything
- up one level and remove the GIMPLE_BIND, unless this is the toplevel
- GIMPLE_BIND for the current function or an inlined function.
-
- When this situation occurs we will want to apply this
- optimization again. */
- block = gimple_bind_block (stmt);
- fn_body_seq = gimple_body (current_function_decl);
- if (gimple_bind_vars (stmt) == NULL_TREE
- && (gimple_seq_empty_p (fn_body_seq)
- || stmt != gimple_seq_first_stmt (fn_body_seq))
- && (! block
- || ! BLOCK_ABSTRACT_ORIGIN (block)
- || (TREE_CODE (BLOCK_ABSTRACT_ORIGIN (block))
- != FUNCTION_DECL)))
- {
- tree var = NULL_TREE;
- /* Even if there are no gimple_bind_vars, there might be other
- decls in BLOCK_VARS rendering the GIMPLE_BIND not useless. */
- if (block && !BLOCK_NUM_NONLOCALIZED_VARS (block))
- for (var = BLOCK_VARS (block); var; var = TREE_CHAIN (var))
- if (TREE_CODE (var) == IMPORTED_DECL)
- break;
- if (var || (block && BLOCK_NUM_NONLOCALIZED_VARS (block)))
- gsi_next (gsi);
- else
- {
- gsi_insert_seq_before (gsi, body_seq, GSI_SAME_STMT);
- gsi_remove (gsi, false);
- data->repeat = true;
- }
- }
- else
- gsi_next (gsi);
-}
-
-/* Helper for remove_useless_stmts_1. Handle GIMPLE_GOTO statements. */
-
-static void
-remove_useless_stmts_goto (gimple_stmt_iterator *gsi, struct rus_data *data)
-{
- gimple stmt = gsi_stmt (*gsi);
-
- tree dest = gimple_goto_dest (stmt);
-
- data->may_branch = true;
- data->last_was_goto = false;
-
- /* Record iterator for last goto expr, so that we can delete it if unnecessary. */
- if (TREE_CODE (dest) == LABEL_DECL)
- {
- data->last_goto_gsi = *gsi;
- data->last_was_goto = true;
- }
-
- gsi_next(gsi);
-}
-
-/* Helper for remove_useless_stmts_1. Handle GIMPLE_LABEL statements. */
-
-static void
-remove_useless_stmts_label (gimple_stmt_iterator *gsi, struct rus_data *data)
-{
- gimple stmt = gsi_stmt (*gsi);
-
- tree label = gimple_label_label (stmt);
-
- data->has_label = true;
-
- /* We do want to jump across non-local label receiver code. */
- if (DECL_NONLOCAL (label))
- data->last_was_goto = false;
-
- else if (data->last_was_goto
- && gimple_goto_dest (gsi_stmt (data->last_goto_gsi)) == label)
- {
- /* Replace the preceding GIMPLE_GOTO statement with
- a GIMPLE_NOP, which will be subsequently removed.
- In this way, we avoid invalidating other iterators
- active on the statement sequence. */
- gsi_replace(&data->last_goto_gsi, gimple_build_nop(), false);
- data->last_was_goto = false;
- data->repeat = true;
- }
-
- /* ??? Add something here to delete unused labels. */
-
- gsi_next (gsi);
-}
-
-
/* T is CALL_EXPR. Set current_function_calls_* flags. */
void
cfun->calls_setjmp = false;
}
-/* Remove useless statements from a statement sequence, and perform
- some preliminary simplifications. */
-
-static void
-remove_useless_stmts_1 (gimple_stmt_iterator *gsi, struct rus_data *data)
-{
- while (!gsi_end_p (*gsi))
- {
- gimple stmt = gsi_stmt (*gsi);
-
- switch (gimple_code (stmt))
- {
- case GIMPLE_COND:
- remove_useless_stmts_cond (gsi, data);
- break;
-
- case GIMPLE_GOTO:
- remove_useless_stmts_goto (gsi, data);
- break;
-
- case GIMPLE_LABEL:
- remove_useless_stmts_label (gsi, data);
- break;
-
- case GIMPLE_ASSIGN:
- fold_stmt (gsi);
- stmt = gsi_stmt (*gsi);
- data->last_was_goto = false;
- if (stmt_could_throw_p (stmt))
- data->may_throw = true;
- gsi_next (gsi);
- break;
-
- case GIMPLE_ASM:
- fold_stmt (gsi);
- data->last_was_goto = false;
- gsi_next (gsi);
- break;
-
- case GIMPLE_CALL:
- fold_stmt (gsi);
- stmt = gsi_stmt (*gsi);
- data->last_was_goto = false;
- if (is_gimple_call (stmt))
- notice_special_calls (stmt);
-
- /* We used to call update_gimple_call_flags here,
- which copied side-effects and nothrows status
- from the function decl to the call. In the new
- tuplified GIMPLE, the accessors for this information
- always consult the function decl, so this copying
- is no longer necessary. */
- if (stmt_could_throw_p (stmt))
- data->may_throw = true;
- gsi_next (gsi);
- break;
-
- case GIMPLE_RETURN:
- fold_stmt (gsi);
- data->last_was_goto = false;
- data->may_branch = true;
- gsi_next (gsi);
- break;
-
- case GIMPLE_BIND:
- remove_useless_stmts_bind (gsi, data);
- break;
-
- case GIMPLE_TRY:
- if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
- remove_useless_stmts_tc (gsi, data);
- else if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
- remove_useless_stmts_tf (gsi, data);
- else
- gcc_unreachable ();
- break;
-
- case GIMPLE_CATCH:
- gcc_unreachable ();
- break;
-
- case GIMPLE_NOP:
- gsi_remove (gsi, false);
- break;
-
- case GIMPLE_OMP_FOR:
- {
- gimple_seq pre_body_seq = gimple_omp_for_pre_body (stmt);
- gimple_stmt_iterator pre_body_gsi = gsi_start (pre_body_seq);
-
- remove_useless_stmts_1 (&pre_body_gsi, data);
- data->last_was_goto = false;
- }
- /* FALLTHROUGH */
- case GIMPLE_OMP_CRITICAL:
- case GIMPLE_OMP_CONTINUE:
- case GIMPLE_OMP_MASTER:
- case GIMPLE_OMP_ORDERED:
- case GIMPLE_OMP_SECTION:
- case GIMPLE_OMP_SECTIONS:
- case GIMPLE_OMP_SINGLE:
- {
- gimple_seq body_seq = gimple_omp_body (stmt);
- gimple_stmt_iterator body_gsi = gsi_start (body_seq);
-
- remove_useless_stmts_1 (&body_gsi, data);
- data->last_was_goto = false;
- gsi_next (gsi);
- }
- break;
-
- case GIMPLE_OMP_PARALLEL:
- case GIMPLE_OMP_TASK:
- {
- /* Make sure the outermost GIMPLE_BIND isn't removed
- as useless. */
- gimple_seq body_seq = gimple_omp_body (stmt);
- gimple bind = gimple_seq_first_stmt (body_seq);
- gimple_seq bind_seq = gimple_bind_body (bind);
- gimple_stmt_iterator bind_gsi = gsi_start (bind_seq);
-
- remove_useless_stmts_1 (&bind_gsi, data);
- data->last_was_goto = false;
- gsi_next (gsi);
- }
- break;
-
- default:
- data->last_was_goto = false;
- gsi_next (gsi);
- break;
- }
- }
-}
-
-/* Walk the function tree, removing useless statements and performing
- some preliminary simplifications. */
-
-static unsigned int
-remove_useless_stmts (void)
-{
- struct rus_data data;
-
- clear_special_calls ();
-
- do
- {
- gimple_stmt_iterator gsi;
-
- gsi = gsi_start (gimple_body (current_function_decl));
- memset (&data, 0, sizeof (data));
- remove_useless_stmts_1 (&gsi, &data);
- }
- while (data.repeat);
-
-#ifdef ENABLE_TYPES_CHECKING
- verify_types_in_gimple_seq (gimple_body (current_function_decl));
-#endif
-
- return 0;
-}
-
-
-struct gimple_opt_pass pass_remove_useless_stmts =
-{
- {
- GIMPLE_PASS,
- "useless", /* name */
- NULL, /* gate */
- remove_useless_stmts, /* execute */
- NULL, /* sub */
- NULL, /* next */
- 0, /* static_pass_number */
- TV_NONE, /* tv_id */
- PROP_gimple_any, /* properties_required */
- 0, /* properties_provided */
- 0, /* properties_destroyed */
- 0, /* todo_flags_start */
- TODO_dump_func /* todo_flags_finish */
- }
-};
-
/* Remove PHI nodes associated with basic block BB and all edges out of BB. */
static void
edge_var_map *vm;
int i;
gimple_stmt_iterator phis;
-
+
v = redirect_edge_var_map_vector (old_edge);
if (!v)
return;
-
+
for (i = 0, phis = gsi_start_phis (new_edge->dest);
VEC_iterate (edge_var_map, v, i, vm) && !gsi_end_p (phis);
i++, gsi_next (&phis))
gimple phi = gsi_stmt (phis);
tree result = redirect_edge_var_map_result (vm);
tree arg = redirect_edge_var_map_def (vm);
-
+
gcc_assert (result == gimple_phi_result (phi));
-
+
add_phi_arg (phi, arg, new_edge, redirect_edge_var_map_location (vm));
}
-
+
redirect_edge_var_map_clear (old_edge);
}
return true;
}
- /* For VIEW_CONVERT_EXPRs which are allowed here, too, there
- is nothing to verify. Gross mismatches at most invoke
- undefined behavior. */
- if (TREE_CODE (expr) == VIEW_CONVERT_EXPR
- && !handled_component_p (op))
- return false;
+ if (TREE_CODE (expr) == VIEW_CONVERT_EXPR)
+ {
+ /* For VIEW_CONVERT_EXPRs which are allowed here too, we only check
+ that their operand is not an SSA name or an invariant when
+         requiring an lvalue (this usually means there is an SRA or IPA-SRA
+ bug). Otherwise there is nothing to verify, gross mismatches at
+ most invoke undefined behavior. */
+ if (require_lvalue
+ && (TREE_CODE (op) == SSA_NAME
+ || is_gimple_min_invariant (op)))
+ {
+ error ("Conversion of an SSA_NAME on the left hand side.");
+ debug_generic_stmt (expr);
+ return true;
+ }
+ else if (!handled_component_p (op))
+ return false;
+ }
expr = op;
}
}
if (gimple_call_lhs (stmt)
- && !is_gimple_lvalue (gimple_call_lhs (stmt)))
+ && (!is_gimple_lvalue (gimple_call_lhs (stmt))
+ || verify_types_in_gimple_reference (gimple_call_lhs (stmt), true)))
{
error ("invalid LHS in gimple call");
return true;
}
/* If there is a static chain argument, this should not be an indirect
- call, and the decl should not have DECL_NO_STATIC_CHAIN set. */
+ call, and the decl should have DECL_STATIC_CHAIN set. */
if (gimple_call_chain (stmt))
{
if (TREE_CODE (fn) != ADDR_EXPR
}
fn = TREE_OPERAND (fn, 0);
- if (DECL_NO_STATIC_CHAIN (fn))
+ if (!DECL_STATIC_CHAIN (fn))
{
error ("static chain with function that doesn't use one");
return true;
return false;
}
+ case ADDR_SPACE_CONVERT_EXPR:
+ {
+ if (!POINTER_TYPE_P (rhs1_type) || !POINTER_TYPE_P (lhs_type)
+ || (TYPE_ADDR_SPACE (TREE_TYPE (rhs1_type))
+ == TYPE_ADDR_SPACE (TREE_TYPE (lhs_type))))
+ {
+ error ("invalid types in address space conversion");
+ debug_generic_expr (lhs_type);
+ debug_generic_expr (rhs1_type);
+ return true;
+ }
+
+ return false;
+ }
+
case FIXED_CONVERT_EXPR:
{
if (!valid_fixed_convert_types_p (lhs_type, rhs1_type)
}
return false;
- }
+ }
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
return values from the original source. */
if (op == NULL)
return false;
-
+
if (!is_gimple_val (op)
&& TREE_CODE (op) != RESULT_DECL)
{
case GIMPLE_PREDICT:
case GIMPLE_RESX:
case GIMPLE_EH_DISPATCH:
+ case GIMPLE_EH_MUST_NOT_THROW:
return false;
CASE_GIMPLE_OMP:
/* Return true when the T can be shared. */
-static bool
+bool
tree_node_can_be_shared (tree t)
{
if (IS_TYPE_OR_DECL_P (t)
{
edge true_edge;
edge false_edge;
-
+
extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
if (!true_edge
for (gsi = gsi_start_phis (dummy); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi, new_phi;
-
+
phi = gsi_stmt (gsi);
var = gimple_phi_result (phi);
new_phi = create_phi_node (var, bb);
SSA_NAME_DEF_STMT (var) = new_phi;
gimple_phi_set_result (phi, make_ssa_name (SSA_NAME_VAR (var), phi));
- add_phi_arg (new_phi, gimple_phi_result (phi), fallthru,
+ add_phi_arg (new_phi, gimple_phi_result (phi), fallthru,
UNKNOWN_LOCATION);
}
case GIMPLE_ASM:
{
int i, n = gimple_asm_nlabels (stmt);
- tree label = gimple_block_label (dest);
+ tree label = NULL;
for (i = 0; i < n; ++i)
{
tree cons = gimple_asm_label_op (stmt, i);
if (label_to_block (TREE_VALUE (cons)) == e->dest)
- TREE_VALUE (cons) = label;
+ {
+ if (!label)
+ label = gimple_block_label (dest);
+ TREE_VALUE (cons) = label;
+ }
}
+
+ /* If we didn't find any label matching the former edge in the
+ asm labels, we must be redirecting the fallthrough
+ edge. */
+ gcc_assert (label || (e->flags & EDGE_FALLTHRU));
}
break;
return new_bb;
/* Split the statement list - avoid re-creating new containers as this
- brings ugly quadratic memory consumption in the inliner.
+ brings ugly quadratic memory consumption in the inliner.
(We are still quadratic since we need to update stmt BB pointers,
sadly.) */
list = gsi_split_seq_before (&gsi);
return new_bb;
}
+/* Add PHI arguments to the PHI nodes in E_COPY->dest, copying the
+   arguments that the corresponding PHI nodes in ORIG_E->dest receive
+   from ORIG_E.  */
+
+static void
+add_phi_args_after_redirect (edge e_copy, edge orig_e)
+{
+ gimple_stmt_iterator psi, psi_copy;
+ gimple phi, phi_copy;
+ tree def;
+
+ for (psi = gsi_start_phis (orig_e->dest),
+ psi_copy = gsi_start_phis (e_copy->dest);
+ !gsi_end_p (psi);
+ gsi_next (&psi), gsi_next (&psi_copy))
+ {
+
+ phi = gsi_stmt (psi);
+ phi_copy = gsi_stmt (psi_copy);
+ def = PHI_ARG_DEF_FROM_EDGE (phi, orig_e);
+ add_phi_arg (phi_copy, def, e_copy,
+ gimple_phi_arg_location_from_edge (phi, orig_e));
+ }
+}
+
/* Adds phi node arguments for edge E_COPY after basic block duplication. */
static void
phi = gsi_stmt (psi);
phi_copy = gsi_stmt (psi_copy);
def = PHI_ARG_DEF_FROM_EDGE (phi, e);
- add_phi_arg (phi_copy, def, e_copy,
+ add_phi_arg (phi_copy, def, e_copy,
gimple_phi_arg_location_from_edge (phi, e));
}
}
is moved to ENTRY. Returns true if duplication succeeds, false
otherwise.
- For example,
-
+ For example,
+
some_code;
if (cond)
A;
int total_freq = 0, exit_freq = 0;
gcov_type total_count = 0, exit_count = 0;
edge exits[2], nexits[2], e;
- gimple_stmt_iterator gsi;
+ gimple_stmt_iterator gsi,gsi1;
gimple cond_stmt;
- edge sorig, snew;
+ edge sorig, snew, orig_e;
+ basic_block exit_bb;
+ edge_iterator ei;
+ VEC (edge, heap) *redirect_edges;
+ basic_block iters_bb, orig_src;
+ tree new_rhs;
gcc_assert (EDGE_COUNT (exit->src->succs) == 2);
exits[0] = exit;
it will work, but the resulting code will not be correct. */
for (i = 0; i < n_region; i++)
{
- /* We do not handle subloops, i.e. all the blocks must belong to the
- same loop. */
- if (region[i]->loop_father != orig_loop)
- return false;
-
if (region[i] == orig_loop->latch)
return false;
}
initialize_original_copy_tables ();
set_loop_copy (orig_loop, loop);
+ duplicate_subloops (orig_loop, loop);
if (!region_copy)
{
cond_stmt = last_stmt (exit->src);
gcc_assert (gimple_code (cond_stmt) == GIMPLE_COND);
cond_stmt = gimple_copy (cond_stmt);
+
+  /* If the block containing the exit condition has the latch as
+     successor, then the body of the loop is executed before
+     the exit condition is tested.  In that case, moving the
+     condition to the entry causes the loop to iterate one fewer
+     time (which is the desired outcome, since we peel off the
+     last iteration).  If the body is executed after the
+     condition, moving the condition to the entry requires
+     decrementing the iteration count by one.  */
+ if (exits[1]->dest == orig_loop->latch)
+ new_rhs = gimple_cond_rhs (cond_stmt);
+ else
+ {
+ new_rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (gimple_cond_rhs (cond_stmt)),
+ gimple_cond_rhs (cond_stmt),
+ build_int_cst (TREE_TYPE (gimple_cond_rhs (cond_stmt)), 1));
+
+ if (TREE_CODE (gimple_cond_rhs (cond_stmt)) == SSA_NAME)
+ {
+ iters_bb = gimple_bb (SSA_NAME_DEF_STMT (gimple_cond_rhs (cond_stmt)));
+ for (gsi1 = gsi_start_bb (iters_bb); !gsi_end_p (gsi1); gsi_next (&gsi1))
+ if (gsi_stmt (gsi1) == SSA_NAME_DEF_STMT (gimple_cond_rhs (cond_stmt)))
+ break;
+
+ new_rhs = force_gimple_operand_gsi (&gsi1, new_rhs, true,
+ NULL_TREE,false,GSI_CONTINUE_LINKING);
+ }
+ }
+ gimple_cond_set_rhs (cond_stmt, unshare_expr (new_rhs));
gimple_cond_set_lhs (cond_stmt, unshare_expr (gimple_cond_lhs (cond_stmt)));
- gimple_cond_set_rhs (cond_stmt, unshare_expr (gimple_cond_rhs (cond_stmt)));
gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
sorig = single_succ_edge (switch_bb);
/* Get rid of now superfluous conditions and associated edges (and phi node
arguments). */
+ exit_bb = exit->dest;
+
e = redirect_edge_and_branch (exits[0], exits[1]->dest);
PENDING_STMT (e) = NULL;
- e = redirect_edge_and_branch (nexits[1], nexits[0]->dest);
+
+  /* If the block containing the exit condition has the latch as
+     successor, then the body of the loop is executed before
+     the exit condition is tested.
+
+ { body }
+ { cond } (exit[0]) -> { latch }
+ |
+ V (exit[1])
+
+ { exit_bb }
+
+
+   In that case, the equivalent copied edge nexits[1]
+ (for the peeled iteration) needs to be redirected to exit_bb.
+
+ Otherwise,
+
+ { cond } (exit[0]) -> { body }
+ |
+ V (exit[1])
+
+ { exit_bb }
+
+
+ exit[0] is pointing to the body of the loop,
+ and the equivalent nexits[0] needs to be redirected to
+ the copied body (of the peeled iteration). */
+
+ if (exits[1]->dest == orig_loop->latch)
+ e = redirect_edge_and_branch (nexits[1], nexits[0]->dest);
+ else
+ e = redirect_edge_and_branch (nexits[0], nexits[1]->dest);
PENDING_STMT (e) = NULL;
+ redirect_edges = VEC_alloc (edge, heap, 10);
+
+ for (i = 0; i < n_region; i++)
+ region_copy[i]->flags |= BB_DUPLICATED;
+
+ /* Iterate all incoming edges to latch. All those coming from
+ copied bbs will be redirected to exit_bb. */
+ FOR_EACH_EDGE (e, ei, orig_loop->latch->preds)
+ {
+ if (e->src->flags & BB_DUPLICATED)
+ VEC_safe_push (edge, heap, redirect_edges, e);
+ }
+
+ for (i = 0; i < n_region; i++)
+ region_copy[i]->flags &= ~BB_DUPLICATED;
+
+ for (i = 0; VEC_iterate (edge, redirect_edges, i, e); ++i)
+ {
+ e = redirect_edge_and_branch (e, exit_bb);
+ PENDING_STMT (e) = NULL;
+ orig_src = get_bb_original (e->src);
+ orig_e = find_edge (orig_src, orig_loop->latch);
+ add_phi_args_after_redirect (e, orig_e);
+ }
+
+ VEC_free (edge, heap, redirect_edges);
+
/* Anything that is outside of the region, but was dominated by something
inside needs to update dominance info. */
iterate_fix_dominators (CDI_DOMINATORS, doms, false);
&& !is_global_var (t))
|| TREE_CODE (t) == CONST_DECL)
replace_by_duplicate_decl (tp, p->vars_map, p->to_context);
-
+
if (SSA_VAR_P (t)
&& gimple_in_ssa_p (cfun))
{
/* Print to FILE the basic block BB following the VERBOSITY level. */
-void
+void
print_loops_bb (FILE *file, basic_block bb, int indent, int verbosity)
{
char *s_indent = (char *) alloca ((size_t) indent + 1);
s_indent[indent] = '\0';
/* Print loop's header. */
- fprintf (file, "%sloop_%d (header = %d, latch = %d", s_indent,
+ fprintf (file, "%sloop_%d (header = %d, latch = %d", s_indent,
loop->num, loop->header->index, loop->latch->index);
fprintf (file, ", niter = ");
print_generic_expr (file, loop->nb_iterations, 0);
/* Update the dominance information. The immediate dominator may change only
for blocks whose immediate dominator belongs to DF_IDOM:
-
+
Suppose that idom(X) = Y before removal of E and idom(X) != Y after the
removal. Let Z the arbitrary block such that idom(Z) = Y and
Z dominates X after the removal. Before removal, there exists a path P
from Y to X that avoids Z. Let F be the last edge on P that is
removed, and let W = F->dest. Before removal, idom(W) = Y (since Y
dominates W, and because of P, Z does not dominate W), and W belongs to
- the dominance frontier of E. Therefore, Y belongs to DF_IDOM. */
+ the dominance frontier of E. Therefore, Y belongs to DF_IDOM. */
EXECUTE_IF_SET_IN_BITMAP (df_idom, 0, i, bi)
{
bb = BASIC_BLOCK (i);
{
if (EDGE_CRITICAL_P (e) && !(e->flags & EDGE_ABNORMAL))
split_edge (e);
- /* PRE inserts statements to edges and expects that
+ /* PRE inserts statements to edges and expects that
since split_critical_edges was done beforehand, committing edge
insertions will not split more edges. In addition to critical
edges we must split edges that have multiple successors and
- end by control flow statements, such as RESX.
+ end by control flow statements, such as RESX.
Go ahead and split them too. This matches the logic in
gimple_find_edge_insert_loc. */
else if ((!single_pred_p (e->dest)
{
{
GIMPLE_PASS,
- NULL, /* name */
+ "*warn_function_return", /* name */
NULL, /* gate */
execute_warn_function_return, /* execute */
NULL, /* sub */
{
{
GIMPLE_PASS,
- NULL, /* name */
+ "*warn_function_noreturn", /* name */
NULL, /* gate */
execute_warn_function_noreturn, /* execute */
NULL, /* sub */