X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Ftree-cfgcleanup.c;h=0c8c0852024bc63c53fb379bf7d548e4752677e0;hb=10089e90bd4b103351854568b2463b275a8588a2;hp=dc0380aefc207dd53a9a22f27b9db582e474dfa9;hpb=bc1002e978f2d8e749dd32cd47e3724b0f2e2efb;p=pf3gnuchains%2Fgcc-fork.git
diff --git a/gcc/tree-cfgcleanup.c b/gcc/tree-cfgcleanup.c
index dc0380aefc2..0c8c0852024 100644
--- a/gcc/tree-cfgcleanup.c
+++ b/gcc/tree-cfgcleanup.c
@@ -1,11 +1,12 @@
/* CFG cleanup for trees.
- Copyright (C) 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
+the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
@@ -14,32 +15,26 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to
-the Free Software Foundation, 51 Franklin Street, Fifth Floor,
-Boston, MA 02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
-#include "rtl.h"
#include "tm_p.h"
-#include "hard-reg-set.h"
#include "basic-block.h"
#include "output.h"
-#include "errors.h"
+#include "diagnostic-core.h"
#include "flags.h"
#include "function.h"
-#include "expr.h"
#include "ggc.h"
#include "langhooks.h"
-#include "diagnostic.h"
#include "tree-flow.h"
#include "timevar.h"
#include "tree-dump.h"
#include "tree-pass.h"
-#include "toplev.h"
#include "except.h"
#include "cfgloop.h"
#include "cfglayout.h"
@@ -47,6 +42,15 @@ Boston, MA 02110-1301, USA. */
#include "tree-ssa-propagate.h"
#include "tree-scalar-evolution.h"
+/* The set of blocks in that at least one of the following changes happened:
+ -- the statement at the end of the block was changed
+ -- the block was newly created
+ -- the set of the predecessors of the block changed
+ -- the set of the successors of the block changed
+ ??? Maybe we could track these changes separately, since they determine
+ what cleanups it makes sense to try on the block. */
+bitmap cfgcleanup_altered_bbs;
+
/* Remove any fallthru edge from EV. Return true if an edge was removed. */
static bool
@@ -58,173 +62,192 @@ remove_fallthru_edge (VEC(edge,gc) *ev)
FOR_EACH_EDGE (e, ei, ev)
if ((e->flags & EDGE_FALLTHRU) != 0)
{
- remove_edge (e);
+ remove_edge_and_dominated_blocks (e);
return true;
}
return false;
}
+
/* Disconnect an unreachable block in the control expression starting
at block BB. */
static bool
-cleanup_control_expr_graph (basic_block bb, block_stmt_iterator bsi)
+cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
edge taken_edge;
bool retval = false;
- tree expr = bsi_stmt (bsi), val;
+ gimple stmt = gsi_stmt (gsi);
+ tree val;
if (!single_succ_p (bb))
{
edge e;
edge_iterator ei;
+ bool warned;
+ location_t loc;
- switch (TREE_CODE (expr))
+ fold_defer_overflow_warnings ();
+ loc = gimple_location (stmt);
+ switch (gimple_code (stmt))
{
- case COND_EXPR:
- val = fold (COND_EXPR_COND (expr));
- break;
-
- case SWITCH_EXPR:
- val = fold (SWITCH_COND (expr));
- if (TREE_CODE (val) != INTEGER_CST)
- return false;
+ case GIMPLE_COND:
+ {
+ tree lhs = gimple_cond_lhs (stmt);
+ tree rhs = gimple_cond_rhs (stmt);
+ /* For conditions try harder and lookup single-argument
+ PHI nodes. Only do so from the same basic-block though
+ as other basic-blocks may be dead already. */
+ if (TREE_CODE (lhs) == SSA_NAME
+ && !name_registered_for_update_p (lhs))
+ {
+ gimple def_stmt = SSA_NAME_DEF_STMT (lhs);
+ if (gimple_code (def_stmt) == GIMPLE_PHI
+ && gimple_phi_num_args (def_stmt) == 1
+ && gimple_bb (def_stmt) == gimple_bb (stmt)
+ && (TREE_CODE (PHI_ARG_DEF (def_stmt, 0)) != SSA_NAME
+ || !name_registered_for_update_p (PHI_ARG_DEF (def_stmt,
+ 0))))
+ lhs = PHI_ARG_DEF (def_stmt, 0);
+ }
+ if (TREE_CODE (rhs) == SSA_NAME
+ && !name_registered_for_update_p (rhs))
+ {
+ gimple def_stmt = SSA_NAME_DEF_STMT (rhs);
+ if (gimple_code (def_stmt) == GIMPLE_PHI
+ && gimple_phi_num_args (def_stmt) == 1
+ && gimple_bb (def_stmt) == gimple_bb (stmt)
+ && (TREE_CODE (PHI_ARG_DEF (def_stmt, 0)) != SSA_NAME
+ || !name_registered_for_update_p (PHI_ARG_DEF (def_stmt,
+ 0))))
+ rhs = PHI_ARG_DEF (def_stmt, 0);
+ }
+ val = fold_binary_loc (loc, gimple_cond_code (stmt),
+ boolean_type_node, lhs, rhs);
+ break;
+ }
+
+ case GIMPLE_SWITCH:
+ val = gimple_switch_index (stmt);
break;
default:
- gcc_unreachable ();
+ val = NULL_TREE;
}
-
taken_edge = find_taken_edge (bb, val);
if (!taken_edge)
- return false;
+ {
+ fold_undefer_and_ignore_overflow_warnings ();
+ return false;
+ }
/* Remove all the edges except the one that is always executed. */
+ warned = false;
for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
{
if (e != taken_edge)
{
+ if (!warned)
+ {
+ fold_undefer_overflow_warnings
+ (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
+ warned = true;
+ }
+
taken_edge->probability += e->probability;
taken_edge->count += e->count;
- remove_edge (e);
+ remove_edge_and_dominated_blocks (e);
retval = true;
}
else
ei_next (&ei);
}
+ if (!warned)
+ fold_undefer_and_ignore_overflow_warnings ();
if (taken_edge->probability > REG_BR_PROB_BASE)
taken_edge->probability = REG_BR_PROB_BASE;
}
else
taken_edge = single_succ_edge (bb);
- bsi_remove (&bsi, true);
+ bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
+ gsi_remove (&gsi, true);
taken_edge->flags = EDGE_FALLTHRU;
- /* We removed some paths from the cfg. */
- free_dominance_info (CDI_DOMINATORS);
-
return retval;
}
-/* A list of all the noreturn calls passed to modify_stmt.
- cleanup_control_flow uses it to detect cases where a mid-block
- indirect call has been turned into a noreturn call. When this
- happens, all the instructions after the call are no longer
- reachable and must be deleted as dead. */
-
-VEC(tree,gc) *modified_noreturn_calls;
-
-/* Try to remove superfluous control structures. */
+/* Try to remove superfluous control structures in basic block BB. Returns
+ true if anything changes. */
static bool
-cleanup_control_flow (void)
+cleanup_control_flow_bb (basic_block bb)
{
- basic_block bb;
- block_stmt_iterator bsi;
+ gimple_stmt_iterator gsi;
bool retval = false;
- tree stmt;
-
- /* Detect cases where a mid-block call is now known not to return. */
- while (VEC_length (tree, modified_noreturn_calls))
- {
- stmt = VEC_pop (tree, modified_noreturn_calls);
- bb = bb_for_stmt (stmt);
- if (bb != NULL && last_stmt (bb) != stmt && noreturn_call_p (stmt))
- split_block (bb, stmt);
- }
+ gimple stmt;
- FOR_EACH_BB (bb)
- {
- bsi = bsi_last (bb);
+ /* If the last statement of the block could throw and now cannot,
+ we need to prune cfg. */
+ retval |= gimple_purge_dead_eh_edges (bb);
- /* If the last statement of the block could throw and now cannot,
- we need to prune cfg. */
- tree_purge_dead_eh_edges (bb);
+ gsi = gsi_last_bb (bb);
+ if (gsi_end_p (gsi))
+ return retval;
- if (bsi_end_p (bsi))
- continue;
+ stmt = gsi_stmt (gsi);
- stmt = bsi_stmt (bsi);
-
- if (TREE_CODE (stmt) == COND_EXPR
- || TREE_CODE (stmt) == SWITCH_EXPR)
- retval |= cleanup_control_expr_graph (bb, bsi);
+ if (gimple_code (stmt) == GIMPLE_COND
+ || gimple_code (stmt) == GIMPLE_SWITCH)
+ retval |= cleanup_control_expr_graph (bb, gsi);
+ else if (gimple_code (stmt) == GIMPLE_GOTO
+ && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
+ && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
+ == LABEL_DECL))
+ {
/* If we had a computed goto which has a compile-time determinable
destination, then we can eliminate the goto. */
- else if (TREE_CODE (stmt) == GOTO_EXPR
- && TREE_CODE (GOTO_DESTINATION (stmt)) == ADDR_EXPR
- && (TREE_CODE (TREE_OPERAND (GOTO_DESTINATION (stmt), 0))
- == LABEL_DECL))
+ edge e;
+ tree label;
+ edge_iterator ei;
+ basic_block target_block;
+
+ /* First look at all the outgoing edges. Delete any outgoing
+ edges which do not go to the right block. For the one
+ edge which goes to the right block, fix up its flags. */
+ label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
+ target_block = label_to_block (label);
+ for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
{
- edge e;
- tree label;
- edge_iterator ei;
- basic_block target_block;
- bool removed_edge = false;
-
- /* First look at all the outgoing edges. Delete any outgoing
- edges which do not go to the right block. For the one
- edge which goes to the right block, fix up its flags. */
- label = TREE_OPERAND (GOTO_DESTINATION (stmt), 0);
- target_block = label_to_block (label);
- for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
+ if (e->dest != target_block)
+ remove_edge_and_dominated_blocks (e);
+ else
{
- if (e->dest != target_block)
- {
- removed_edge = true;
- remove_edge (e);
- }
- else
- {
- /* Turn off the EDGE_ABNORMAL flag. */
- e->flags &= ~EDGE_ABNORMAL;
+ /* Turn off the EDGE_ABNORMAL flag. */
+ e->flags &= ~EDGE_ABNORMAL;
- /* And set EDGE_FALLTHRU. */
- e->flags |= EDGE_FALLTHRU;
- ei_next (&ei);
- }
+ /* And set EDGE_FALLTHRU. */
+ e->flags |= EDGE_FALLTHRU;
+ ei_next (&ei);
}
-
- /* If we removed one or more edges, then we will need to fix the
- dominators. It may be possible to incrementally update them. */
- if (removed_edge)
- free_dominance_info (CDI_DOMINATORS);
-
- /* Remove the GOTO_EXPR as it is not needed. The CFG has all the
- relevant information we need. */
- bsi_remove (&bsi, true);
- retval = true;
}
- /* Check for indirect calls that have been turned into
- noreturn calls. */
- else if (noreturn_call_p (stmt) && remove_fallthru_edge (bb->succs))
- {
- free_dominance_info (CDI_DOMINATORS);
- retval = true;
- }
+ bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
+ bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);
+
+ /* Remove the GOTO_EXPR as it is not needed. The CFG has all the
+ relevant information we need. */
+ gsi_remove (&gsi, true);
+ retval = true;
}
+
+ /* Check for indirect calls that have been turned into
+ noreturn calls. */
+ else if (is_gimple_call (stmt)
+ && gimple_call_noreturn_p (stmt)
+ && remove_fallthru_edge (bb->succs))
+ retval = true;
+
return retval;
}
@@ -238,13 +261,14 @@ cleanup_control_flow (void)
static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
- block_stmt_iterator bsi;
+ gimple_stmt_iterator gsi;
+ location_t locus;
/* BB must have a single outgoing edge. */
if (single_succ_p (bb) != 1
/* If PHI_WANTED is false, BB must not have any PHI nodes.
Otherwise, BB must have PHI nodes. */
- || (phi_nodes (bb) != NULL_TREE) != phi_wanted
+ || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
/* BB may not be a predecessor of EXIT_BLOCK_PTR. */
|| single_succ (bb) == EXIT_BLOCK_PTR
/* Nor should this be an infinite loop. */
@@ -253,31 +277,49 @@ tree_forwarder_block_p (basic_block bb, bool phi_wanted)
|| (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
return false;
-#if ENABLE_CHECKING
- gcc_assert (bb != ENTRY_BLOCK_PTR);
-#endif
+ gcc_checking_assert (bb != ENTRY_BLOCK_PTR);
+
+ locus = single_succ_edge (bb)->goto_locus;
+
+ /* There should not be an edge coming from entry, or an EH edge. */
+ {
+ edge_iterator ei;
+ edge e;
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ if (e->src == ENTRY_BLOCK_PTR || (e->flags & EDGE_EH))
+ return false;
+ /* If goto_locus of any of the edges differs, prevent removing
+ the forwarder block for -O0. */
+ else if (optimize == 0 && e->goto_locus != locus)
+ return false;
+ }
/* Now walk through the statements backward. We can ignore labels,
anything else means this is not a forwarder block. */
- for (bsi = bsi_last (bb); !bsi_end_p (bsi); bsi_prev (&bsi))
+ for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
{
- tree stmt = bsi_stmt (bsi);
+ gimple stmt = gsi_stmt (gsi);
- switch (TREE_CODE (stmt))
+ switch (gimple_code (stmt))
{
- case LABEL_EXPR:
- if (DECL_NONLOCAL (LABEL_EXPR_LABEL (stmt)))
+ case GIMPLE_LABEL:
+ if (DECL_NONLOCAL (gimple_label_label (stmt)))
+ return false;
+ if (optimize == 0 && gimple_location (stmt) != locus)
return false;
break;
+ /* ??? For now, hope there's a corresponding debug
+ assignment at the destination. */
+ case GIMPLE_DEBUG:
+ break;
+
default:
return false;
}
}
- if (find_edge (ENTRY_BLOCK_PTR, bb))
- return false;
-
if (current_loops)
{
basic_block dest;
@@ -289,25 +331,9 @@ tree_forwarder_block_p (basic_block bb, bool phi_wanted)
if (dest->loop_father->header == dest)
return false;
}
-
return true;
}
-/* Return true if BB has at least one abnormal incoming edge. */
-
-static inline bool
-has_abnormal_incoming_edge_p (basic_block bb)
-{
- edge e;
- edge_iterator ei;
-
- FOR_EACH_EDGE (e, ei, bb->preds)
- if (e->flags & EDGE_ABNORMAL)
- return true;
-
- return false;
-}
-
/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
those alternatives are equal in each of the PHI nodes, then return
true, else return false. */
@@ -317,12 +343,13 @@ phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
int n1 = e1->dest_idx;
int n2 = e2->dest_idx;
- tree phi;
+ gimple_stmt_iterator gsi;
- for (phi = phi_nodes (dest); phi; phi = PHI_CHAIN (phi))
+ for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
{
- tree val1 = PHI_ARG_DEF (phi, n1);
- tree val2 = PHI_ARG_DEF (phi, n2);
+ gimple phi = gsi_stmt (gsi);
+ tree val1 = gimple_phi_arg_def (phi, n1);
+ tree val2 = gimple_phi_arg_def (phi, n2);
gcc_assert (val1 != NULL_TREE);
gcc_assert (val2 != NULL_TREE);
@@ -334,20 +361,17 @@ phi_alternatives_equal (basic_block dest, edge e1, edge e2)
return true;
}
-/* Removes forwarder block BB. Returns false if this failed. If a new
- forwarder block is created due to redirection of edges, it is
- stored to worklist. */
+/* Removes forwarder block BB. Returns false if this failed. */
static bool
-remove_forwarder_block (basic_block bb, basic_block **worklist)
+remove_forwarder_block (basic_block bb)
{
edge succ = single_succ_edge (bb), e, s;
basic_block dest = succ->dest;
- tree label;
- tree phi;
+ gimple label;
edge_iterator ei;
- block_stmt_iterator bsi, bsi_to;
- bool seen_abnormal_edge = false;
+ gimple_stmt_iterator gsi, gsi_to;
+ bool can_move_debug_stmts;
/* We check for infinite loops already in tree_forwarder_block_p.
However it may happen that the infinite loop is created
@@ -355,12 +379,13 @@ remove_forwarder_block (basic_block bb, basic_block **worklist)
if (dest == bb)
return false;
- /* If the destination block consists of a nonlocal label, do not merge
- it. */
+ /* If the destination block consists of a nonlocal label or is a
+ EH landing pad, do not merge it. */
label = first_stmt (dest);
if (label
- && TREE_CODE (label) == LABEL_EXPR
- && DECL_NONLOCAL (LABEL_EXPR_LABEL (label)))
+ && gimple_code (label) == GIMPLE_LABEL
+ && (DECL_NONLOCAL (gimple_label_label (label))
+ || EH_LANDING_PAD_NR (gimple_label_label (label)) != 0))
return false;
/* If there is an abnormal edge to basic block BB, but not into
@@ -374,19 +399,15 @@ remove_forwarder_block (basic_block bb, basic_block **worklist)
So if there is an abnormal edge to BB, proceed only if there is
no abnormal edge to DEST and there are no phi nodes in DEST. */
- if (has_abnormal_incoming_edge_p (bb))
- {
- seen_abnormal_edge = true;
-
- if (has_abnormal_incoming_edge_p (dest)
- || phi_nodes (dest) != NULL_TREE)
- return false;
- }
+ if (bb_has_abnormal_pred (bb)
+ && (bb_has_abnormal_pred (dest)
+ || !gimple_seq_empty_p (phi_nodes (dest))))
+ return false;
/* If there are phi nodes in DEST, and some of the blocks that are
predecessors of BB are also predecessors of DEST, check that the
phi node arguments match. */
- if (phi_nodes (dest))
+ if (!gimple_seq_empty_p (phi_nodes (dest)))
{
FOR_EACH_EDGE (e, ei, bb->preds)
{
@@ -399,9 +420,13 @@ remove_forwarder_block (basic_block bb, basic_block **worklist)
}
}
+ can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);
+
/* Redirect the edges. */
for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
{
+ bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
+
if (e->flags & EDGE_ABNORMAL)
{
/* If there is an abnormal edge, redirect it anyway, and
@@ -415,35 +440,58 @@ remove_forwarder_block (basic_block bb, basic_block **worklist)
{
/* Create arguments for the phi nodes, since the edge was not
here before. */
- for (phi = phi_nodes (dest); phi; phi = PHI_CHAIN (phi))
- add_phi_arg (phi, PHI_ARG_DEF (phi, succ->dest_idx), s);
+ for (gsi = gsi_start_phis (dest);
+ !gsi_end_p (gsi);
+ gsi_next (&gsi))
+ {
+ gimple phi = gsi_stmt (gsi);
+ source_location l = gimple_phi_arg_location_from_edge (phi, succ);
+ add_phi_arg (phi, gimple_phi_arg_def (phi, succ->dest_idx), s, l);
+ }
}
- else
+ }
+
+ /* Move nonlocal labels and computed goto targets as well as user
+ defined labels and labels with an EH landing pad number to the
+ new block, so that the redirection of the abnormal edges works,
+ jump targets end up in a sane place and debug information for
+ labels is retained. */
+ gsi_to = gsi_start_bb (dest);
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
+ {
+ tree decl;
+ label = gsi_stmt (gsi);
+ if (is_gimple_debug (label))
+ break;
+ decl = gimple_label_label (label);
+ if (EH_LANDING_PAD_NR (decl) != 0
+ || DECL_NONLOCAL (decl)
+ || FORCED_LABEL (decl)
+ || !DECL_ARTIFICIAL (decl))
{
- /* The source basic block might become a forwarder. We know
- that it was not a forwarder before, since it used to have
- at least two outgoing edges, so we may just add it to
- worklist. */
- if (tree_forwarder_block_p (s->src, false))
- *(*worklist)++ = s->src;
+ gsi_remove (&gsi, false);
+ gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
}
+ else
+ gsi_next (&gsi);
}
- if (seen_abnormal_edge)
+ /* Move debug statements if the destination has a single predecessor. */
+ if (can_move_debug_stmts)
{
- /* Move the labels to the new block, so that the redirection of
- the abnormal edges works. */
-
- bsi_to = bsi_start (dest);
- for (bsi = bsi_start (bb); !bsi_end_p (bsi); )
+ gsi_to = gsi_after_labels (dest);
+ for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
{
- label = bsi_stmt (bsi);
- gcc_assert (TREE_CODE (label) == LABEL_EXPR);
- bsi_remove (&bsi, false);
- bsi_insert_before (&bsi_to, label, BSI_CONTINUE_LINKING);
+ gimple debug = gsi_stmt (gsi);
+ if (!is_gimple_debug (debug))
+ break;
+ gsi_remove (&gsi, false);
+ gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
}
}
+ bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);
+
/* Update the dominators. */
if (dom_info_available_p (CDI_DOMINATORS))
{
@@ -469,62 +517,209 @@ remove_forwarder_block (basic_block bb, basic_block **worklist)
return true;
}
-/* Removes forwarder blocks. */
+/* STMT is a call that has been discovered noreturn. Fixup the CFG
+ and remove LHS. Return true if something changed. */
-static bool
-cleanup_forwarder_blocks (void)
+bool
+fixup_noreturn_call (gimple stmt)
{
- basic_block bb;
+ basic_block bb = gimple_bb (stmt);
bool changed = false;
- basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks);
- basic_block *current = worklist;
- FOR_EACH_BB (bb)
+ if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
+ return false;
+
+ /* First split basic block if stmt is not last. */
+ if (stmt != gsi_stmt (gsi_last_bb (bb)))
+ split_block (bb, stmt);
+
+ changed |= remove_fallthru_edge (bb->succs);
+
+ /* If there is LHS, remove it. */
+ if (gimple_call_lhs (stmt))
{
- if (tree_forwarder_block_p (bb, false))
- *current++ = bb;
+ tree op = gimple_call_lhs (stmt);
+ gimple_call_set_lhs (stmt, NULL_TREE);
+
+ /* We need to remove SSA name to avoid checking errors.
+ All uses are dominated by the noreturn and thus will
+ be removed afterwards.
+ We proactively remove affected non-PHI statements to avoid
+ fixup_cfg from trying to update them and crashing. */
+ if (TREE_CODE (op) == SSA_NAME)
+ {
+ use_operand_p use_p;
+ imm_use_iterator iter;
+ gimple use_stmt;
+ bitmap_iterator bi;
+ unsigned int bb_index;
+
+ bitmap blocks = BITMAP_ALLOC (NULL);
+
+ FOR_EACH_IMM_USE_STMT (use_stmt, iter, op)
+ {
+ if (gimple_code (use_stmt) != GIMPLE_PHI)
+ bitmap_set_bit (blocks, gimple_bb (use_stmt)->index);
+ else
+ FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+ SET_USE (use_p, error_mark_node);
+ }
+ EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
+ delete_basic_block (BASIC_BLOCK (bb_index));
+ BITMAP_FREE (blocks);
+ release_ssa_name (op);
+ }
+ update_stmt (stmt);
+ changed = true;
}
+ /* Similarly remove VDEF if there is any. */
+ else if (gimple_vdef (stmt))
+ update_stmt (stmt);
+ return changed;
+}
- while (current != worklist)
+
+/* Split basic blocks on calls in the middle of a basic block that are now
+ known not to return, and remove the unreachable code. */
+
+static bool
+split_bbs_on_noreturn_calls (void)
+{
+ bool changed = false;
+ gimple stmt;
+ basic_block bb;
+
+ /* Detect cases where a mid-block call is now known not to return. */
+ if (cfun->gimple_df)
+ while (VEC_length (gimple, MODIFIED_NORETURN_CALLS (cfun)))
+ {
+ stmt = VEC_pop (gimple, MODIFIED_NORETURN_CALLS (cfun));
+ bb = gimple_bb (stmt);
+ /* BB might be deleted at this point, so verify first
+ BB is present in the cfg. */
+ if (bb == NULL
+ || bb->index < NUM_FIXED_BLOCKS
+ || bb->index >= last_basic_block
+ || BASIC_BLOCK (bb->index) != bb
+ || !gimple_call_noreturn_p (stmt))
+ continue;
+
+ changed |= fixup_noreturn_call (stmt);
+ }
+
+ return changed;
+}
+
+/* If GIMPLE_OMP_RETURN in basic block BB is unreachable, remove it. */
+
+static bool
+cleanup_omp_return (basic_block bb)
+{
+ gimple stmt = last_stmt (bb);
+ basic_block control_bb;
+
+ if (stmt == NULL
+ || gimple_code (stmt) != GIMPLE_OMP_RETURN
+ || !single_pred_p (bb))
+ return false;
+
+ control_bb = single_pred (bb);
+ stmt = last_stmt (control_bb);
+
+ if (stmt == NULL || gimple_code (stmt) != GIMPLE_OMP_SECTIONS_SWITCH)
+ return false;
+
+ /* The block with the control statement normally has two entry edges -- one
+ from entry, one from continue. If continue is removed, return is
+ unreachable, so we remove it here as well. */
+ if (EDGE_COUNT (control_bb->preds) == 2)
+ return false;
+
+ gcc_assert (EDGE_COUNT (control_bb->preds) == 1);
+ remove_edge_and_dominated_blocks (single_pred_edge (bb));
+ return true;
+}
+
+/* Tries to cleanup cfg in basic block BB. Returns true if anything
+ changes. */
+
+static bool
+cleanup_tree_cfg_bb (basic_block bb)
+{
+ bool retval = false;
+
+ if (cleanup_omp_return (bb))
+ return true;
+
+ retval = cleanup_control_flow_bb (bb);
+
+ if (tree_forwarder_block_p (bb, false)
+ && remove_forwarder_block (bb))
+ return true;
+
+ /* Merging the blocks may create new opportunities for folding
+ conditional branches (due to the elimination of single-valued PHI
+ nodes). */
+ if (single_succ_p (bb)
+ && can_merge_blocks_p (bb, single_succ (bb)))
{
- bb = *--current;
- changed |= remove_forwarder_block (bb, &current);
+ merge_blocks (bb, single_succ (bb));
+ return true;
}
- free (worklist);
- return changed;
+ return retval;
}
-/* Do one round of CFG cleanup. */
+/* Iterate the cfg cleanups, while anything changes. */
static bool
cleanup_tree_cfg_1 (void)
{
- bool retval;
+ bool retval = false;
+ basic_block bb;
+ unsigned i, n;
+
+ retval |= split_bbs_on_noreturn_calls ();
- retval = cleanup_control_flow ();
- retval |= delete_unreachable_blocks ();
+ /* Prepare the worklists of altered blocks. */
+ cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);
- /* Forwarder blocks can carry line number information which is
- useful when debugging, so we only clean them up when
- optimizing. */
+ /* During forwarder block cleanup, we may redirect edges out of
+ SWITCH_EXPRs, which can get expensive. So we want to enable
+ recording of edge to CASE_LABEL_EXPR. */
+ start_recording_case_labels ();
- if (optimize > 0)
+ /* Start by iterating over all basic blocks. We cannot use FOR_EACH_BB,
+ since the basic blocks may get removed. */
+ n = last_basic_block;
+ for (i = NUM_FIXED_BLOCKS; i < n; i++)
{
- /* cleanup_forwarder_blocks can redirect edges out of
- SWITCH_EXPRs, which can get expensive. So we want to enable
- recording of edge to CASE_LABEL_EXPR mappings around the call
- to cleanup_forwarder_blocks. */
- start_recording_case_labels ();
- retval |= cleanup_forwarder_blocks ();
- end_recording_case_labels ();
+ bb = BASIC_BLOCK (i);
+ if (bb)
+ retval |= cleanup_tree_cfg_bb (bb);
}
- /* Merging the blocks may create new opportunities for folding
- conditional branches (due to the elimination of single-valued PHI
- nodes). */
- retval |= merge_seq_blocks ();
+ /* Now process the altered blocks, as long as any are available. */
+ while (!bitmap_empty_p (cfgcleanup_altered_bbs))
+ {
+ i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
+ bitmap_clear_bit (cfgcleanup_altered_bbs, i);
+ if (i < NUM_FIXED_BLOCKS)
+ continue;
+ bb = BASIC_BLOCK (i);
+ if (!bb)
+ continue;
+
+ retval |= cleanup_tree_cfg_bb (bb);
+
+ /* Rerun split_bbs_on_noreturn_calls, in case we have altered any noreturn
+ calls. */
+ retval |= split_bbs_on_noreturn_calls ();
+ }
+
+ end_recording_case_labels ();
+ BITMAP_FREE (cfgcleanup_altered_bbs);
return retval;
}
@@ -532,23 +727,34 @@ cleanup_tree_cfg_1 (void)
/* Remove unreachable blocks and other miscellaneous clean up work.
Return true if the flowgraph was modified, false otherwise. */
-bool
-cleanup_tree_cfg (void)
+static bool
+cleanup_tree_cfg_noloop (void)
{
- bool retval, changed;
+ bool changed;
timevar_push (TV_TREE_CLEANUP_CFG);
/* Iterate until there are no more cleanups left to do. If any
- iteration changed the flowgraph, set CHANGED to true. */
- changed = false;
- do
+ iteration changed the flowgraph, set CHANGED to true.
+
+ If dominance information is available, there cannot be any unreachable
+ blocks. */
+ if (!dom_info_available_p (CDI_DOMINATORS))
+ {
+ changed = delete_unreachable_blocks ();
+ calculate_dominance_info (CDI_DOMINATORS);
+ }
+ else
{
- retval = cleanup_tree_cfg_1 ();
- changed |= retval;
+#ifdef ENABLE_CHECKING
+ verify_dominators (CDI_DOMINATORS);
+#endif
+ changed = false;
}
- while (retval);
+ changed |= cleanup_tree_cfg_1 ();
+
+ gcc_assert (dom_info_available_p (CDI_DOMINATORS));
compact_blocks ();
#ifdef ENABLE_CHECKING
@@ -557,34 +763,52 @@ cleanup_tree_cfg (void)
timevar_pop (TV_TREE_CLEANUP_CFG);
+ if (changed && current_loops)
+ loops_state_set (LOOPS_NEED_FIXUP);
+
return changed;
}
-/* Cleanup cfg and repair loop structures. */
+/* Repairs loop structures. */
-void
-cleanup_tree_cfg_loop (void)
+static void
+repair_loop_structures (void)
{
- bool changed = cleanup_tree_cfg ();
+ bitmap changed_bbs;
- if (changed)
- {
- bitmap changed_bbs = BITMAP_ALLOC (NULL);
- fix_loop_structure (current_loops, changed_bbs);
- calculate_dominance_info (CDI_DOMINATORS);
+ timevar_push (TV_REPAIR_LOOPS);
+ changed_bbs = BITMAP_ALLOC (NULL);
+ fix_loop_structure (changed_bbs);
- /* This usually does nothing. But sometimes parts of cfg that originally
- were inside a loop get out of it due to edge removal (since they
- become unreachable by back edges from latch). */
- rewrite_into_loop_closed_ssa (changed_bbs, TODO_update_ssa);
+ /* This usually does nothing. But sometimes parts of cfg that originally
+ were inside a loop get out of it due to edge removal (since they
+ become unreachable by back edges from latch). */
+ if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
+ rewrite_into_loop_closed_ssa (changed_bbs, TODO_update_ssa);
- BITMAP_FREE (changed_bbs);
+ BITMAP_FREE (changed_bbs);
#ifdef ENABLE_CHECKING
- verify_loop_structure (current_loops);
+ verify_loop_structure ();
#endif
- scev_reset ();
- }
+ scev_reset ();
+
+ loops_state_clear (LOOPS_NEED_FIXUP);
+ timevar_pop (TV_REPAIR_LOOPS);
+}
+
+/* Cleanup cfg and repair loop structures. */
+
+bool
+cleanup_tree_cfg (void)
+{
+ bool changed = cleanup_tree_cfg_noloop ();
+
+ if (current_loops != NULL
+ && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
+ repair_loop_structures ();
+
+ return changed;
}
/* Merge the PHI nodes at BB into those at BB's sole successor. */
@@ -594,7 +818,7 @@ remove_forwarder_block_with_phi (basic_block bb)
{
edge succ = single_succ_edge (bb);
basic_block dest = succ->dest;
- tree label;
+ gimple label;
basic_block dombb, domdest, dom;
/* We check for infinite loops already in tree_forwarder_block_p.
@@ -607,15 +831,15 @@ remove_forwarder_block_with_phi (basic_block bb)
merge it. */
label = first_stmt (dest);
if (label
- && TREE_CODE (label) == LABEL_EXPR
- && DECL_NONLOCAL (LABEL_EXPR_LABEL (label)))
+ && gimple_code (label) == GIMPLE_LABEL
+ && DECL_NONLOCAL (gimple_label_label (label)))
return;
/* Redirect each incoming edge to BB to DEST. */
while (EDGE_COUNT (bb->preds) > 0)
{
edge e = EDGE_PRED (bb, 0), s;
- tree phi;
+ gimple_stmt_iterator gsi;
s = find_edge (e->src, dest);
if (s)
@@ -626,7 +850,7 @@ remove_forwarder_block_with_phi (basic_block bb)
if (phi_alternatives_equal (dest, s, succ))
{
e = redirect_edge_and_branch (e, dest);
- PENDING_STMT (e) = NULL_TREE;
+ redirect_edge_var_map_clear (e);
continue;
}
@@ -643,34 +867,42 @@ remove_forwarder_block_with_phi (basic_block bb)
/* Add to the PHI nodes at DEST each PHI argument removed at the
destination of E. */
- for (phi = phi_nodes (dest); phi; phi = PHI_CHAIN (phi))
+ for (gsi = gsi_start_phis (dest);
+ !gsi_end_p (gsi);
+ gsi_next (&gsi))
{
- tree def = PHI_ARG_DEF (phi, succ->dest_idx);
+ gimple phi = gsi_stmt (gsi);
+ tree def = gimple_phi_arg_def (phi, succ->dest_idx);
+ source_location locus = gimple_phi_arg_location_from_edge (phi, succ);
if (TREE_CODE (def) == SSA_NAME)
{
- tree var;
+ edge_var_map_vector head;
+ edge_var_map *vm;
+ size_t i;
/* If DEF is one of the results of PHI nodes removed during
redirection, replace it with the PHI argument that used
to be on E. */
- for (var = PENDING_STMT (e); var; var = TREE_CHAIN (var))
+ head = redirect_edge_var_map_vector (e);
+ FOR_EACH_VEC_ELT (edge_var_map, head, i, vm)
{
- tree old_arg = TREE_PURPOSE (var);
- tree new_arg = TREE_VALUE (var);
+ tree old_arg = redirect_edge_var_map_result (vm);
+ tree new_arg = redirect_edge_var_map_def (vm);
if (def == old_arg)
{
def = new_arg;
+ locus = redirect_edge_var_map_location (vm);
break;
}
}
}
- add_phi_arg (phi, def, s);
+ add_phi_arg (phi, def, s, locus);
}
- PENDING_STMT (e) = NULL;
+ redirect_edge_var_map_clear (e);
}
/* Update the dominators. */
@@ -717,7 +949,7 @@ remove_forwarder_block_with_phi (basic_block bb)
:;
*/
-static void
+static unsigned int
merge_phi_nodes (void)
{
basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks);
@@ -739,10 +971,10 @@ merge_phi_nodes (void)
/* We have to feed into another basic block with PHI
nodes. */
- if (!phi_nodes (dest)
+ if (gimple_seq_empty_p (phi_nodes (dest))
/* We don't want to deal with a basic block with
abnormal edges. */
- || has_abnormal_incoming_edge_p (bb))
+ || bb_has_abnormal_pred (bb))
continue;
if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
@@ -754,7 +986,7 @@ merge_phi_nodes (void)
}
else
{
- tree phi;
+ gimple_stmt_iterator gsi;
unsigned int dest_idx = single_succ_edge (bb)->dest_idx;
/* BB dominates DEST. There may be many users of the PHI
@@ -762,29 +994,30 @@ merge_phi_nodes (void)
can handle. If the result of every PHI in BB is used
only by a PHI in DEST, then we can trivially merge the
PHI nodes from BB into DEST. */
- for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
+ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
+ gsi_next (&gsi))
{
- tree result = PHI_RESULT (phi);
- int num_uses = num_imm_uses (result);
+ gimple phi = gsi_stmt (gsi);
+ tree result = gimple_phi_result (phi);
use_operand_p imm_use;
- tree use_stmt;
+ gimple use_stmt;
/* If the PHI's result is never used, then we can just
ignore it. */
- if (num_uses == 0)
+ if (has_zero_uses (result))
continue;
/* Get the single use of the result of this PHI node. */
if (!single_imm_use (result, &imm_use, &use_stmt)
- || TREE_CODE (use_stmt) != PHI_NODE
- || bb_for_stmt (use_stmt) != dest
- || PHI_ARG_DEF (use_stmt, dest_idx) != result)
+ || gimple_code (use_stmt) != GIMPLE_PHI
+ || gimple_bb (use_stmt) != dest
+ || gimple_phi_arg_def (use_stmt, dest_idx) != result)
break;
}
- /* If the loop above iterated thorugh all the PHI nodes
+ /* If the loop above iterated through all the PHI nodes
in BB, then we can merge the PHIs from BB into DEST. */
- if (!phi)
+ if (gsi_end_p (gsi))
*current++ = bb;
}
}
@@ -797,6 +1030,7 @@ merge_phi_nodes (void)
}
free (worklist);
+ return 0;
}
static bool
@@ -805,7 +1039,10 @@ gate_merge_phi (void)
return 1;
}
-struct tree_opt_pass pass_merge_phi = {
+struct gimple_opt_pass pass_merge_phi =
+{
+ {
+ GIMPLE_PASS,
"mergephi", /* name */
gate_merge_phi, /* gate */
merge_phi_nodes, /* execute */
@@ -817,7 +1054,7 @@ struct tree_opt_pass pass_merge_phi = {
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_dump_func | TODO_ggc_collect /* todo_flags_finish */
- | TODO_verify_ssa,
- 0 /* letter */
+ TODO_ggc_collect /* todo_flags_finish */
+ | TODO_verify_ssa
+ }
};