X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Ftree-ssa-threadupdate.c;h=54f87afaf687ca4d2da53a487c526025b43bb6b6;hb=28bfa791d44b30fa96cc852ad093b0bcf3e9c168;hp=8d205fdf384428b0fb461c645e4d4056114e46dc;hpb=c5d4a10b529a6cf299b7e6d87b5aa762ff147d56;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index 8d205fdf384..54f87afaf68 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -1,11 +1,12 @@
 /* Thread edges through blocks and update the control flow and SSA graphs.
-   Copyright (C) 2004 Free Software Foundation, Inc.
+   Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation,
+   Inc.
 
 This file is part of GCC.
 
 GCC is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
+the Free Software Foundation; either version 3, or (at your option)
 any later version.
 
 GCC is distributed in the hope that it will be useful,
@@ -14,9 +15,8 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING.  If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA.  */
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
 
 #include "config.h"
 #include "system.h"
@@ -29,13 +29,13 @@ Boston, MA 02111-1307, USA.  */
 #include "ggc.h"
 #include "basic-block.h"
 #include "output.h"
-#include "errors.h"
 #include "expr.h"
 #include "function.h"
 #include "diagnostic.h"
 #include "tree-flow.h"
 #include "tree-dump.h"
 #include "tree-pass.h"
+#include "cfgloop.h"
 
 /* Given a block B, update the CFG and SSA graph to reflect redirecting
    one or more in-edges to B to instead reach the destination of an
@@ -72,7 +72,7 @@ Boston, MA 02111-1307, USA.  */
      7. Put the duplicated resources in B and all the B' blocks into SSA form.
 
    Note that block duplication can be minimized by first collecting the
-   the set of unique destination blocks that the incoming edges should
+   set of unique destination blocks that the incoming edges should
    be threaded to.  Block duplication can be further minimized by using
    B instead of creating B' for one destination if all edges into B are
    going to be threaded to a successor of B.
@@ -140,8 +140,28 @@ struct local_info
   /* A template copy of BB with no outgoing edges or control statement that
      we use for creating copies.  */
   basic_block template_block;
+
+  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
+  bool jumps_threaded;
+};
+
+/* Passes which use the jump threading code register jump threading
+   opportunities as they are discovered.  We keep the registered
+   jump threading opportunities in this vector as edge pairs
+   (original_edge, target_edge).  */
+static VEC(edge,heap) *threaded_edges;
+
+
+/* Jump threading statistics.  */
+
+struct thread_stats_d
+{
+  unsigned long num_threaded_edges;
 };
 
+struct thread_stats_d thread_stats;
+
+
 /* Remove the last statement in block BB if it is a control statement
    Also remove all outgoing edges except the edge which reaches DEST_BB.
    If DEST_BB is NULL, then remove all outgoing edges.  */
@@ -163,13 +183,14 @@ remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
   if (!bsi_end_p (bsi)
       && bsi_stmt (bsi)
       && (TREE_CODE (bsi_stmt (bsi)) == COND_EXPR
+          || TREE_CODE (bsi_stmt (bsi)) == GOTO_EXPR
           || TREE_CODE (bsi_stmt (bsi)) == SWITCH_EXPR))
-    bsi_remove (&bsi);
+    bsi_remove (&bsi, true);
 
   for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
     {
       if (e->dest != dest_bb)
-        ssa_remove_edge (e);
+        remove_edge (e);
       else
         ei_next (&ei);
     }
@@ -183,7 +204,7 @@ create_block_for_threading (basic_block bb, struct redirection_data *rd)
 {
   /* We can use the generic block duplication code and simply remove
      the stuff we do not need.  */
-  rd->dup_block = duplicate_block (bb, NULL);
+  rd->dup_block = duplicate_block (bb, NULL, NULL);
 
   /* Zero out the profile, since the block is unreachable for now.  */
   rd->dup_block->frequency = 0;
@@ -202,15 +223,15 @@ create_block_for_threading (basic_block bb, struct redirection_data *rd)
 static hashval_t
 redirection_data_hash (const void *p)
 {
-  edge e = ((struct redirection_data *)p)->outgoing_edge;
-  return htab_hash_pointer (e);
+  edge e = ((const struct redirection_data *)p)->outgoing_edge;
+  return e->dest->index;
 }
 
 static int
 redirection_data_eq (const void *p1, const void *p2)
 {
-  edge e1 = ((struct redirection_data *)p1)->outgoing_edge;
-  edge e2 = ((struct redirection_data *)p2)->outgoing_edge;
+  edge e1 = ((const struct redirection_data *)p1)->outgoing_edge;
+  edge e2 = ((const struct redirection_data *)p2)->outgoing_edge;
 
   return e1 == e2;
 }
@@ -222,14 +243,14 @@ redirection_data_eq (const void *p1, const void *p2)
    edges associated with E in the hash table.  */
 
 static struct redirection_data *
-lookup_redirection_data (edge e, edge incoming_edge, bool insert)
+lookup_redirection_data (edge e, edge incoming_edge, enum insert_option insert)
 {
   void **slot;
   struct redirection_data *elt;
 
  /* Build a hash table element so we can see if E is already
      in the table.  */
-  elt = xmalloc (sizeof (struct redirection_data));
+  elt = XNEW (struct redirection_data);
   elt->outgoing_edge = e;
   elt->dup_block = NULL;
   elt->do_not_duplicate = false;
@@ -250,7 +271,7 @@ lookup_redirection_data (edge e, edge incoming_edge, bool insert)
       if (*slot == NULL)
         {
           *slot = (void *)elt;
-          elt->incoming_edges = xmalloc (sizeof (struct el));
+          elt->incoming_edges = XNEW (struct el);
           elt->incoming_edges->e = incoming_edge;
           elt->incoming_edges->next = NULL;
           return elt;
@@ -269,7 +290,7 @@ lookup_redirection_data (edge e, edge incoming_edge, bool insert)
          to the list of incoming edges associated with E.  */
       if (insert)
         {
-          struct el *el = xmalloc (sizeof (struct el));
+          struct el *el = XNEW (struct el);
           el->next = elt->incoming_edges;
           el->e = incoming_edge;
           elt->incoming_edges = el;
@@ -292,14 +313,19 @@ create_edge_and_update_destination_phis (struct redirection_data *rd)
   edge e = make_edge (rd->dup_block, rd->outgoing_edge->dest, EDGE_FALLTHRU);
   tree phi;
 
+  rescan_loop_exit (e, true, false);
+  e->probability = REG_BR_PROB_BASE;
+  e->count = rd->dup_block->count;
+  e->aux = rd->outgoing_edge->aux;
+
   /* If there are any PHI nodes at the destination of the outgoing edge
      from the duplicate block, then we will need to add a new argument
      to them.  The argument should have the same value as the argument
      associated with the outgoing edge stored in RD.  */
   for (phi = phi_nodes (e->dest); phi; phi = PHI_CHAIN (phi))
     {
-      int indx = phi_arg_from_edge (phi, rd->outgoing_edge);
-      add_phi_arg (&phi, PHI_ARG_DEF_TREE (phi, indx), e);
+      int indx = rd->outgoing_edge->dest_idx;
+      add_phi_arg (phi, PHI_ARG_DEF (phi, indx), e);
     }
 }
 
@@ -387,6 +413,8 @@ redirect_edges (void **slot, void *data)
          to clear it will cause all kinds of unpleasant problems later.  */
       e->aux = NULL;
 
+      thread_stats.num_threaded_edges++;
+
       if (rd->dup_block)
         {
           edge e2;
@@ -395,14 +423,14 @@ redirect_edges (void **slot, void *data)
             fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
                      e->src->index, e->dest->index, rd->dup_block->index);
 
+          rd->dup_block->count += e->count;
+          rd->dup_block->frequency += EDGE_FREQUENCY (e);
+          EDGE_SUCC (rd->dup_block, 0)->count += e->count;
           /* Redirect the incoming edge to the appropriate duplicate
              block.  */
           e2 = redirect_edge_and_branch (e, rd->dup_block);
+          gcc_assert (e == e2);
           flush_pending_stmts (e2);
-
-          if ((dump_file && (dump_flags & TDF_DETAILS))
-              && e->src != e2->src)
-            fprintf (dump_file, "    basic block %d created\n", e2->src->index);
         }
       else
         {
@@ -415,15 +443,51 @@ redirect_edges (void **slot, void *data)
           remove_ctrl_stmt_and_useless_edges (local_info->bb,
                                               rd->outgoing_edge->dest);
 
-          /* And fixup the flags on the single remaining edge.  */
-          EDGE_SUCC (local_info->bb, 0)->flags
-            &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
-          EDGE_SUCC (local_info->bb, 0)->flags |= EDGE_FALLTHRU;
+          /* Fixup the flags on the single remaining edge.  */
+          single_succ_edge (local_info->bb)->flags
+            &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
+          single_succ_edge (local_info->bb)->flags |= EDGE_FALLTHRU;
+
+          /* And adjust count and frequency on BB.  */
+          local_info->bb->count = e->count;
+          local_info->bb->frequency = EDGE_FREQUENCY (e);
         }
     }
+
+  /* Indicate that we actually threaded one or more jumps.  */
+  if (rd->incoming_edges)
+    local_info->jumps_threaded = true;
+
   return 1;
 }
 
+/* Return true if this block has no executable statements other than
+   a simple ctrl flow instruction.  When the number of outgoing edges
+   is one, this is equivalent to a "forwarder" block.  */
+
+static bool
+redirection_block_p (basic_block bb)
+{
+  block_stmt_iterator bsi;
+
+  /* Advance to the first executable statement.  */
+  bsi = bsi_start (bb);
+  while (!bsi_end_p (bsi)
+         && (TREE_CODE (bsi_stmt (bsi)) == LABEL_EXPR
+             || IS_EMPTY_STMT (bsi_stmt (bsi))))
+    bsi_next (&bsi);
+
+  /* Check if this is an empty block.  */
+  if (bsi_end_p (bsi))
+    return true;
+
+  /* Test that we've reached the terminating control statement.  */
+  return bsi_stmt (bsi)
+         && (TREE_CODE (bsi_stmt (bsi)) == COND_EXPR
+             || TREE_CODE (bsi_stmt (bsi)) == GOTO_EXPR
+             || TREE_CODE (bsi_stmt (bsi)) == SWITCH_EXPR);
+}
+
 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
    is reached via one or more specific incoming edges, we know which
    outgoing edge from BB will be traversed.
@@ -442,25 +506,18 @@ redirect_edges (void **slot, void *data)
    successor of BB.  We then revector the incoming edges into BB to
    the appropriate duplicate of BB.
 
-   BB and its duplicates will have assignments to the same set of
-   SSA_NAMEs.  Right now, we just call into rewrite_ssa_into_ssa
-   to update the SSA graph for those names.
-
-   We are also going to experiment with a true incremental update
-   scheme for the duplicated resources.  One of the interesting
-   properties we can exploit here is that all the resources set
-   in BB will have the same IDFS, so we have one IDFS computation
-   per block with incoming threaded edges, which can lower the
-   cost of the true incremental update algorithm.  */
+   If NOLOOP_ONLY is true, we only perform the threading as long as it
+   does not affect the structure of the loops in a nontrivial way.  */
 
-static void
-thread_block (basic_block bb)
+static bool
+thread_block (basic_block bb, bool noloop_only)
 {
   /* E is an incoming edge into BB that we may or may not want to
      redirect to a duplicate of BB.  */
-  edge e;
+  edge e, e2;
   edge_iterator ei;
   struct local_info local_info;
+  struct loop *loop = bb->loop_father;
 
   /* ALL indicates whether or not all incoming edges into BB should
      be threaded to a duplicate of BB.
@@ -475,22 +532,44 @@ thread_block (basic_block bb)
                                             redirection_data_eq,
                                             free);
 
+  /* If we thread the latch of the loop to its exit, the loop ceases to
+     exist.  Make sure we do not restrict ourselves in order to preserve
+     this loop.  */
+  if (loop->header == bb)
+    {
+      e = loop_latch_edge (loop);
+      e2 = (edge) e->aux;
+
+      if (e2 && loop_exit_edge_p (loop, e2))
+        {
+          loop->header = NULL;
+          loop->latch = NULL;
+        }
+    }
+
   /* Record each unique threaded destination into a hash table for
      efficient lookups.  */
   FOR_EACH_EDGE (e, ei, bb->preds)
     {
-      if (!e->aux)
+      e2 = (edge) e->aux;
+
+      if (!e2
+          /* If NOLOOP_ONLY is true, we only allow threading through the
+             header of a loop to exit edges.  */
+          || (noloop_only
+              && bb == bb->loop_father->header
+              && !loop_exit_edge_p (bb->loop_father, e2)))
        {
          all = false;
+         continue;
        }
-      else
-       {
-         edge e2 = e->aux;
-
-         /* Insert the outgoing edge into the hash table if it is not
-            already in the hash table.  */
-         lookup_redirection_data (e2, e, true);
-       }
+
+      update_bb_profile_for_threading (e->dest, EDGE_FREQUENCY (e),
+                                       e->count, (edge) e->aux);
+
+      /* Insert the outgoing edge into the hash table if it is not
+         already in the hash table.  */
+      lookup_redirection_data (e2, e, INSERT);
     }
 
@@ -499,10 +578,13 @@ thread_block (basic_block bb)
      DO_NOT_DUPLICATE attribute.  */
   if (all)
     {
-      edge e = EDGE_PRED (bb, 0)->aux;
-      lookup_redirection_data (e, NULL, false)->do_not_duplicate = true;
+      edge e = (edge) EDGE_PRED (bb, 0)->aux;
+      lookup_redirection_data (e, NULL, NO_INSERT)->do_not_duplicate = true;
     }
 
+  /* We do not update dominance info.  */
+  free_dominance_info (CDI_DOMINATORS);
+
   /* Now create duplicates of BB.
 
      Note that for a block with a high outgoing degree we can waste
@@ -514,6 +596,7 @@ thread_block (basic_block bb)
      the rest of the duplicates.  */
   local_info.template_block = NULL;
   local_info.bb = bb;
+  local_info.jumps_threaded = false;
   htab_traverse (redirection_data, create_duplicates, &local_info);
 
   /* The template does not have an outgoing edge.  Create that outgoing
@@ -532,40 +615,491 @@ thread_block (basic_block bb)
   /* Done with this block.  Clear REDIRECTION_DATA.  */
   htab_delete (redirection_data);
   redirection_data = NULL;
+
+  /* Indicate to our caller whether or not any jumps were threaded.  */
+  return local_info.jumps_threaded;
+}
+
+/* Threads edge E through E->dest to the edge E->aux.  Returns the copy
+   of E->dest created during threading, or E->dest if it was not necessary
+   to copy it (E is its single predecessor).  */
+
+static basic_block
+thread_single_edge (edge e)
+{
+  basic_block bb = e->dest;
+  edge eto = (edge) e->aux;
+  struct redirection_data rd;
+  struct local_info local_info;
+
+  e->aux = NULL;
+
+  thread_stats.num_threaded_edges++;
+
+  if (single_pred_p (bb))
+    {
+      /* If BB has just a single predecessor, we should only remove the
+         control statements at its end, and successors except for ETO.  */
+      remove_ctrl_stmt_and_useless_edges (bb, eto->dest);
+
+      /* And fixup the flags on the single remaining edge.  */
+      eto->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
+      eto->flags |= EDGE_FALLTHRU;
+
+      return bb;
+    }
+
+  /* Otherwise, we need to create a copy.  */
+  update_bb_profile_for_threading (bb, EDGE_FREQUENCY (e), e->count, eto);
+
+  local_info.bb = bb;
+  rd.outgoing_edge = eto;
+
+  create_block_for_threading (bb, &rd);
+  create_edge_and_update_destination_phis (&rd);
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
+             e->src->index, e->dest->index, rd.dup_block->index);
+
+  rd.dup_block->count = e->count;
+  rd.dup_block->frequency = EDGE_FREQUENCY (e);
+  single_succ_edge (rd.dup_block)->count = e->count;
+  redirect_edge_and_branch (e, rd.dup_block);
+  flush_pending_stmts (e);
+
+  return rd.dup_block;
 }
 
-/* Walk through all blocks and thread incoming edges to the block's
-   destinations as requested.  This is the only entry point into this
-   file.
+/* Callback for dfs_enumerate_from.  Returns true if BB is different
+   from STOP and DBDS_CE_STOP.  */
+
+static basic_block dbds_ce_stop;
+static bool
+dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
+{
+  return (bb != (const_basic_block) stop
+          && bb != dbds_ce_stop);
+}
+
+/* Evaluates the dominance relationship of latch of the LOOP and BB, and
+   returns the state.  */
+
+enum bb_dom_status
+{
+  /* BB does not dominate latch of the LOOP.  */
+  DOMST_NONDOMINATING,
+  /* The LOOP is broken (there is no path from the header to its latch.  */
+  DOMST_LOOP_BROKEN,
+  /* BB dominates the latch of the LOOP.  */
+  DOMST_DOMINATING
+};
+
+static enum bb_dom_status
+determine_bb_domination_status (struct loop *loop, basic_block bb)
+{
+  basic_block *bblocks;
+  unsigned nblocks, i;
+  bool bb_reachable = false;
+  edge_iterator ei;
+  edge e;
+
+#ifdef ENABLE_CHECKING
+  /* This function assumes BB is a successor of LOOP->header.  */
+    {
+      bool ok = false;
 
-   Blocks which have one or more incoming edges have INCOMING_EDGE_THREADED
-   set in the block's annotation.
+      FOR_EACH_EDGE (e, ei, bb->preds)
+        {
+          if (e->src == loop->header)
+            {
+              ok = true;
+              break;
+            }
+        }
 
-   Each edge that should be threaded has the new destination edge stored in
-   the original edge's AUX field.
+      gcc_assert (ok);
+    }
+#endif
+
+  if (bb == loop->latch)
+    return DOMST_DOMINATING;
+
+  /* Check that BB dominates LOOP->latch, and that it is back-reachable
+     from it.  */
+
+  bblocks = XCNEWVEC (basic_block, loop->num_nodes);
+  dbds_ce_stop = loop->header;
+  nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
+                                bblocks, loop->num_nodes, bb);
+  for (i = 0; i < nblocks; i++)
+    FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
+      {
+        if (e->src == loop->header)
+          {
+            free (bblocks);
+            return DOMST_NONDOMINATING;
+          }
+        if (e->src == bb)
+          bb_reachable = true;
+      }
+
+  free (bblocks);
+  return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
+}
 
-   This routine (or one of its callees) will clear INCOMING_EDGE_THREADED
-   in the block annotations and the AUX field in the edges.
+/* Thread jumps through the header of LOOP.  Returns true if cfg changes.
+   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
+   to the inside of the loop.  */
+
+static bool
+thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
+{
+  basic_block header = loop->header;
+  edge e, tgt_edge, latch = loop_latch_edge (loop);
+  edge_iterator ei;
+  basic_block tgt_bb, atgt_bb;
+  enum bb_dom_status domst;
+
+  /* We have already threaded through headers to exits, so all the threading
+     requests now are to the inside of the loop.  We need to avoid creating
+     irreducible regions (i.e., loops with more than one entry block), and
+     also loop with several latch edges, or new subloops of the loop (although
+     there are cases where it might be appropriate, it is difficult to decide,
+     and doing it wrongly may confuse other optimizers).
+
+     We could handle more general cases here.  However, the intention is to
+     preserve some information about the loop, which is impossible if its
+     structure changes significantly, in a way that is not well understood.
+     Thus we only handle few important special cases, in which also updating
+     of the loop-carried information should be feasible:
+
+     1) Propagation of latch edge to a block that dominates the latch block
+        of a loop.  This aims to handle the following idiom:
+
+        first = 1;
+        while (1)
+          {
+            if (first)
+              initialize;
+            first = 0;
+            body;
+          }
+
+        After threading the latch edge, this becomes
+
+        first = 1;
+        if (first)
+          initialize;
+        while (1)
+          {
+            first = 0;
+            body;
+          }
+
+        The original header of the loop is moved out of it, and we may thread
+        the remaining edges through it without further constraints.
+
+     2) All entry edges are propagated to a single basic block that dominates
+        the latch block of the loop.  This aims to handle the following idiom
+        (normally created for "for" loops):
+
+        i = 0;
+        while (1)
+          {
+            if (i >= 100)
+              break;
+            body;
+            i++;
+          }
+
+        This becomes
+
+        i = 0;
+        while (1)
+          {
+            body;
+            i++;
+            if (i >= 100)
+              break;
+          }
+     */
+
+  /* Threading through the header won't improve the code if the header has just
+     one successor.  */
+  if (single_succ_p (header))
+    goto fail;
+
+  if (latch->aux)
+    {
+      tgt_edge = (edge) latch->aux;
+      tgt_bb = tgt_edge->dest;
+    }
+  else if (!may_peel_loop_headers
+           && !redirection_block_p (loop->header))
+    goto fail;
+  else
+    {
+      tgt_bb = NULL;
+      tgt_edge = NULL;
+      FOR_EACH_EDGE (e, ei, header->preds)
+        {
+          if (!e->aux)
+            {
+              if (e == latch)
+                continue;
+
+              /* If latch is not threaded, and there is a header
+                 edge that is not threaded, we would create loop
+                 with multiple entries.  */
+              goto fail;
+            }
+
+          tgt_edge = (edge) e->aux;
+          atgt_bb = tgt_edge->dest;
+          if (!tgt_bb)
+            tgt_bb = atgt_bb;
+          /* Two targets of threading would make us create loop
+             with multiple entries.  */
+          else if (tgt_bb != atgt_bb)
+            goto fail;
+        }
+
+      if (!tgt_bb)
+        {
+          /* There are no threading requests.  */
+          return false;
+        }
+
+      /* Redirecting to empty loop latch is useless.  */
+      if (tgt_bb == loop->latch
+          && empty_block_p (loop->latch))
+        goto fail;
+    }
+
+  /* The target block must dominate the loop latch, otherwise we would be
+     creating a subloop.  */
+  domst = determine_bb_domination_status (loop, tgt_bb);
+  if (domst == DOMST_NONDOMINATING)
+    goto fail;
+  if (domst == DOMST_LOOP_BROKEN)
+    {
+      /* If the loop ceased to exist, mark it as such, and thread through its
+         original header.  */
+      loop->header = NULL;
+      loop->latch = NULL;
+      return thread_block (header, false);
+    }
+
+  if (tgt_bb->loop_father->header == tgt_bb)
+    {
+      /* If the target of the threading is a header of a subloop, we need
+         to create a preheader for it, so that the headers of the two loops
+         do not merge.  */
+      if (EDGE_COUNT (tgt_bb->preds) > 2)
+        {
+          tgt_bb = create_preheader (tgt_bb->loop_father, 0);
+          gcc_assert (tgt_bb != NULL);
+        }
+      else
+        tgt_bb = split_edge (tgt_edge);
+    }
+
+  if (latch->aux)
+    {
+      /* First handle the case latch edge is redirected.  */
+      loop->latch = thread_single_edge (latch);
+      gcc_assert (single_succ (loop->latch) == tgt_bb);
+      loop->header = tgt_bb;
+
+      /* Thread the remaining edges through the former header.  */
+      thread_block (header, false);
+    }
+  else
+    {
+      basic_block new_preheader;
+
+      /* Now consider the case entry edges are redirected to the new entry
+         block.  Remember one entry edge, so that we can find the new
+         preheader (its destination after threading).  */
+      FOR_EACH_EDGE (e, ei, header->preds)
+        {
+          if (e->aux)
+            break;
+        }
+
+      /* The duplicate of the header is the new preheader of the loop.  Ensure
+         that it is placed correctly in the loop hierarchy.  */
+      set_loop_copy (loop, loop_outer (loop));
+
+      thread_block (header, false);
+      set_loop_copy (loop, NULL);
+      new_preheader = e->dest;
+
+      /* Create the new latch block.  This is always necessary, as the latch
+         must have only a single successor, but the original header had at
+         least two successors.  */
+      loop->latch = NULL;
+      mfb_kj_edge = single_succ_edge (new_preheader);
+      loop->header = mfb_kj_edge->dest;
+      latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
+      loop->header = latch->dest;
+      loop->latch = latch->src;
+    }
+
+  return true;
+
+fail:
+  /* We failed to thread anything.  Cancel the requests.  */
+  FOR_EACH_EDGE (e, ei, header->preds)
+    {
+      e->aux = NULL;
+    }
+  return false;
+}
+
+/* Walk through the registered jump threads and convert them into a
+   form convenient for this pass.
+
+   Any block which has incoming edges threaded to outgoing edges
+   will have its entry in THREADED_BLOCK set.
+
+   Any threaded edge will have its new outgoing edge stored in the
+   original edge's AUX field.
+
+   This form avoids the need to walk all the edges in the CFG to
+   discover blocks which need processing and avoids unnecessary
+   hash table lookups to map from threaded edge to new target.  */
+
+static void
+mark_threaded_blocks (bitmap threaded_blocks)
+{
+  unsigned int i;
+  bitmap_iterator bi;
+  bitmap tmp = BITMAP_ALLOC (NULL);
+  basic_block bb;
+  edge e;
+  edge_iterator ei;
+
+  for (i = 0; i < VEC_length (edge, threaded_edges); i += 2)
+    {
+      edge e = VEC_index (edge, threaded_edges, i);
+      edge e2 = VEC_index (edge, threaded_edges, i + 1);
+
+      e->aux = e2;
+      bitmap_set_bit (tmp, e->dest->index);
+    }
+
+  /* If optimizing for size, only thread through block if we don't have
+     to duplicate it or it's an otherwise empty redirection block.  */
+  if (optimize_size)
+    {
+      EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
+        {
+          bb = BASIC_BLOCK (i);
+          if (EDGE_COUNT (bb->preds) > 1
+              && !redirection_block_p (bb))
+            {
+              FOR_EACH_EDGE (e, ei, bb->preds)
+                e->aux = NULL;
+            }
+          else
+            bitmap_set_bit (threaded_blocks, i);
+        }
+    }
+  else
+    bitmap_copy (threaded_blocks, tmp);
+
+  BITMAP_FREE(tmp);
+}
+
+
+/* Walk through all blocks and thread incoming edges to the appropriate
+   outgoing edge for each edge pair recorded in THREADED_EDGES.
 
    It is the caller's responsibility to fix the dominance information
    and rewrite duplicated SSA_NAMEs back into SSA form.
 
+   If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
+   loop headers if it does not simplify the loop.
+
    Returns true if one or more edges were threaded, false otherwise.  */
 
 bool
-thread_through_all_blocks (void)
+thread_through_all_blocks (bool may_peel_loop_headers)
 {
-  basic_block bb;
   bool retval = false;
+  unsigned int i;
+  bitmap_iterator bi;
+  bitmap threaded_blocks;
+  struct loop *loop;
+  loop_iterator li;
+
+  /* We must know about loops in order to preserve them.  */
+  gcc_assert (current_loops != NULL);
+
+  if (threaded_edges == NULL)
+    return false;
+
+  threaded_blocks = BITMAP_ALLOC (NULL);
+  memset (&thread_stats, 0, sizeof (thread_stats));
 
-  FOR_EACH_BB (bb)
+  mark_threaded_blocks (threaded_blocks);
+
+  initialize_original_copy_tables ();
+
+  /* First perform the threading requests that do not affect
+     loop structure.  */
+  EXECUTE_IF_SET_IN_BITMAP (threaded_blocks, 0, i, bi)
     {
-      if (bb_ann (bb)->incoming_edge_threaded)
-        {
-          thread_block (bb);
-          retval = true;
-          bb_ann (bb)->incoming_edge_threaded = false;
-        }
+      basic_block bb = BASIC_BLOCK (i);
+
+      if (EDGE_COUNT (bb->preds) > 0)
+        retval |= thread_block (bb, true);
+    }
+
+  /* Then perform the threading through loop headers.  We start with the
+     innermost loop, so that the changes in cfg we perform won't affect
+     further threading.  */
+  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
+    {
+      if (!loop->header
+          || !bitmap_bit_p (threaded_blocks, loop->header->index))
+        continue;
+
+      retval |= thread_through_loop_header (loop, may_peel_loop_headers);
     }
+
+  statistics_counter_event (cfun, "Jumps threaded",
+                            thread_stats.num_threaded_edges);
+
+  free_original_copy_tables ();
+
+  BITMAP_FREE (threaded_blocks);
+  threaded_blocks = NULL;
+
+  VEC_free (edge, heap, threaded_edges);
+  threaded_edges = NULL;
+
+  if (retval)
+    loops_state_set (LOOPS_NEED_FIXUP);
+
   return retval;
 }
+
+/* Register a jump threading opportunity.  We queue up all the jump
+   threading opportunities discovered by a pass and update the CFG
+   and SSA form all at once.
+
+   E is the edge we can thread, E2 is the new target edge, i.e., we
+   are effectively recording that E->dest can be changed to E2->dest
+   after fixing the SSA graph.  */
+
+void
+register_jump_thread (edge e, edge e2)
+{
+  if (threaded_edges == NULL)
+    threaded_edges = VEC_alloc (edge, heap, 10);
+
+  VEC_safe_push (edge, heap, threaded_edges, e);
+  VEC_safe_push (edge, heap, threaded_edges, e2);
+}
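
For reference, the sketch below is not part of the patch.  It illustrates, based on the comments added above, how a client pass is expected to drive the new interface: jump threading opportunities are queued with register_jump_thread () as they are discovered, and thread_through_all_blocks () later updates the CFG for all of them at once.  The helper find_taken_edge_for_pred () is a hypothetical stand-in for whatever analysis the pass performs; everything else is taken from the patch or from the standard GCC CFG iterators of this era.

/* Illustrative sketch only -- not part of the patch above.  A client
   pass queues jump threading requests as it discovers them and then
   asks for the CFG update in one batch.  find_taken_edge_for_pred ()
   is a hypothetical placeholder for the pass's own analysis.  */

static void
example_register_and_apply_threads (void)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          /* If entering BB via E provably leaves BB through TAKEN,
             queue the request; nothing in the CFG is changed yet.  */
          edge taken = find_taken_edge_for_pred (e);

          if (taken)
            register_jump_thread (e, taken);
        }
    }

  /* Apply all queued requests at once.  Loop information must be
     available (current_loops != NULL, per the assert in
     thread_through_all_blocks); passing false avoids peeling loop
     headers.  The return value says whether anything was threaded.  */
  if (thread_through_all_blocks (false))
    {
      /* The CFG changed; dominators were already freed by thread_block ()
         and the loops were marked for fixup when necessary.  */
    }
}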