X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=blobdiff_plain;f=gcc%2Fcfganal.c;h=75cb49d293cf0e13cc594708b63ac556e5e37099;hp=96a20b8f3c3e7adee2fb2dd477c3806b69ed622b;hb=1df76a7982581c4557b57ee92d80eff2c7001ede;hpb=65f34de51669d0fe37752d46811f848402c274e4 diff --git a/gcc/cfganal.c b/gcc/cfganal.c index 96a20b8f3c3..75cb49d293c 100644 --- a/gcc/cfganal.c +++ b/gcc/cfganal.c @@ -1,12 +1,13 @@ /* Control flow graph analysis code for GNU compiler. Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 2001 Free Software Foundation, Inc. + 1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007, 2008 + Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free -Software Foundation; either version 2, or (at your option) any later +Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY @@ -15,19 +16,25 @@ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License -along with GCC; see the file COPYING. If not, write to the Free -Software Foundation, 59 Temple Place - Suite 330, Boston, MA -02111-1307, USA. */ +along with GCC; see the file COPYING3. If not see +. */ /* This file contains various simple utilities to analyze the CFG. */ #include "config.h" #include "system.h" +#include "coretypes.h" +#include "tm.h" #include "rtl.h" +#include "obstack.h" #include "hard-reg-set.h" #include "basic-block.h" +#include "insn-config.h" +#include "recog.h" #include "toplev.h" - -#include "obstack.h" +#include "tm_p.h" +#include "vec.h" +#include "vecprim.h" +#include "timevar.h" /* Store the data structures necessary for depth-first search. */ struct depth_first_search_dsS { @@ -43,110 +50,116 @@ struct depth_first_search_dsS { }; typedef struct depth_first_search_dsS *depth_first_search_ds; -static void flow_dfs_compute_reverse_init - PARAMS ((depth_first_search_ds)); -static void flow_dfs_compute_reverse_add_bb - PARAMS ((depth_first_search_ds, basic_block)); -static basic_block flow_dfs_compute_reverse_execute - PARAMS ((depth_first_search_ds)); -static void flow_dfs_compute_reverse_finish - PARAMS ((depth_first_search_ds)); -static void remove_fake_successors PARAMS ((basic_block)); -static bool need_fake_edge_p PARAMS ((rtx)); +static void flow_dfs_compute_reverse_init (depth_first_search_ds); +static void flow_dfs_compute_reverse_add_bb (depth_first_search_ds, + basic_block); +static basic_block flow_dfs_compute_reverse_execute (depth_first_search_ds, + basic_block); +static void flow_dfs_compute_reverse_finish (depth_first_search_ds); +static bool flow_active_insn_p (const_rtx); +/* Like active_insn_p, except keep the return value clobber around + even after reload. */ + +static bool +flow_active_insn_p (const_rtx insn) +{ + if (active_insn_p (insn)) + return true; + + /* A clobber of the function return value exists for buggy + programs that fail to return a value. Its effect is to + keep the return value from being live across the entire + function. If we allow it to be skipped, we introduce the + possibility for register lifetime confusion. 
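+     For instance (an illustrative fragment, not taken from this
+     change), such an insn has a pattern of the form
+
+       (clobber (reg:SI 0))
+
+     where REG_FUNCTION_VALUE_P is set on the register.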
*/ + if (GET_CODE (PATTERN (insn)) == CLOBBER + && REG_P (XEXP (PATTERN (insn), 0)) + && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0))) + return true; + + return false; +} + /* Return true if the block has no effect and only forwards control flow to its single destination. */ + bool -forwarder_block_p (bb) - basic_block bb; +forwarder_block_p (const_basic_block bb) { - rtx insn = bb->head; + rtx insn; + if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR - || !bb->succ || bb->succ->succ_next) + || !single_succ_p (bb)) return false; - while (insn != bb->end) - { - if (active_insn_p (insn)) - return false; - insn = NEXT_INSN (insn); - } - return (!active_insn_p (insn) - || (GET_CODE (insn) == JUMP_INSN && onlyjump_p (insn))); + for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn)) + if (INSN_P (insn) && flow_active_insn_p (insn)) + return false; + + return (!INSN_P (insn) + || (JUMP_P (insn) && simplejump_p (insn)) + || !flow_active_insn_p (insn)); } -/* Return nonzero if we can reach target from src by falling trought. */ +/* Return nonzero if we can reach target from src by falling through. */ + bool -can_fallthru (src, target) - basic_block src, target; +can_fallthru (basic_block src, basic_block target) { - rtx insn = src->end; - rtx insn2 = target->head; + rtx insn = BB_END (src); + rtx insn2; + edge e; + edge_iterator ei; - if (src->index + 1 == target->index && !active_insn_p (insn2)) + if (target == EXIT_BLOCK_PTR) + return true; + if (src->next_bb != target) + return 0; + FOR_EACH_EDGE (e, ei, src->succs) + if (e->dest == EXIT_BLOCK_PTR + && e->flags & EDGE_FALLTHRU) + return 0; + + insn2 = BB_HEAD (target); + if (insn2 && !active_insn_p (insn2)) insn2 = next_active_insn (insn2); + /* ??? Later we may add code to move jump tables offline. */ return next_active_insn (insn) == insn2; } - -/* Identify critical edges and set the bits appropriately. */ -void -mark_critical_edges () +/* Return nonzero if we could reach target from src by falling through, + if the target was made adjacent. If we already have a fall-through + edge to the exit block, we can't do that. */ +bool +could_fall_through (basic_block src, basic_block target) { - int i, n = n_basic_blocks; - basic_block bb; - - /* We begin with the entry block. This is not terribly important now, - but could be if a front end (Fortran) implemented alternate entry - points. */ - bb = ENTRY_BLOCK_PTR; - i = -1; - - while (1) - { - edge e; - - /* (1) Critical edges must have a source with multiple successors. */ - if (bb->succ && bb->succ->succ_next) - { - for (e = bb->succ; e; e = e->succ_next) - { - /* (2) Critical edges must have a destination with multiple - predecessors. Note that we know there is at least one - predecessor -- the edge we followed to get here. */ - if (e->dest->pred->pred_next) - e->flags |= EDGE_CRITICAL; - else - e->flags &= ~EDGE_CRITICAL; - } - } - else - { - for (e = bb->succ; e; e = e->succ_next) - e->flags &= ~EDGE_CRITICAL; - } + edge e; + edge_iterator ei; - if (++i >= n) - break; - bb = BASIC_BLOCK (i); - } + if (target == EXIT_BLOCK_PTR) + return true; + FOR_EACH_EDGE (e, ei, src->succs) + if (e->dest == EXIT_BLOCK_PTR + && e->flags & EDGE_FALLTHRU) + return 0; + return true; } - + /* Mark the back edges in DFS traversal. - Return non-zero if a loop (natural or otherwise) is present. + Return nonzero if a loop (natural or otherwise) is present. 
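+
+   For example (illustration only), in a two-block loop with edges
+   A -> B and B -> A, the edge B -> A is marked EDGE_DFS_BACK: when it
+   is examined, A's preorder number is not greater than B's, and A has
+   not yet received a postorder number.
+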
Inspired by Depth_First_Search_PP described in: Advanced Compiler Design and Implementation Steven Muchnick Morgan Kaufmann, 1997 - and heavily borrowed from flow_depth_first_order_compute. */ + and heavily borrowed from pre_and_rev_post_order_compute. */ bool -mark_dfs_back_edges () +mark_dfs_back_edges (void) { - edge *stack; + edge_iterator *stack; int *pre; int *post; int sp; @@ -156,33 +169,33 @@ mark_dfs_back_edges () bool found = false; /* Allocate the preorder and postorder number arrays. */ - pre = (int *) xcalloc (n_basic_blocks, sizeof (int)); - post = (int *) xcalloc (n_basic_blocks, sizeof (int)); + pre = XCNEWVEC (int, last_basic_block); + post = XCNEWVEC (int, last_basic_block); /* Allocate stack for back-tracking up CFG. */ - stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge)); + stack = XNEWVEC (edge_iterator, n_basic_blocks + 1); sp = 0; /* Allocate bitmap to track nodes that have been visited. */ - visited = sbitmap_alloc (n_basic_blocks); + visited = sbitmap_alloc (last_basic_block); /* None of the nodes in the CFG have been visited yet. */ sbitmap_zero (visited); /* Push the first edge on to the stack. */ - stack[sp++] = ENTRY_BLOCK_PTR->succ; + stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs); while (sp) { - edge e; + edge_iterator ei; basic_block src; basic_block dest; /* Look at the edge on the top of the stack. */ - e = stack[sp - 1]; - src = e->src; - dest = e->dest; - e->flags &= ~EDGE_DFS_BACK; + ei = stack[sp - 1]; + src = ei_edge (ei)->src; + dest = ei_edge (ei)->dest; + ei_edge (ei)->flags &= ~EDGE_DFS_BACK; /* Check if the edge destination has been visited yet. */ if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) @@ -191,12 +204,11 @@ mark_dfs_back_edges () SET_BIT (visited, dest->index); pre[dest->index] = prenum++; - - if (dest->succ) + if (EDGE_COUNT (dest->succs) > 0) { /* Since the DEST node has been visited for the first time, check its successors. */ - stack[sp++] = dest->succ; + stack[sp++] = ei_start (dest->succs); } else post[dest->index] = postnum++; @@ -206,13 +218,13 @@ mark_dfs_back_edges () if (dest != EXIT_BLOCK_PTR && src != ENTRY_BLOCK_PTR && pre[src->index] >= pre[dest->index] && post[dest->index] == 0) - e->flags |= EDGE_DFS_BACK, found = true; + ei_edge (ei)->flags |= EDGE_DFS_BACK, found = true; - if (! e->succ_next && src != ENTRY_BLOCK_PTR) + if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR) post[src->index] = postnum++; - if (e->succ_next) - stack[sp - 1] = e->succ_next; + if (!ei_one_before_end_p (ei)) + ei_next (&stack[sp - 1]); else sp--; } @@ -226,165 +238,64 @@ mark_dfs_back_edges () return found; } -/* Return true if we need to add fake edge to exit. - Helper function for the flow_call_edges_add. */ - -static bool -need_fake_edge_p (insn) - rtx insn; -{ - if (!INSN_P (insn)) - return false; - - if ((GET_CODE (insn) == CALL_INSN - && !SIBLING_CALL_P (insn) - && !find_reg_note (insn, REG_NORETURN, NULL) - && !find_reg_note (insn, REG_ALWAYS_RETURN, NULL) - && !CONST_OR_PURE_CALL_P (insn))) - return true; - - return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS - && MEM_VOLATILE_P (PATTERN (insn))) - || (GET_CODE (PATTERN (insn)) == PARALLEL - && asm_noperands (insn) != -1 - && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0))) - || GET_CODE (PATTERN (insn)) == ASM_INPUT); -} +/* Set the flag EDGE_CAN_FALLTHRU for edges that can be fallthru. 
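+   That is, mark an edge when it already falls through, or when it
+   could be made to fall through by inverting the condjump that ends
+   its source block.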
*/ -/* Add fake edges to the function exit for any non constant and non noreturn - calls, volatile inline assembly in the bitmap of blocks specified by - BLOCKS or to the whole CFG if BLOCKS is zero. Return the nuber of blocks - that were split. - - The goal is to expose cases in which entering a basic block does not imply - that all subsequent instructions must be executed. */ - -int -flow_call_edges_add (blocks) - sbitmap blocks; +void +set_edge_can_fallthru_flag (void) { - int i; - int blocks_split = 0; - int bb_num = 0; - basic_block *bbs; - bool check_last_block = false; - - /* Map bb indicies into basic block pointers since split_block - will renumber the basic blocks. */ - - bbs = xmalloc (n_basic_blocks * sizeof (*bbs)); - - if (! blocks) - { - for (i = 0; i < n_basic_blocks; i++) - bbs[bb_num++] = BASIC_BLOCK (i); - check_last_block = true; - } - else - { - EXECUTE_IF_SET_IN_SBITMAP (blocks, 0, i, - { - bbs[bb_num++] = BASIC_BLOCK (i); - if (i == n_basic_blocks - 1) - check_last_block = true; - }); - } - - /* In the last basic block, before epilogue generation, there will be - a fallthru edge to EXIT. Special care is required if the last insn - of the last basic block is a call because make_edge folds duplicate - edges, which would result in the fallthru edge also being marked - fake, which would result in the fallthru edge being removed by - remove_fake_edges, which would result in an invalid CFG. - - Moreover, we can't elide the outgoing fake edge, since the block - profiler needs to take this into account in order to solve the minimal - spanning tree in the case that the call doesn't return. - - Handle this by adding a dummy instruction in a new last basic block. */ - if (check_last_block - && need_fake_edge_p (BASIC_BLOCK (n_basic_blocks - 1)->end)) - { - edge e; - for (e = BASIC_BLOCK (n_basic_blocks - 1)->succ; e; e = e->succ_next) - if (e->dest == EXIT_BLOCK_PTR) - break; - insert_insn_on_edge (gen_rtx_USE (VOIDmode, const0_rtx), e); - commit_edge_insertions (); - } - - - /* Now add fake edges to the function exit for any non constant - calls since there is no way that we can determine if they will - return or not... */ + basic_block bb; - for (i = 0; i < bb_num; i++) + FOR_EACH_BB (bb) { - basic_block bb = bbs[i]; - rtx insn; - rtx prev_insn; + edge e; + edge_iterator ei; - for (insn = bb->end; ; insn = prev_insn) + FOR_EACH_EDGE (e, ei, bb->succs) { - prev_insn = PREV_INSN (insn); - if (need_fake_edge_p (insn)) - { - edge e; - - /* The above condition should be enought to verify that there is - no edge to the exit block in CFG already. Calling make_edge in - such case would make us to mark that edge as fake and remove it - later. */ -#ifdef ENABLE_CHECKING - if (insn == bb->end) - for (e = bb->succ; e; e = e->succ_next) - if (e->dest == EXIT_BLOCK_PTR) - abort (); -#endif - - /* Note that the following may create a new basic block - and renumber the existing basic blocks. */ - e = split_block (bb, insn); - if (e) - blocks_split++; - - make_edge (NULL, bb, EXIT_BLOCK_PTR, EDGE_FAKE); - } - if (insn == bb->head) - break; - } - } + e->flags &= ~EDGE_CAN_FALLTHRU; - if (blocks_split) - verify_flow_info (); + /* The FALLTHRU edge is also CAN_FALLTHRU edge. */ + if (e->flags & EDGE_FALLTHRU) + e->flags |= EDGE_CAN_FALLTHRU; + } - free (bbs); - return blocks_split; + /* If the BB ends with an invertible condjump all (2) edges are + CAN_FALLTHRU edges. 
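+	 Invertibility is tested by actually inverting the jump and,
+	 when that succeeds, inverting it straight back, leaving the
+	 insn stream unchanged.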
*/ + if (EDGE_COUNT (bb->succs) != 2) + continue; + if (!any_condjump_p (BB_END (bb))) + continue; + if (!invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0)) + continue; + invert_jump (BB_END (bb), JUMP_LABEL (BB_END (bb)), 0); + EDGE_SUCC (bb, 0)->flags |= EDGE_CAN_FALLTHRU; + EDGE_SUCC (bb, 1)->flags |= EDGE_CAN_FALLTHRU; + } } + /* Find unreachable blocks. An unreachable block will have 0 in - the reachable bit in block->flags. A non-zero value indicates the + the reachable bit in block->flags. A nonzero value indicates the block is reachable. */ void -find_unreachable_blocks () +find_unreachable_blocks (void) { edge e; - int i, n; - basic_block *tos, *worklist; + edge_iterator ei; + basic_block *tos, *worklist, bb; - n = n_basic_blocks; - tos = worklist = (basic_block *) xmalloc (sizeof (basic_block) * n); + tos = worklist = XNEWVEC (basic_block, n_basic_blocks); /* Clear all the reachability flags. */ - for (i = 0; i < n; ++i) - BASIC_BLOCK (i)->flags &= ~BB_REACHABLE; + FOR_EACH_BB (bb) + bb->flags &= ~BB_REACHABLE; /* Add our starting points to the worklist. Almost always there will - be only one. It isn't inconcievable that we might one day directly + be only one. It isn't inconceivable that we might one day directly support Fortran alternate entry points. */ - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) { *tos++ = e->dest; @@ -398,12 +309,16 @@ find_unreachable_blocks () { basic_block b = *--tos; - for (e = b->succ; e; e = e->succ_next) - if (!(e->dest->flags & BB_REACHABLE)) - { - *tos++ = e->dest; - e->dest->flags |= BB_REACHABLE; - } + FOR_EACH_EDGE (e, ei, b->succs) + { + basic_block dest = e->dest; + + if (!(dest->flags & BB_REACHABLE)) + { + *tos++ = dest; + dest->flags |= BB_REACHABLE; + } + } } free (worklist); @@ -423,64 +338,45 @@ find_unreachable_blocks () and the data structure is filled in. */ struct edge_list * -create_edge_list () +create_edge_list (void) { struct edge_list *elist; edge e; int num_edges; - int x; int block_count; + basic_block bb; + edge_iterator ei; - block_count = n_basic_blocks + 2; /* Include the entry and exit blocks. */ + block_count = n_basic_blocks; /* Include the entry and exit blocks. */ num_edges = 0; /* Determine the number of edges in the flow graph by counting successor edges on each basic block. */ - for (x = 0; x < n_basic_blocks; x++) + FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - basic_block bb = BASIC_BLOCK (x); - - for (e = bb->succ; e; e = e->succ_next) - num_edges++; + num_edges += EDGE_COUNT (bb->succs); } - /* Don't forget successors of the entry block. */ - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) - num_edges++; - elist = (struct edge_list *) xmalloc (sizeof (struct edge_list)); + elist = XNEW (struct edge_list); elist->num_blocks = block_count; elist->num_edges = num_edges; - elist->index_to_edge = (edge *) xmalloc (sizeof (edge) * num_edges); + elist->index_to_edge = XNEWVEC (edge, num_edges); num_edges = 0; - /* Follow successors of the entry block, and register these edges. */ - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) - { - elist->index_to_edge[num_edges] = e; - num_edges++; - } + /* Follow successors of blocks, and register these edges. 
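+     Afterwards INDEX_EDGE maps an index back to its edge, and
+     EDGE_INDEX maps an edge back to its index.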
*/ + FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) + FOR_EACH_EDGE (e, ei, bb->succs) + elist->index_to_edge[num_edges++] = e; - for (x = 0; x < n_basic_blocks; x++) - { - basic_block bb = BASIC_BLOCK (x); - - /* Follow all successors of blocks, and register these edges. */ - for (e = bb->succ; e; e = e->succ_next) - { - elist->index_to_edge[num_edges] = e; - num_edges++; - } - } return elist; } /* This function free's memory associated with an edge list. */ void -free_edge_list (elist) - struct edge_list *elist; +free_edge_list (struct edge_list *elist) { if (elist) { @@ -492,13 +388,12 @@ free_edge_list (elist) /* This function provides debug output showing an edge list. */ void -print_edge_list (f, elist) - FILE *f; - struct edge_list *elist; +print_edge_list (FILE *f, struct edge_list *elist) { int x; + fprintf (f, "Compressed edge list, %d BBs + entry & exit, and %d edges\n", - elist->num_blocks - 2, elist->num_edges); + elist->num_blocks, elist->num_edges); for (x = 0; x < elist->num_edges; x++) { @@ -520,18 +415,16 @@ print_edge_list (f, elist) extra edges. */ void -verify_edge_list (f, elist) - FILE *f; - struct edge_list *elist; +verify_edge_list (FILE *f, struct edge_list *elist) { - int x, pred, succ, index; + int pred, succ, index; edge e; + basic_block bb, p, s; + edge_iterator ei; - for (x = 0; x < n_basic_blocks; x++) + FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) { - basic_block bb = BASIC_BLOCK (x); - - for (e = bb->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, bb->succs) { pred = e->src->index; succ = e->dest->index; @@ -541,6 +434,7 @@ verify_edge_list (f, elist) fprintf (f, "*p* No index for edge from %d to %d\n", pred, succ); continue; } + if (INDEX_EDGE_PRED_BB (elist, index)->index != pred) fprintf (f, "*p* Pred for index %d should be %d not %d\n", index, pred, INDEX_EDGE_PRED_BB (elist, index)->index); @@ -549,160 +443,102 @@ verify_edge_list (f, elist) index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index); } } - for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next) - { - pred = e->src->index; - succ = e->dest->index; - index = EDGE_INDEX (elist, e->src, e->dest); - if (index == EDGE_INDEX_NO_EDGE) - { - fprintf (f, "*p* No index for edge from %d to %d\n", pred, succ); - continue; - } - if (INDEX_EDGE_PRED_BB (elist, index)->index != pred) - fprintf (f, "*p* Pred for index %d should be %d not %d\n", - index, pred, INDEX_EDGE_PRED_BB (elist, index)->index); - if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ) - fprintf (f, "*p* Succ for index %d should be %d not %d\n", - index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index); - } - /* We've verified that all the edges are in the list, no lets make sure + + /* We've verified that all the edges are in the list, now lets make sure there are no spurious edges in the list. 
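+     This is done the simple-minded way: walk every ordered pair of
+     blocks (P, S), search both edge vectors for an edge P -> S, and
+     check that EDGE_INDEX agrees.  That is roughly quadratic in the
+     number of blocks, which is acceptable for a consistency check.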
*/ - for (pred = 0; pred < n_basic_blocks; pred++) - for (succ = 0; succ < n_basic_blocks; succ++) + FOR_BB_BETWEEN (p, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) + FOR_BB_BETWEEN (s, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb) { - basic_block p = BASIC_BLOCK (pred); - basic_block s = BASIC_BLOCK (succ); - int found_edge = 0; - for (e = p->succ; e; e = e->succ_next) + FOR_EACH_EDGE (e, ei, p->succs) if (e->dest == s) { found_edge = 1; break; } - for (e = s->pred; e; e = e->pred_next) + + FOR_EACH_EDGE (e, ei, s->preds) if (e->src == p) { found_edge = 1; break; } - if (EDGE_INDEX (elist, BASIC_BLOCK (pred), BASIC_BLOCK (succ)) + + if (EDGE_INDEX (elist, p, s) == EDGE_INDEX_NO_EDGE && found_edge != 0) fprintf (f, "*** Edge (%d, %d) appears to not have an index\n", - pred, succ); - if (EDGE_INDEX (elist, BASIC_BLOCK (pred), BASIC_BLOCK (succ)) + p->index, s->index); + if (EDGE_INDEX (elist, p, s) != EDGE_INDEX_NO_EDGE && found_edge == 0) fprintf (f, "*** Edge (%d, %d) has index %d, but there is no edge\n", - pred, succ, EDGE_INDEX (elist, BASIC_BLOCK (pred), - BASIC_BLOCK (succ))); + p->index, s->index, EDGE_INDEX (elist, p, s)); } - for (succ = 0; succ < n_basic_blocks; succ++) +} + +/* Given PRED and SUCC blocks, return the edge which connects the blocks. + If no such edge exists, return NULL. */ + +edge +find_edge (basic_block pred, basic_block succ) +{ + edge e; + edge_iterator ei; + + if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds)) { - basic_block p = ENTRY_BLOCK_PTR; - basic_block s = BASIC_BLOCK (succ); - - int found_edge = 0; - - for (e = p->succ; e; e = e->succ_next) - if (e->dest == s) - { - found_edge = 1; - break; - } - for (e = s->pred; e; e = e->pred_next) - if (e->src == p) - { - found_edge = 1; - break; - } - if (EDGE_INDEX (elist, ENTRY_BLOCK_PTR, BASIC_BLOCK (succ)) - == EDGE_INDEX_NO_EDGE && found_edge != 0) - fprintf (f, "*** Edge (entry, %d) appears to not have an index\n", - succ); - if (EDGE_INDEX (elist, ENTRY_BLOCK_PTR, BASIC_BLOCK (succ)) - != EDGE_INDEX_NO_EDGE && found_edge == 0) - fprintf (f, "*** Edge (entry, %d) has index %d, but no edge exists\n", - succ, EDGE_INDEX (elist, ENTRY_BLOCK_PTR, - BASIC_BLOCK (succ))); + FOR_EACH_EDGE (e, ei, pred->succs) + if (e->dest == succ) + return e; } - for (pred = 0; pred < n_basic_blocks; pred++) + else { - basic_block p = BASIC_BLOCK (pred); - basic_block s = EXIT_BLOCK_PTR; - - int found_edge = 0; - - for (e = p->succ; e; e = e->succ_next) - if (e->dest == s) - { - found_edge = 1; - break; - } - for (e = s->pred; e; e = e->pred_next) - if (e->src == p) - { - found_edge = 1; - break; - } - if (EDGE_INDEX (elist, BASIC_BLOCK (pred), EXIT_BLOCK_PTR) - == EDGE_INDEX_NO_EDGE && found_edge != 0) - fprintf (f, "*** Edge (%d, exit) appears to not have an index\n", - pred); - if (EDGE_INDEX (elist, BASIC_BLOCK (pred), EXIT_BLOCK_PTR) - != EDGE_INDEX_NO_EDGE && found_edge == 0) - fprintf (f, "*** Edge (%d, exit) has index %d, but no edge exists\n", - pred, EDGE_INDEX (elist, BASIC_BLOCK (pred), - EXIT_BLOCK_PTR)); + FOR_EACH_EDGE (e, ei, succ->preds) + if (e->src == pred) + return e; } + + return NULL; } /* This routine will determine what, if any, edge there is between a specified predecessor and successor. 
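+   Unlike find_edge above, which walks the shorter of the two edge
+   vectors, this does a linear scan of the whole edge list, so it runs
+   in time proportional to the number of edges.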
*/ int -find_edge_index (edge_list, pred, succ) - struct edge_list *edge_list; - basic_block pred, succ; +find_edge_index (struct edge_list *edge_list, basic_block pred, basic_block succ) { int x; + for (x = 0; x < NUM_EDGES (edge_list); x++) - { - if (INDEX_EDGE_PRED_BB (edge_list, x) == pred - && INDEX_EDGE_SUCC_BB (edge_list, x) == succ) - return x; - } + if (INDEX_EDGE_PRED_BB (edge_list, x) == pred + && INDEX_EDGE_SUCC_BB (edge_list, x) == succ) + return x; + return (EDGE_INDEX_NO_EDGE); } /* Dump the list of basic blocks in the bitmap NODES. */ void -flow_nodes_print (str, nodes, file) - const char *str; - const sbitmap nodes; - FILE *file; +flow_nodes_print (const char *str, const_sbitmap nodes, FILE *file) { - int node; + unsigned int node = 0; + sbitmap_iterator sbi; if (! nodes) return; fprintf (file, "%s { ", str); - EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, node, {fprintf (file, "%d ", node);}); + EXECUTE_IF_SET_IN_SBITMAP (nodes, 0, node, sbi) + fprintf (file, "%d ", node); fputs ("}\n", file); } /* Dump the list of edges in the array EDGE_LIST. */ void -flow_edge_list_print (str, edge_list, num_edges, file) - const char *str; - const edge *edge_list; - int num_edges; - FILE *file; +flow_edge_list_print (const char *str, const edge *edge_list, int num_edges, FILE *file) { int i; @@ -713,25 +549,27 @@ flow_edge_list_print (str, edge_list, num_edges, file) for (i = 0; i < num_edges; i++) fprintf (file, "%d->%d ", edge_list[i]->src->index, edge_list[i]->dest->index); + fputs ("}\n", file); } -/* This routine will remove any fake successor edges for a basic block. - When the edge is removed, it is also removed from whatever predecessor +/* This routine will remove any fake predecessor edges for a basic block. + When the edge is removed, it is also removed from whatever successor list it is in. */ static void -remove_fake_successors (bb) - basic_block bb; +remove_fake_predecessors (basic_block bb) { edge e; - for (e = bb->succ; e;) + edge_iterator ei; + + for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); ) { - edge tmp = e; - e = e->succ_next; - if ((tmp->flags & EDGE_FAKE) == EDGE_FAKE) - remove_edge (tmp); + if ((e->flags & EDGE_FAKE) == EDGE_FAKE) + remove_edge (e); + else + ei_next (&ei); } } @@ -740,29 +578,35 @@ remove_fake_successors (bb) fake predecessors. */ void -remove_fake_edges () +remove_fake_edges (void) { - int x; + basic_block bb; - for (x = 0; x < n_basic_blocks; x++) - remove_fake_successors (BASIC_BLOCK (x)); + FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb) + remove_fake_predecessors (bb); +} - /* We've handled all successors except the entry block's. */ - remove_fake_successors (ENTRY_BLOCK_PTR); +/* This routine will remove all fake edges to the EXIT_BLOCK. */ + +void +remove_fake_exit_edges (void) +{ + remove_fake_predecessors (EXIT_BLOCK_PTR); } + /* This function will add a fake edge between any block which has no successors, and the exit block. Some data flow equations require these edges to exist. */ void -add_noreturn_fake_exit_edges () +add_noreturn_fake_exit_edges (void) { - int x; + basic_block bb; - for (x = 0; x < n_basic_blocks; x++) - if (BASIC_BLOCK (x)->succ == NULL) - make_edge (NULL, BASIC_BLOCK (x), EXIT_BLOCK_PTR, EDGE_FAKE); + FOR_EACH_BB (bb) + if (EDGE_COUNT (bb->succs) == 0) + make_single_succ_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE); } /* This function adds a fake edge between any infinite loops to the @@ -777,65 +621,73 @@ add_noreturn_fake_exit_edges () nodes not reachable from the exit block. 
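+
+   For example (illustration only), the body of
+
+     for (;;)
+       ;
+
+   has no path to EXIT; a fake edge from the loop block to EXIT lets
+   backward dataflow problems terminate.
+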
*/ void -connect_infinite_loops_to_exit () +connect_infinite_loops_to_exit (void) { - basic_block unvisited_block; + basic_block unvisited_block = EXIT_BLOCK_PTR; + struct depth_first_search_dsS dfs_ds; /* Perform depth-first search in the reverse graph to find nodes reachable from the exit block. */ - struct depth_first_search_dsS dfs_ds; - flow_dfs_compute_reverse_init (&dfs_ds); flow_dfs_compute_reverse_add_bb (&dfs_ds, EXIT_BLOCK_PTR); /* Repeatedly add fake edges, updating the unreachable nodes. */ while (1) { - unvisited_block = flow_dfs_compute_reverse_execute (&dfs_ds); + unvisited_block = flow_dfs_compute_reverse_execute (&dfs_ds, + unvisited_block); if (!unvisited_block) break; - make_edge (NULL, unvisited_block, EXIT_BLOCK_PTR, EDGE_FAKE); + + make_edge (unvisited_block, EXIT_BLOCK_PTR, EDGE_FAKE); flow_dfs_compute_reverse_add_bb (&dfs_ds, unvisited_block); } flow_dfs_compute_reverse_finish (&dfs_ds); - return; } -/* Compute reverse top sort order */ -void -flow_reverse_top_sort_order_compute (rts_order) - int *rts_order; +/* Compute reverse top sort order. This is computing a post order + numbering of the graph. If INCLUDE_ENTRY_EXIT is true, then then + ENTRY_BLOCK and EXIT_BLOCK are included. If DELETE_UNREACHABLE is + true, unreachable blocks are deleted. */ + +int +post_order_compute (int *post_order, bool include_entry_exit, + bool delete_unreachable) { - edge *stack; + edge_iterator *stack; int sp; - int postnum = 0; + int post_order_num = 0; sbitmap visited; + int count; + + if (include_entry_exit) + post_order[post_order_num++] = EXIT_BLOCK; /* Allocate stack for back-tracking up CFG. */ - stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge)); + stack = XNEWVEC (edge_iterator, n_basic_blocks + 1); sp = 0; /* Allocate bitmap to track nodes that have been visited. */ - visited = sbitmap_alloc (n_basic_blocks); + visited = sbitmap_alloc (last_basic_block); /* None of the nodes in the CFG have been visited yet. */ sbitmap_zero (visited); /* Push the first edge on to the stack. */ - stack[sp++] = ENTRY_BLOCK_PTR->succ; + stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs); while (sp) { - edge e; + edge_iterator ei; basic_block src; basic_block dest; /* Look at the edge on the top of the stack. */ - e = stack[sp - 1]; - src = e->src; - dest = e->dest; + ei = stack[sp - 1]; + src = ei_edge (ei)->src; + dest = ei_edge (ei)->dest; /* Check if the edge destination has been visited yet. */ if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) @@ -843,72 +695,301 @@ flow_reverse_top_sort_order_compute (rts_order) /* Mark that we have visited the destination. */ SET_BIT (visited, dest->index); - if (dest->succ) - { - /* Since the DEST node has been visited for the first - time, check its successors. */ - stack[sp++] = dest->succ; - } + if (EDGE_COUNT (dest->succs) > 0) + /* Since the DEST node has been visited for the first + time, check its successors. */ + stack[sp++] = ei_start (dest->succs); else - rts_order[postnum++] = dest->index; + post_order[post_order_num++] = dest->index; } else { - if (! 
e->succ_next && src != ENTRY_BLOCK_PTR) - rts_order[postnum++] = src->index; + if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR) + post_order[post_order_num++] = src->index; - if (e->succ_next) - stack[sp - 1] = e->succ_next; + if (!ei_one_before_end_p (ei)) + ei_next (&stack[sp - 1]); else sp--; } } + if (include_entry_exit) + { + post_order[post_order_num++] = ENTRY_BLOCK; + count = post_order_num; + } + else + count = post_order_num + 2; + + /* Delete the unreachable blocks if some were found and we are + supposed to do it. */ + if (delete_unreachable && (count != n_basic_blocks)) + { + basic_block b; + basic_block next_bb; + for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb) + { + next_bb = b->next_bb; + + if (!(TEST_BIT (visited, b->index))) + delete_basic_block (b); + } + + tidy_fallthru_edges (); + } + free (stack); sbitmap_free (visited); + return post_order_num; +} + + +/* Helper routine for inverted_post_order_compute. + BB has to belong to a region of CFG + unreachable by inverted traversal from the exit. + i.e. there's no control flow path from ENTRY to EXIT + that contains this BB. + This can happen in two cases - if there's an infinite loop + or if there's a block that has no successor + (call to a function with no return). + Some RTL passes deal with this condition by + calling connect_infinite_loops_to_exit () and/or + add_noreturn_fake_exit_edges (). + However, those methods involve modifying the CFG itself + which may not be desirable. + Hence, we deal with the infinite loop/no return cases + by identifying a unique basic block that can reach all blocks + in such a region by inverted traversal. + This function returns a basic block that guarantees + that all blocks in the region are reachable + by starting an inverted traversal from the returned block. */ + +static basic_block +dfs_find_deadend (basic_block bb) +{ + sbitmap visited = sbitmap_alloc (last_basic_block); + sbitmap_zero (visited); + + for (;;) + { + SET_BIT (visited, bb->index); + if (EDGE_COUNT (bb->succs) == 0 + || TEST_BIT (visited, EDGE_SUCC (bb, 0)->dest->index)) + { + sbitmap_free (visited); + return bb; + } + + bb = EDGE_SUCC (bb, 0)->dest; + } + + gcc_unreachable (); +} + + +/* Compute the reverse top sort order of the inverted CFG + i.e. starting from the exit block and following the edges backward + (from successors to predecessors). + This ordering can be used for forward dataflow problems among others. + + This function assumes that all blocks in the CFG are reachable + from the ENTRY (but not necessarily from EXIT). + + If there's an infinite loop, + a simple inverted traversal starting from the blocks + with no successors can't visit all blocks. + To solve this problem, we first do inverted traversal + starting from the blocks with no successor. + And if there's any block left that's not visited by the regular + inverted traversal from EXIT, + those blocks are in such problematic region. + Among those, we find one block that has + any visited predecessor (which is an entry into such a region), + and start looking for a "dead end" from that block + and do another inverted traversal from that block. */ + +int +inverted_post_order_compute (int *post_order) +{ + basic_block bb; + edge_iterator *stack; + int sp; + int post_order_num = 0; + sbitmap visited; + + /* Allocate stack for back-tracking up CFG. */ + stack = XNEWVEC (edge_iterator, n_basic_blocks + 1); + sp = 0; + + /* Allocate bitmap to track nodes that have been visited. 
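+     The bitmap is sized by last_basic_block rather than n_basic_blocks
+     because block indices need not be contiguous, so the largest index
+     in use may exceed the number of blocks.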
*/ + visited = sbitmap_alloc (last_basic_block); + + /* None of the nodes in the CFG have been visited yet. */ + sbitmap_zero (visited); + + /* Put all blocks that have no successor into the initial work list. */ + FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb) + if (EDGE_COUNT (bb->succs) == 0) + { + /* Push the initial edge on to the stack. */ + if (EDGE_COUNT (bb->preds) > 0) + { + stack[sp++] = ei_start (bb->preds); + SET_BIT (visited, bb->index); + } + } + + do + { + bool has_unvisited_bb = false; + + /* The inverted traversal loop. */ + while (sp) + { + edge_iterator ei; + basic_block pred; + + /* Look at the edge on the top of the stack. */ + ei = stack[sp - 1]; + bb = ei_edge (ei)->dest; + pred = ei_edge (ei)->src; + + /* Check if the predecessor has been visited yet. */ + if (! TEST_BIT (visited, pred->index)) + { + /* Mark that we have visited the destination. */ + SET_BIT (visited, pred->index); + + if (EDGE_COUNT (pred->preds) > 0) + /* Since the predecessor node has been visited for the first + time, check its predecessors. */ + stack[sp++] = ei_start (pred->preds); + else + post_order[post_order_num++] = pred->index; + } + else + { + if (bb != EXIT_BLOCK_PTR && ei_one_before_end_p (ei)) + post_order[post_order_num++] = bb->index; + + if (!ei_one_before_end_p (ei)) + ei_next (&stack[sp - 1]); + else + sp--; + } + } + + /* Detect any infinite loop and activate the kludge. + Note that this doesn't check EXIT_BLOCK itself + since EXIT_BLOCK is always added after the outer do-while loop. */ + FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb) + if (!TEST_BIT (visited, bb->index)) + { + has_unvisited_bb = true; + + if (EDGE_COUNT (bb->preds) > 0) + { + edge_iterator ei; + edge e; + basic_block visited_pred = NULL; + + /* Find an already visited predecessor. */ + FOR_EACH_EDGE (e, ei, bb->preds) + { + if (TEST_BIT (visited, e->src->index)) + visited_pred = e->src; + } + + if (visited_pred) + { + basic_block be = dfs_find_deadend (bb); + gcc_assert (be != NULL); + SET_BIT (visited, be->index); + stack[sp++] = ei_start (be->preds); + break; + } + } + } + + if (has_unvisited_bb && sp == 0) + { + /* No blocks are reachable from EXIT at all. + Find a dead-end from the ENTRY, and restart the iteration. */ + basic_block be = dfs_find_deadend (ENTRY_BLOCK_PTR); + gcc_assert (be != NULL); + SET_BIT (visited, be->index); + stack[sp++] = ei_start (be->preds); + } + + /* The only case the below while fires is + when there's an infinite loop. */ + } + while (sp); + + /* EXIT_BLOCK is always included. */ + post_order[post_order_num++] = EXIT_BLOCK; + + free (stack); + sbitmap_free (visited); + return post_order_num; } /* Compute the depth first search order and store in the array - DFS_ORDER if non-zero, marking the nodes visited in VISITED. If - RC_ORDER is non-zero, return the reverse completion number for each + PRE_ORDER if nonzero, marking the nodes visited in VISITED. If + REV_POST_ORDER is nonzero, return the reverse completion number for each node. Returns the number of nodes visited. A depth first search tries to get as far away from the starting point as quickly as - possible. */ + possible. + + pre_order is a really a preorder numbering of the graph. + rev_post_order is really a reverse postorder numbering of the graph. 
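+
+   A reverse postorder visits every node before any of its successors
+   (back edges aside), which makes it the preferred iteration order for
+   forward dataflow problems.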
+ */ int -flow_depth_first_order_compute (dfs_order, rc_order) - int *dfs_order; - int *rc_order; +pre_and_rev_post_order_compute (int *pre_order, int *rev_post_order, + bool include_entry_exit) { - edge *stack; + edge_iterator *stack; int sp; - int dfsnum = 0; - int rcnum = n_basic_blocks - 1; + int pre_order_num = 0; + int rev_post_order_num = n_basic_blocks - 1; sbitmap visited; /* Allocate stack for back-tracking up CFG. */ - stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge)); + stack = XNEWVEC (edge_iterator, n_basic_blocks + 1); sp = 0; + if (include_entry_exit) + { + if (pre_order) + pre_order[pre_order_num] = ENTRY_BLOCK; + pre_order_num++; + if (rev_post_order) + rev_post_order[rev_post_order_num--] = ENTRY_BLOCK; + } + else + rev_post_order_num -= NUM_FIXED_BLOCKS; + /* Allocate bitmap to track nodes that have been visited. */ - visited = sbitmap_alloc (n_basic_blocks); + visited = sbitmap_alloc (last_basic_block); /* None of the nodes in the CFG have been visited yet. */ sbitmap_zero (visited); /* Push the first edge on to the stack. */ - stack[sp++] = ENTRY_BLOCK_PTR->succ; + stack[sp++] = ei_start (ENTRY_BLOCK_PTR->succs); while (sp) { - edge e; + edge_iterator ei; basic_block src; basic_block dest; /* Look at the edge on the top of the stack. */ - e = stack[sp - 1]; - src = e->src; - dest = e->dest; + ei = stack[sp - 1]; + src = ei_edge (ei)->src; + dest = ei_edge (ei)->dest; /* Check if the edge destination has been visited yet. */ if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index)) @@ -916,35 +997,30 @@ flow_depth_first_order_compute (dfs_order, rc_order) /* Mark that we have visited the destination. */ SET_BIT (visited, dest->index); - if (dfs_order) - dfs_order[dfsnum++] = dest->index; + if (pre_order) + pre_order[pre_order_num] = dest->index; - if (dest->succ) - { - /* Since the DEST node has been visited for the first - time, check its successors. */ - stack[sp++] = dest->succ; - } - else - { - /* There are no successors for the DEST node so assign - its reverse completion number. */ - if (rc_order) - rc_order[rcnum--] = dest->index; - } + pre_order_num++; + + if (EDGE_COUNT (dest->succs) > 0) + /* Since the DEST node has been visited for the first + time, check its successors. */ + stack[sp++] = ei_start (dest->succs); + else if (rev_post_order) + /* There are no successors for the DEST node so assign + its reverse completion number. */ + rev_post_order[rev_post_order_num--] = dest->index; } else { - if (! e->succ_next && src != ENTRY_BLOCK_PTR) - { - /* There are no more successors for the SRC node - so assign its reverse completion number. */ - if (rc_order) - rc_order[rcnum--] = src->index; - } - - if (e->succ_next) - stack[sp - 1] = e->succ_next; + if (ei_one_before_end_p (ei) && src != ENTRY_BLOCK_PTR + && rev_post_order) + /* There are no more successors for the SRC node + so assign its reverse completion number. */ + rev_post_order[rev_post_order_num--] = src->index; + + if (!ei_one_before_end_p (ei)) + ei_next (&stack[sp - 1]); else sp--; } @@ -953,15 +1029,22 @@ flow_depth_first_order_compute (dfs_order, rc_order) free (stack); sbitmap_free (visited); - /* The number of nodes visited should not be greater than - n_basic_blocks. */ - if (dfsnum > n_basic_blocks) - abort (); + if (include_entry_exit) + { + if (pre_order) + pre_order[pre_order_num] = EXIT_BLOCK; + pre_order_num++; + if (rev_post_order) + rev_post_order[rev_post_order_num--] = EXIT_BLOCK; + /* The number of nodes visited should be the number of blocks. 
*/ + gcc_assert (pre_order_num == n_basic_blocks); + } + else + /* The number of nodes visited should be the number of blocks minus + the entry and exit blocks which are not visited here. */ + gcc_assert (pre_order_num == n_basic_blocks - NUM_FIXED_BLOCKS); - /* There are some nodes left in the CFG that are unreachable. */ - if (dfsnum < n_basic_blocks) - abort (); - return dfsnum; + return pre_order_num; } /* Compute the depth first search order on the _reverse_ graph and @@ -991,21 +1074,18 @@ flow_depth_first_order_compute (dfs_order, rc_order) /* Initialize the data structures used for depth-first search on the reverse graph. If INITIALIZE_STACK is nonzero, the exit block is added to the basic block stack. DATA is the current depth-first - search context. If INITIALIZE_STACK is non-zero, there is an + search context. If INITIALIZE_STACK is nonzero, there is an element on the stack. */ static void -flow_dfs_compute_reverse_init (data) - depth_first_search_ds data; +flow_dfs_compute_reverse_init (depth_first_search_ds data) { /* Allocate stack for back-tracking up CFG. */ - data->stack = - (basic_block *) xmalloc ((n_basic_blocks - (INVALID_BLOCK + 1)) - * sizeof (basic_block)); + data->stack = XNEWVEC (basic_block, n_basic_blocks); data->sp = 0; /* Allocate bitmap to track nodes that have been visited. */ - data->visited_blocks = sbitmap_alloc (n_basic_blocks - (INVALID_BLOCK + 1)); + data->visited_blocks = sbitmap_alloc (last_basic_block); /* None of the nodes in the CFG have been visited yet. */ sbitmap_zero (data->visited_blocks); @@ -1018,46 +1098,40 @@ flow_dfs_compute_reverse_init (data) block. */ static void -flow_dfs_compute_reverse_add_bb (data, bb) - depth_first_search_ds data; - basic_block bb; +flow_dfs_compute_reverse_add_bb (depth_first_search_ds data, basic_block bb) { data->stack[data->sp++] = bb; - return; + SET_BIT (data->visited_blocks, bb->index); } -/* Continue the depth-first search through the reverse graph starting - with the block at the stack's top and ending when the stack is - empty. Visited nodes are marked. Returns an unvisited basic - block, or NULL if there is none available. */ +/* Continue the depth-first search through the reverse graph starting with the + block at the stack's top and ending when the stack is empty. Visited nodes + are marked. Returns an unvisited basic block, or NULL if there is none + available. */ static basic_block -flow_dfs_compute_reverse_execute (data) - depth_first_search_ds data; +flow_dfs_compute_reverse_execute (depth_first_search_ds data, + basic_block last_unvisited) { basic_block bb; edge e; - int i; + edge_iterator ei; while (data->sp > 0) { bb = data->stack[--data->sp]; - /* Mark that we have visited this node. */ - if (!TEST_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1))) - { - SET_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1)); - - /* Perform depth-first search on adjacent vertices. */ - for (e = bb->pred; e; e = e->pred_next) - flow_dfs_compute_reverse_add_bb (data, e->src); - } + /* Perform depth-first search on adjacent vertices. */ + FOR_EACH_EDGE (e, ei, bb->preds) + if (!TEST_BIT (data->visited_blocks, e->src->index)) + flow_dfs_compute_reverse_add_bb (data, e->src); } /* Determine if there are unvisited basic blocks. 
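+     Scanning backward from LAST_UNVISITED rather than from the end of
+     the block chain avoids re-examining blocks already found to be
+     visited by earlier calls.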
*/ - for (i = n_basic_blocks - (INVALID_BLOCK + 1); --i >= 0;) - if (!TEST_BIT (data->visited_blocks, i)) - return BASIC_BLOCK (i + (INVALID_BLOCK + 1)); + FOR_BB_BETWEEN (bb, last_unvisited, NULL, prev_bb) + if (!TEST_BIT (data->visited_blocks, bb->index)) + return bb; + return NULL; } @@ -1065,10 +1139,220 @@ flow_dfs_compute_reverse_execute (data) reverse graph. */ static void -flow_dfs_compute_reverse_finish (data) - depth_first_search_ds data; +flow_dfs_compute_reverse_finish (depth_first_search_ds data) { free (data->stack); sbitmap_free (data->visited_blocks); - return; } + +/* Performs dfs search from BB over vertices satisfying PREDICATE; + if REVERSE, go against direction of edges. Returns number of blocks + found and their list in RSLT. RSLT can contain at most RSLT_MAX items. */ +int +dfs_enumerate_from (basic_block bb, int reverse, + bool (*predicate) (const_basic_block, const void *), + basic_block *rslt, int rslt_max, const void *data) +{ + basic_block *st, lbb; + int sp = 0, tv = 0; + unsigned size; + + /* A bitmap to keep track of visited blocks. Allocating it each time + this function is called is not possible, since dfs_enumerate_from + is often used on small (almost) disjoint parts of cfg (bodies of + loops), and allocating a large sbitmap would lead to quadratic + behavior. */ + static sbitmap visited; + static unsigned v_size; + +#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index)) +#define UNMARK_VISITED(BB) (RESET_BIT (visited, (BB)->index)) +#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index)) + + /* Resize the VISITED sbitmap if necessary. */ + size = last_basic_block; + if (size < 10) + size = 10; + + if (!visited) + { + + visited = sbitmap_alloc (size); + sbitmap_zero (visited); + v_size = size; + } + else if (v_size < size) + { + /* Ensure that we increase the size of the sbitmap exponentially. */ + if (2 * v_size > size) + size = 2 * v_size; + + visited = sbitmap_resize (visited, size, 0); + v_size = size; + } + + st = XCNEWVEC (basic_block, rslt_max); + rslt[tv++] = st[sp++] = bb; + MARK_VISITED (bb); + while (sp) + { + edge e; + edge_iterator ei; + lbb = st[--sp]; + if (reverse) + { + FOR_EACH_EDGE (e, ei, lbb->preds) + if (!VISITED_P (e->src) && predicate (e->src, data)) + { + gcc_assert (tv != rslt_max); + rslt[tv++] = st[sp++] = e->src; + MARK_VISITED (e->src); + } + } + else + { + FOR_EACH_EDGE (e, ei, lbb->succs) + if (!VISITED_P (e->dest) && predicate (e->dest, data)) + { + gcc_assert (tv != rslt_max); + rslt[tv++] = st[sp++] = e->dest; + MARK_VISITED (e->dest); + } + } + } + free (st); + for (sp = 0; sp < tv; sp++) + UNMARK_VISITED (rslt[sp]); + return tv; +#undef MARK_VISITED +#undef UNMARK_VISITED +#undef VISITED_P +} + + +/* Compute dominance frontiers, ala Harvey, Ferrante, et al. + + This algorithm can be found in Timothy Harvey's PhD thesis, at + http://www.cs.rice.edu/~harv/dissertation.pdf in the section on iterative + dominance algorithms. + + First, we identify each join point, j (any node with more than one + incoming edge is a join point). + + We then examine each predecessor, p, of j and walk up the dominator tree + starting at p. + + We stop the walk when we reach j's immediate dominator - j is in the + dominance frontier of each of the nodes in the walk, except for j's + immediate dominator. Intuitively, all of the rest of j's dominators are + shared by j's predecessors as well. + Since they dominate j, they will not have j in their dominance frontiers. 
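+
+   For example (illustration only), in the diamond A -> B, A -> C,
+   B -> D, C -> D, the join point is D and idom (D) = A.  Walking up
+   from predecessor B stops at A immediately, placing D in DF (B);
+   likewise D ends up in DF (C), while DF (A) stays empty.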
+ + The number of nodes touched by this algorithm is equal to the size + of the dominance frontiers, no more, no less. +*/ + + +static void +compute_dominance_frontiers_1 (bitmap *frontiers) +{ + edge p; + edge_iterator ei; + basic_block b; + FOR_EACH_BB (b) + { + if (EDGE_COUNT (b->preds) >= 2) + { + FOR_EACH_EDGE (p, ei, b->preds) + { + basic_block runner = p->src; + basic_block domsb; + if (runner == ENTRY_BLOCK_PTR) + continue; + + domsb = get_immediate_dominator (CDI_DOMINATORS, b); + while (runner != domsb) + { + if (bitmap_bit_p (frontiers[runner->index], b->index)) + break; + bitmap_set_bit (frontiers[runner->index], + b->index); + runner = get_immediate_dominator (CDI_DOMINATORS, + runner); + } + } + } + } +} + + +void +compute_dominance_frontiers (bitmap *frontiers) +{ + timevar_push (TV_DOM_FRONTIERS); + + compute_dominance_frontiers_1 (frontiers); + + timevar_pop (TV_DOM_FRONTIERS); +} + +/* Given a set of blocks with variable definitions (DEF_BLOCKS), + return a bitmap with all the blocks in the iterated dominance + frontier of the blocks in DEF_BLOCKS. DFS contains dominance + frontier information as returned by compute_dominance_frontiers. + + The resulting set of blocks are the potential sites where PHI nodes + are needed. The caller is responsible for freeing the memory + allocated for the return value. */ + +bitmap +compute_idf (bitmap def_blocks, bitmap *dfs) +{ + bitmap_iterator bi; + unsigned bb_index, i; + VEC(int,heap) *work_stack; + bitmap phi_insertion_points; + + work_stack = VEC_alloc (int, heap, n_basic_blocks); + phi_insertion_points = BITMAP_ALLOC (NULL); + + /* Seed the work list with all the blocks in DEF_BLOCKS. We use + VEC_quick_push here for speed. This is safe because we know that + the number of definition blocks is no greater than the number of + basic blocks, which is the initial capacity of WORK_STACK. */ + EXECUTE_IF_SET_IN_BITMAP (def_blocks, 0, bb_index, bi) + VEC_quick_push (int, work_stack, bb_index); + + /* Pop a block off the worklist, add every block that appears in + the original block's DF that we have not already processed to + the worklist. Iterate until the worklist is empty. Blocks + which are added to the worklist are potential sites for + PHI nodes. */ + while (VEC_length (int, work_stack) > 0) + { + bb_index = VEC_pop (int, work_stack); + + /* Since the registration of NEW -> OLD name mappings is done + separately from the call to update_ssa, when updating the SSA + form, the basic blocks where new and/or old names are defined + may have disappeared by CFG cleanup calls. In this case, + we may pull a non-existing block from the work stack. */ + gcc_assert (bb_index < (unsigned) last_basic_block); + + EXECUTE_IF_AND_COMPL_IN_BITMAP (dfs[bb_index], phi_insertion_points, + 0, i, bi) + { + /* Use a safe push because if there is a definition of VAR + in every basic block, then WORK_STACK may eventually have + more than N_BASIC_BLOCK entries. */ + VEC_safe_push (int, heap, work_stack, i); + bitmap_set_bit (phi_insertion_points, i); + } + } + + VEC_free (int, heap, work_stack); + + return phi_insertion_points; +} + +
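
As a usage sketch (not part of this patch), a pass that wants PHI
insertion points for one variable would combine the routines above as
follows.  The wrapper find_phi_sites is hypothetical; the calls it makes
are the interfaces defined or used in this file.

  /* Hypothetical sketch: given the set DEF_BLOCKS of blocks that define
     a variable, return the set of blocks that may need a PHI node.  */
  static bitmap
  find_phi_sites (bitmap def_blocks)
  {
    bitmap *dfs = XNEWVEC (bitmap, last_basic_block);
    bitmap phi_sites;
    basic_block bb;

    /* One dominance-frontier bitmap per basic block index.  */
    FOR_EACH_BB (bb)
      dfs[bb->index] = BITMAP_ALLOC (NULL);

    calculate_dominance_info (CDI_DOMINATORS);
    compute_dominance_frontiers (dfs);

    /* The iterated dominance frontier of the definition blocks is
       exactly the set of candidate PHI sites.  */
    phi_sites = compute_idf (def_blocks, dfs);

    FOR_EACH_BB (bb)
      BITMAP_FREE (dfs[bb->index]);
    free (dfs);
    free_dominance_info (CDI_DOMINATORS);

    return phi_sites;
  }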