+
+/* Performs dfs search from BB over vertices satisfying PREDICATE;
+   if REVERSE, go against direction of edges.  Returns number of blocks
+   found and their list in RSLT.  RSLT can contain at most RSLT_MAX items.
+   DATA is passed through unchanged to every PREDICATE call.  */
+int
+dfs_enumerate_from (basic_block bb, int reverse,
+		    bool (*predicate) (const_basic_block, const void *),
+		    basic_block *rslt, int rslt_max, const void *data)
+{
+  basic_block *st, lbb;
+  int sp = 0, tv = 0;
+  unsigned size;
+
+  /* A bitmap to keep track of visited blocks.  Allocating it each time
+     this function is called is not possible, since dfs_enumerate_from
+     is often used on small (almost) disjoint parts of cfg (bodies of
+     loops), and allocating a large sbitmap would lead to quadratic
+     behavior.  */
+  static sbitmap visited;
+  static unsigned v_size;
+
+#define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index))
+#define UNMARK_VISITED(BB) (RESET_BIT (visited, (BB)->index))
+#define VISITED_P(BB) (TEST_BIT (visited, (BB)->index))
+
+  /* Resize the VISITED sbitmap if necessary.  */
+  size = last_basic_block;
+  if (size < 10)
+    size = 10;
+
+  if (!visited)
+    {
+
+      visited = sbitmap_alloc (size);
+      sbitmap_zero (visited);
+      v_size = size;
+    }
+  else if (v_size < size)
+    {
+      /* Ensure that we increase the size of the sbitmap exponentially.  */
+      if (2 * v_size > size)
+	size = 2 * v_size;
+
+      /* Note: sbitmap_resize zeroes the newly added bits, preserving
+	 the invariant that VISITED is all-clear between calls.  */
+      visited = sbitmap_resize (visited, size, 0);
+      v_size = size;
+    }
+
+  /* ST is the DFS work stack.  RSLT_MAX entries suffice, because every
+     block pushed onto ST is simultaneously recorded in RSLT, and the
+     assertions below keep RSLT within RSLT_MAX.  */
+  st = XCNEWVEC (basic_block, rslt_max);
+  rslt[tv++] = st[sp++] = bb;
+  MARK_VISITED (bb);
+  while (sp)
+    {
+      edge e;
+      edge_iterator ei;
+      lbb = st[--sp];
+      if (reverse)
+	{
+	  /* Walk against edge direction: explore predecessors.  */
+	  FOR_EACH_EDGE (e, ei, lbb->preds)
+	    if (!VISITED_P (e->src) && predicate (e->src, data))
+	      {
+		gcc_assert (tv != rslt_max);
+		rslt[tv++] = st[sp++] = e->src;
+		MARK_VISITED (e->src);
+	      }
+	}
+      else
+	{
+	  /* Walk along edge direction: explore successors.  */
+	  FOR_EACH_EDGE (e, ei, lbb->succs)
+	    if (!VISITED_P (e->dest) && predicate (e->dest, data))
+	      {
+		gcc_assert (tv != rslt_max);
+		rslt[tv++] = st[sp++] = e->dest;
+		MARK_VISITED (e->dest);
+	      }
+	}
+    }
+  free (st);
+  /* Clear exactly the bits we set (one per found block), so the static
+     VISITED bitmap is all-clear again; this keeps the cleanup cost
+     proportional to the result size, not to the whole CFG.  */
+  for (sp = 0; sp < tv; sp++)
+    UNMARK_VISITED (rslt[sp]);
+  return tv;
+#undef MARK_VISITED
+#undef UNMARK_VISITED
+#undef VISITED_P
+}
+
+
+/* Compute dominance frontiers, a la Harvey, Ferrante, et al.
+
+ This algorithm can be found in Timothy Harvey's PhD thesis, at
+ http://www.cs.rice.edu/~harv/dissertation.pdf in the section on iterative
+ dominance algorithms.
+
+ First, we identify each join point, j (any node with more than one
+ incoming edge is a join point).
+
+ We then examine each predecessor, p, of j and walk up the dominator tree
+ starting at p.
+
+ We stop the walk when we reach j's immediate dominator - j is in the
+ dominance frontier of each of the nodes in the walk, except for j's
+ immediate dominator. Intuitively, all of the rest of j's dominators are
+ shared by j's predecessors as well.
+ Since they dominate j, they will not have j in their dominance frontiers.
+
+ The number of nodes touched by this algorithm is equal to the size
+ of the dominance frontiers, no more, no less.
+*/
+
+
+/* Worker for compute_dominance_frontiers.  For every join point B (a
+   block with two or more incoming edges), walk up the dominator tree
+   from each predecessor of B, adding B to the dominance frontier of
+   each block on the walk, and stop at B's immediate dominator.
+   FRONTIERS[I] receives the frontier of the block with index I.  */
+static void
+compute_dominance_frontiers_1 (bitmap *frontiers)
+{
+  edge p;
+  edge_iterator ei;
+  basic_block b;
+  FOR_EACH_BB (b)
+    {
+      if (EDGE_COUNT (b->preds) >= 2)
+	{
+	  /* B's immediate dominator is the same for every predecessor
+	     walk, so look it up once per block instead of once per
+	     incoming edge.  */
+	  basic_block domsb = get_immediate_dominator (CDI_DOMINATORS, b);
+	  FOR_EACH_EDGE (p, ei, b->preds)
+	    {
+	      basic_block runner = p->src;
+	      if (runner == ENTRY_BLOCK_PTR)
+		continue;
+
+	      while (runner != domsb)
+		{
+		  /* If B is already in RUNNER's frontier, the rest of
+		     this walk was done by an earlier predecessor; stop
+		     early so each frontier bit is set at most once.  */
+		  if (bitmap_bit_p (frontiers[runner->index], b->index))
+		    break;
+		  bitmap_set_bit (frontiers[runner->index], b->index);
+		  runner = get_immediate_dominator (CDI_DOMINATORS,
+						    runner);
+		}
+	    }
+	}
+    }
+}
+
+
+/* Public entry point: compute the dominance frontier of every basic
+   block, storing the frontier of the block with index I into
+   FRONTIERS[I].  The caller provides the (already allocated) bitmaps.
+   Requires dominance information (CDI_DOMINATORS) to be available.
+   The work is accounted to the TV_DOM_FRONTIERS timevar.  */
+void
+compute_dominance_frontiers (bitmap *frontiers)
+{
+  timevar_push (TV_DOM_FRONTIERS);
+
+  compute_dominance_frontiers_1 (frontiers);
+
+  timevar_pop (TV_DOM_FRONTIERS);
+}
+
+/* Given a set of blocks with variable definitions (DEF_BLOCKS),
+   return a bitmap with all the blocks in the iterated dominance
+   frontier of the blocks in DEF_BLOCKS.  DFS contains dominance
+   frontier information as returned by compute_dominance_frontiers.
+
+   The resulting set of blocks are the potential sites where PHI nodes
+   are needed.  The caller is responsible for freeing the memory
+   allocated for the return value.  */
+
+bitmap
+compute_idf (bitmap def_blocks, bitmap *dfs)
+{
+  bitmap_iterator bi;
+  unsigned bb_index, i;
+  VEC(int,heap) *work_stack;
+  bitmap phi_insertion_points;
+
+  work_stack = VEC_alloc (int, heap, n_basic_blocks);
+  phi_insertion_points = BITMAP_ALLOC (NULL);
+
+  /* Seed the work list with all the blocks in DEF_BLOCKS.  We use
+     VEC_quick_push here for speed.  This is safe because we know that
+     the number of definition blocks is no greater than the number of
+     basic blocks, which is the initial capacity of WORK_STACK.  */
+  EXECUTE_IF_SET_IN_BITMAP (def_blocks, 0, bb_index, bi)
+    VEC_quick_push (int, work_stack, bb_index);
+
+  /* Pop a block off the worklist, add every block that appears in
+     the original block's DF that we have not already processed to
+     the worklist.  Iterate until the worklist is empty.  Blocks
+     which are added to the worklist are potential sites for
+     PHI nodes.  */
+  while (VEC_length (int, work_stack) > 0)
+    {
+      bb_index = VEC_pop (int, work_stack);
+
+      /* Since the registration of NEW -> OLD name mappings is done
+	 separately from the call to update_ssa, when updating the SSA
+	 form, the basic blocks where new and/or old names are defined
+	 may have disappeared by CFG cleanup calls.  In this case,
+	 we may pull a non-existing block from the work stack.  */
+      gcc_assert (bb_index < (unsigned) last_basic_block);
+
+      /* Visit only the frontier blocks not yet marked; since
+	 PHI_INSERTION_POINTS only grows, each block is pushed at most
+	 once and the loop terminates.  */
+      EXECUTE_IF_AND_COMPL_IN_BITMAP (dfs[bb_index], phi_insertion_points,
+				      0, i, bi)
+	{
+	  /* Use a safe push because if there is a definition of VAR
+	     in every basic block, then WORK_STACK may eventually have
+	     more than N_BASIC_BLOCK entries.  */
+	  VEC_safe_push (int, heap, work_stack, i);
+	  bitmap_set_bit (phi_insertion_points, i);
+	}
+    }
+
+  VEC_free (int, heap, work_stack);
+
+  return phi_insertion_points;
+}
+
+