#define EPILOGUE_USES(REGNO) 0
#endif
-/* The contents of the current function definition are allocated
- in this obstack, and all are freed at the end of the function.
- For top-level functions, this is temporary_obstack.
- Separate obstacks are made for nested functions. */
+/* The obstack on which the flow graph components are allocated. */
-extern struct obstack *function_obstack;
+struct obstack flow_obstack;
+static char *flow_firstobj;
/* Number of basic blocks in the current function. */
static void invalidate_mems_from_autoinc PARAMS ((struct propagate_block_info *,
rtx));
+static void invalidate_mems_from_set PARAMS ((struct propagate_block_info *,
+ rtx));
static void remove_fake_successors PARAMS ((basic_block));
static void flow_nodes_print PARAMS ((const char *, const sbitmap,
FILE *));
static void flow_loops_tree_build PARAMS ((struct loops *));
static int flow_loop_level_compute PARAMS ((struct loop *, int));
static int flow_loops_level_compute PARAMS ((struct loops *));
+static void allocate_bb_life_data PARAMS ((void));
\f
/* Find basic blocks of the current function.
F is the first insn of the function and NREGS the number of register
the same lifetime by allocating it off the function obstack
rather than using malloc. */
- bb = (basic_block) obstack_alloc (function_obstack, sizeof (*bb));
+ bb = (basic_block) obstack_alloc (&flow_obstack, sizeof (*bb));
memset (bb, 0, sizeof (*bb));
if (GET_CODE (head) == CODE_LABEL)
return 0;
/* Create the new structures. */
- new_bb = (basic_block) obstack_alloc (function_obstack, sizeof (*new_bb));
+ new_bb = (basic_block) obstack_alloc (&flow_obstack, sizeof (*new_bb));
new_edge = (edge) xcalloc (1, sizeof (*new_edge));
n_edges++;
if (bb->global_live_at_start)
{
- new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (function_obstack);
- new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (function_obstack);
+ new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
+ new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
COPY_REG_SET (new_bb->global_live_at_end, bb->global_live_at_end);
/* We now have to calculate which registers are live at the end
}
/* Create the new structures. */
- bb = (basic_block) obstack_alloc (function_obstack, sizeof (*bb));
+ bb = (basic_block) obstack_alloc (&flow_obstack, sizeof (*bb));
edge_out = (edge) xcalloc (1, sizeof (*edge_out));
n_edges++;
/* ??? This info is likely going to be out of date very soon. */
if (old_succ->global_live_at_start)
{
- bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (function_obstack);
- bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (function_obstack);
+ bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
+ bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
COPY_REG_SET (bb->global_live_at_start, old_succ->global_live_at_start);
COPY_REG_SET (bb->global_live_at_end, old_succ->global_live_at_start);
}
if (bb->local_set == NULL)
{
- bb->local_set = OBSTACK_ALLOC_REG_SET (function_obstack);
+ bb->local_set = OBSTACK_ALLOC_REG_SET (&flow_obstack);
rescan = 1;
}
else
/* Allocate the permanent data structures that represent the results
of life analysis. Not static since used also for stupid life analysis. */
-void
+static void
allocate_bb_life_data ()
{
register int i;
{
basic_block bb = BASIC_BLOCK (i);
- bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (function_obstack);
- bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (function_obstack);
+ bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
+ bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
}
ENTRY_BLOCK_PTR->global_live_at_end
- = OBSTACK_ALLOC_REG_SET (function_obstack);
+ = OBSTACK_ALLOC_REG_SET (&flow_obstack);
EXIT_BLOCK_PTR->global_live_at_start
- = OBSTACK_ALLOC_REG_SET (function_obstack);
+ = OBSTACK_ALLOC_REG_SET (&flow_obstack);
- regs_live_at_setjmp = OBSTACK_ALLOC_REG_SET (function_obstack);
+ regs_live_at_setjmp = OBSTACK_ALLOC_REG_SET (&flow_obstack);
}
void
|| (GET_CODE (XEXP (mem, 0)) == PLUS
&& XEXP (XEXP (mem, 0), 0) == frame_pointer_rtx
&& GET_CODE (XEXP (XEXP (mem, 0), 1)) == CONST_INT))
- pbi->mem_set_list = alloc_EXPR_LIST (0, mem, pbi->mem_set_list);
+ {
+#ifdef AUTO_INC_DEC
+ /* Store a copy of mem, otherwise the address may be scrogged
+ by find_auto_inc. This matters because insn_dead_p uses
+ an rtx_equal_p check to determine if two addresses are
+ the same. This works before find_auto_inc, but fails
+ after find_auto_inc, causing discrepancies between the
+ set of live registers calculated during the
+ calculate_global_regs_live phase and what actually exists
+ after flow completes, leading to aborts. */
+ if (flags & PROP_AUTOINC)
+ mem = shallow_copy_rtx (mem);
+#endif
+ pbi->mem_set_list = alloc_EXPR_LIST (0, mem, pbi->mem_set_list);
+ }
}
}
}
}
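The copy made under AUTO_INC_DEC above guards against ordinary pointer sharing: an entry that aliases the insn's MEM silently changes when find_auto_inc rewrites the address in place, while a shallow copy keeps the remembered form stable for later rtx_equal_p-style comparison. A minimal, self-contained analogy (plain C structs with hypothetical names, not GCC rtl, and not part of this patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for a MEM rtx: only the address form matters here.  */
struct mem_ref { const char *addr_form; };

int
main (void)
{
  struct mem_ref insn_mem = { "(reg fp)" };

  /* Sharing the object: the cached entry tracks later in-place rewrites.  */
  struct mem_ref *shared = &insn_mem;

  /* Shallow copy: the cached entry keeps the form it was recorded with.  */
  struct mem_ref *copy = malloc (sizeof *copy);
  memcpy (copy, &insn_mem, sizeof *copy);

  /* The "find_auto_inc" step: rewrite the address inside the insn.  */
  insn_mem.addr_form = "(post_inc (reg fp))";

  printf ("shared entry now reads:   %s\n", shared->addr_form);
  printf ("copied entry still reads: %s\n", copy->addr_form);
  free (copy);
  return 0;
}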
+/* EXP is either a MEM or a REG. Remove any dependent entries
+ from pbi->mem_set_list. */
+
+static void
+invalidate_mems_from_set (pbi, exp)
+ struct propagate_block_info *pbi;
+ rtx exp;
+{
+ rtx temp = pbi->mem_set_list;
+ rtx prev = NULL_RTX;
+ rtx next;
+
+ while (temp)
+ {
+ next = XEXP (temp, 1);
+ if ((GET_CODE (exp) == MEM
+ && output_dependence (XEXP (temp, 0), exp))
+ || (GET_CODE (exp) == REG
+ && reg_overlap_mentioned_p (exp, XEXP (temp, 0))))
+ {
+ /* Splice this entry out of the list. */
+ if (prev)
+ XEXP (prev, 1) = next;
+ else
+ pbi->mem_set_list = next;
+ free_EXPR_LIST_node (temp);
+ }
+ else
+ prev = temp;
+ temp = next;
+ }
+}
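The helper above uses the usual prev/next splice idiom for deleting nodes out of a singly linked EXPR_LIST while walking it. A minimal, self-contained sketch of the same idiom on an ordinary malloc'd list (hypothetical names, not GCC's list routines):

#include <stdio.h>
#include <stdlib.h>

struct node { int value; struct node *next; };

/* Remove every node whose value matches KEY, splicing around it.  */
static struct node *
remove_matching (struct node *head, int key)
{
  struct node *prev = NULL, *next, *temp = head;

  while (temp)
    {
      next = temp->next;
      if (temp->value == key)
        {
          if (prev)
            prev->next = next;
          else
            head = next;
          free (temp);
        }
      else
        prev = temp;
      temp = next;
    }
  return head;
}

int
main (void)
{
  struct node *head = NULL;
  for (int i = 0; i < 5; i++)
    {
      struct node *n = malloc (sizeof *n);
      n->value = i % 2;          /* alternating 0s and 1s */
      n->next = head;
      head = n;
    }
  head = remove_matching (head, 1);
  for (struct node *p = head; p; p = p->next)
    printf ("%d ", p->value);    /* prints: 0 0 0 */
  printf ("\n");
  return 0;
}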
+
/* Process the registers that are set within X. Their bits are set to
1 in the regset DEAD, because they are dead prior to this insn.
if (optimize && (flags & PROP_SCAN_DEAD_CODE))
{
if (GET_CODE (reg) == MEM || GET_CODE (reg) == REG)
- {
- rtx temp = pbi->mem_set_list;
- rtx prev = NULL_RTX;
- rtx next;
-
- while (temp)
- {
- next = XEXP (temp, 1);
- if ((GET_CODE (reg) == MEM
- && output_dependence (XEXP (temp, 0), reg))
- || (GET_CODE (reg) == REG
- && reg_overlap_mentioned_p (reg, XEXP (temp, 0))))
- {
- /* Splice this entry out of the list. */
- if (prev)
- XEXP (prev, 1) = next;
- else
- pbi->mem_set_list = next;
- free_EXPR_LIST_node (temp);
- }
- else
- prev = temp;
- temp = next;
- }
- }
+ invalidate_mems_from_set (pbi, reg);
/* If the memory reference had embedded side effects (autoincrement
address modes. Then we may need to kill some entries on the
everything that invalidates it. To be safe, don't eliminate any
stores though SP; none of them should be redundant anyway. */
&& ! reg_mentioned_p (stack_pointer_rtx, reg))
- pbi->mem_set_list = alloc_EXPR_LIST (0, reg, pbi->mem_set_list);
+ {
+#ifdef AUTO_INC_DEC
+ /* Store a copy of mem, otherwise the address may be
+ scrogged by find_auto_inc. */
+ if (flags & PROP_AUTOINC)
+ reg = shallow_copy_rtx (reg);
+#endif
+ pbi->mem_set_list = alloc_EXPR_LIST (0, reg, pbi->mem_set_list);
+ }
}
if (GET_CODE (reg) == REG
&& ! dead_or_set_p (y, SET_DEST (x))
&& try_pre_increment (y, SET_DEST (x), amount))
{
- /* We have found a suitable auto-increment
- and already changed insn Y to do it.
- So flush this increment-instruction. */
- PUT_CODE (insn, NOTE);
- NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
- NOTE_SOURCE_FILE (insn) = 0;
- /* Count a reference to this reg for the increment
- insn we are deleting. When a reg is incremented.
- spilling it is worse, so we want to make that
- less likely. */
+ /* We have found a suitable auto-increment and already changed
+ insn Y to do it. So flush this increment instruction. */
+ propagate_block_delete_insn (pbi->bb, insn);
+
+ /* Count a reference to this reg for the increment insn we are
+ deleting. When a reg is incremented, spilling it is worse,
+ so we want to make that less likely. */
if (regno >= FIRST_PSEUDO_REGISTER)
{
REG_N_REFS (regno) += (optimize_size ? 1
: pbi->bb->loop_depth + 1);
REG_N_SETS (regno)++;
}
+
+ /* Flush any remembered memories depending on the value of
+ the incremented register. */
+ invalidate_mems_from_set (pbi, SET_DEST (x));
+
return 1;
}
return 0;
}
}
-/* Compute dominator relationships using new flow graph structures. */
-
-void
-compute_flow_dominators (dominators, post_dominators)
- sbitmap *dominators;
- sbitmap *post_dominators;
-{
- int bb;
- sbitmap *temp_bitmap;
- edge e;
- basic_block *worklist, *workend, *qin, *qout;
- int qlen;
-
- /* Allocate a worklist array/queue. Entries are only added to the
- list if they were not already on the list. So the size is
- bounded by the number of basic blocks. */
- worklist = (basic_block *) xmalloc (sizeof (basic_block) * n_basic_blocks);
- workend = &worklist[n_basic_blocks];
-
- temp_bitmap = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
- sbitmap_vector_zero (temp_bitmap, n_basic_blocks);
-
- if (dominators)
- {
- /* The optimistic setting of dominators requires us to put every
- block on the work list initially. */
- qin = qout = worklist;
- for (bb = 0; bb < n_basic_blocks; bb++)
- {
- *qin++ = BASIC_BLOCK (bb);
- BASIC_BLOCK (bb)->aux = BASIC_BLOCK (bb);
- }
- qlen = n_basic_blocks;
- qin = worklist;
-
- /* We want a maximal solution, so initially assume everything dominates
- everything else. */
- sbitmap_vector_ones (dominators, n_basic_blocks);
-
- /* Mark successors of the entry block so we can identify them below. */
- for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
- e->dest->aux = ENTRY_BLOCK_PTR;
-
- /* Iterate until the worklist is empty. */
- while (qlen)
- {
- /* Take the first entry off the worklist. */
- basic_block b = *qout++;
- if (qout >= workend)
- qout = worklist;
- qlen--;
-
- bb = b->index;
-
- /* Compute the intersection of the dominators of all the
- predecessor blocks.
-
- If one of the predecessor blocks is the ENTRY block, then the
- intersection of the dominators of the predecessor blocks is
- defined as the null set. We can identify such blocks by the
- special value in the AUX field in the block structure. */
- if (b->aux == ENTRY_BLOCK_PTR)
- {
- /* Do not clear the aux field for blocks which are
- successors of the ENTRY block. That way we never add
- them to the worklist again.
-
- The intersect of dominators of the preds of this block is
- defined as the null set. */
- sbitmap_zero (temp_bitmap[bb]);
- }
- else
- {
- /* Clear the aux field of this block so it can be added to
- the worklist again if necessary. */
- b->aux = NULL;
- sbitmap_intersection_of_preds (temp_bitmap[bb], dominators, bb);
- }
-
- /* Make sure each block always dominates itself. */
- SET_BIT (temp_bitmap[bb], bb);
-
- /* If the out state of this block changed, then we need to
- add the successors of this block to the worklist if they
- are not already on the worklist. */
- if (sbitmap_a_and_b (dominators[bb], dominators[bb], temp_bitmap[bb]))
- {
- for (e = b->succ; e; e = e->succ_next)
- {
- if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR)
- {
- *qin++ = e->dest;
- if (qin >= workend)
- qin = worklist;
- qlen++;
-
- e->dest->aux = e;
- }
- }
- }
- }
- }
-
- if (post_dominators)
- {
- /* The optimistic setting of dominators requires us to put every
- block on the work list initially. */
- qin = qout = worklist;
- for (bb = 0; bb < n_basic_blocks; bb++)
- {
- *qin++ = BASIC_BLOCK (bb);
- BASIC_BLOCK (bb)->aux = BASIC_BLOCK (bb);
- }
- qlen = n_basic_blocks;
- qin = worklist;
-
- /* We want a maximal solution, so initially assume everything post
- dominates everything else. */
- sbitmap_vector_ones (post_dominators, n_basic_blocks);
-
- /* Mark predecessors of the exit block so we can identify them below. */
- for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
- e->src->aux = EXIT_BLOCK_PTR;
-
- /* Iterate until the worklist is empty. */
- while (qlen)
- {
- /* Take the first entry off the worklist. */
- basic_block b = *qout++;
- if (qout >= workend)
- qout = worklist;
- qlen--;
-
- bb = b->index;
-
- /* Compute the intersection of the post dominators of all the
- successor blocks.
-
- If one of the successor blocks is the EXIT block, then the
- intersection of the dominators of the successor blocks is
- defined as the null set. We can identify such blocks by the
- special value in the AUX field in the block structure. */
- if (b->aux == EXIT_BLOCK_PTR)
- {
- /* Do not clear the aux field for blocks which are
- predecessors of the EXIT block. That way we we never
- add them to the worklist again.
-
- The intersect of dominators of the succs of this block is
- defined as the null set. */
- sbitmap_zero (temp_bitmap[bb]);
- }
- else
- {
- /* Clear the aux field of this block so it can be added to
- the worklist again if necessary. */
- b->aux = NULL;
- sbitmap_intersection_of_succs (temp_bitmap[bb],
- post_dominators, bb);
- }
-
- /* Make sure each block always post dominates itself. */
- SET_BIT (temp_bitmap[bb], bb);
-
- /* If the out state of this block changed, then we need to
- add the successors of this block to the worklist if they
- are not already on the worklist. */
- if (sbitmap_a_and_b (post_dominators[bb],
- post_dominators[bb],
- temp_bitmap[bb]))
- {
- for (e = b->pred; e; e = e->pred_next)
- {
- if (!e->src->aux && e->src != ENTRY_BLOCK_PTR)
- {
- *qin++ = e->src;
- if (qin >= workend)
- qin = worklist;
- qlen++;
-
- e->src->aux = e;
- }
- }
- }
- }
- }
-
- free (worklist);
- free (temp_bitmap);
-}
-
-/* Given DOMINATORS, compute the immediate dominators into IDOM. If a
- block dominates only itself, its entry remains as INVALID_BLOCK. */
-
-void
-compute_immediate_dominators (idom, dominators)
- int *idom;
- sbitmap *dominators;
-{
- sbitmap *tmp;
- int b;
-
- tmp = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
-
- /* Begin with tmp(n) = dom(n) - { n }. */
- for (b = n_basic_blocks; --b >= 0;)
- {
- sbitmap_copy (tmp[b], dominators[b]);
- RESET_BIT (tmp[b], b);
- }
-
- /* Subtract out all of our dominator's dominators. */
- for (b = n_basic_blocks; --b >= 0;)
- {
- sbitmap tmp_b = tmp[b];
- int s;
-
- for (s = n_basic_blocks; --s >= 0;)
- if (TEST_BIT (tmp_b, s))
- sbitmap_difference (tmp_b, tmp_b, tmp[s]);
- }
-
- /* Find the one bit set in the bitmap and put it in the output array. */
- for (b = n_basic_blocks; --b >= 0;)
- {
- int t;
- EXECUTE_IF_SET_IN_SBITMAP (tmp[b], 0, t, { idom[b] = t; });
- }
-
- sbitmap_vector_free (tmp);
-}
-
-/* Given POSTDOMINATORS, compute the immediate postdominators into
- IDOM. If a block is only dominated by itself, its entry remains as
- INVALID_BLOCK. */
-
-void
-compute_immediate_postdominators (idom, postdominators)
- int *idom;
- sbitmap *postdominators;
-{
- compute_immediate_dominators (idom, postdominators);
-}
-
/* Recompute register set/reference counts immediately prior to register
allocation.
loop->depth, loop->level,
(long) (loop->outer ? loop->outer->num : -1));
- if (loop->pre_header_root)
- fprintf (file, ";; pre-header root %d\n",
- loop->pre_header_root->index);
- if (loop->pre_header_trace)
- flow_nodes_print (";; pre-header trace", loop->pre_header_trace,
- file);
+ if (loop->pre_header_edges)
+ flow_edge_list_print (";; pre-header edges", loop->pre_header_edges,
+ loop->num_pre_header_edges, file);
flow_edge_list_print (";; entry edges", loop->entry_edges,
loop->num_entries, file);
fprintf (file, ";; %d", loop->num_nodes);
{
struct loop *loop = &loops->array[i];
- if (loop->pre_header_trace)
- sbitmap_free (loop->pre_header_trace);
+ if (loop->pre_header_edges)
+ free (loop->pre_header_edges);
if (loop->nodes)
sbitmap_free (loop->nodes);
if (loop->entry_edges)
/* Find the root node of the loop pre-header extended basic block and
- the blocks along the trace from the root node to the loop header. */
+ the edges along the trace from the root node to the loop header. */
static void
flow_loop_pre_header_scan (loop)
struct loop *loop;
{
+ int num = 0;
basic_block ebb;
+ loop->num_pre_header_edges = 0;
+
if (loop->num_entries != 1)
return;
- /* Find pre_header root note and trace from root node to pre_header. */
- loop->pre_header_trace = sbitmap_alloc (n_basic_blocks);
- sbitmap_zero (loop->pre_header_trace);
-
ebb = loop->entry_edges[0]->src;
if (ebb != ENTRY_BLOCK_PTR)
{
- SET_BIT (loop->pre_header_trace, ebb->index);
- while (ebb->pred->src != ENTRY_BLOCK_PTR
- && ! ebb->pred->pred_next)
+ edge e;
+
+ /* Count the number of edges along the trace from the loop header
+ to the root of the pre-header extended basic block. Usually
+ this is only one or two edges. */
+ num++;
+ while (ebb->pred->src != ENTRY_BLOCK_PTR && ! ebb->pred->pred_next)
{
ebb = ebb->pred->src;
- SET_BIT (loop->pre_header_trace, ebb->index);
+ num++;
+ }
+
+ loop->pre_header_edges = (edge *) xmalloc (num * sizeof (edge *));
+ loop->num_pre_header_edges = num;
+
+ /* Store the edges in the order that they are followed. The source
+ of the first edge is the root node of the pre-header extended
+ basic block and the destination of the last edge is
+ the loop header. */
+ for (e = loop->entry_edges[0]; num; e = e->src->pred)
+ {
+ loop->pre_header_edges[--num] = e;
}
}
-
- loop->pre_header_root = ebb;
}
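The loop above counts the chain of single-predecessor edges first, then walks it a second time from the entry edge back toward the root, filling the array backwards so index 0 holds the root edge. A minimal, self-contained sketch of that fill-backwards idiom (plain C with a hypothetical node type, not GCC's edge/loop structures):

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *prev; };

int
main (void)
{
  /* Chain 0 <- 1 <- 2: node 0 is the far (root) end, node 2 is where
     the walk starts, analogous to the block nearest the loop header.  */
  struct node n0 = { 0, NULL }, n1 = { 1, &n0 }, n2 = { 2, &n1 };

  /* First pass: count the chain, as the patch counts pre-header edges.  */
  int num = 0;
  for (struct node *p = &n2; p; p = p->prev)
    num++;

  struct node **order = malloc (num * sizeof (struct node *));

  /* Second pass: store in the order the chain is followed, so that
     order[0] is the root and order[num - 1] is the starting node.  */
  int i = num;
  for (struct node *p = &n2; i; p = p->prev)
    order[--i] = p;

  for (i = 0; i < num; i++)
    printf ("%d ", order[i]->id);   /* prints: 0 1 2 */
  printf ("\n");
  free (order);
  return 0;
}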
/* Compute the dominators. */
dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
- compute_flow_dominators (dom, NULL);
+ calculate_dominance_info (NULL, dom, CDI_DOMINATORS);
/* Count the number of loop edges (back edges). This should be the
same as the number of natural loops. */
SET_HARD_REG_BIT (*to, i);
});
}
+
+/* Called once at initialization time. */
+
+void
+init_flow ()
+{
+ static int initialized;
+
+ if (!initialized)
+ {
+ gcc_obstack_init (&flow_obstack);
+ flow_firstobj = (char *) obstack_alloc (&flow_obstack, 0);
+ initialized = 1;
+ }
+ else
+ {
+ obstack_free (&flow_obstack, flow_firstobj);
+ flow_firstobj = (char *) obstack_alloc (&flow_obstack, 0);
+ }
+}
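For readers unfamiliar with the obstack idiom, here is a minimal, self-contained sketch of the same init-once / free-back-to-the-first-object pattern using glibc's <obstack.h> directly (plain obstack_init rather than GCC's gcc_obstack_init; all names are hypothetical and this is not part of the patch):

#include <obstack.h>
#include <stdio.h>
#include <stdlib.h>

#define obstack_chunk_alloc malloc
#define obstack_chunk_free free

static struct obstack ob;
static char *firstobj;

static void
my_init_flow (void)
{
  static int initialized;

  if (!initialized)
    {
      obstack_init (&ob);
      firstobj = (char *) obstack_alloc (&ob, 0);
      initialized = 1;
    }
  else
    {
      /* Release every object allocated after FIRSTOBJ, keeping the
         obstack itself around for reuse.  */
      obstack_free (&ob, firstobj);
      firstobj = (char *) obstack_alloc (&ob, 0);
    }
}

int
main (void)
{
  my_init_flow ();
  char *a = (char *) obstack_alloc (&ob, 64);   /* lives until next init */
  printf ("%p\n", (void *) a);

  my_init_flow ();                              /* a is now released */
  char *b = (char *) obstack_alloc (&ob, 64);
  printf ("%p\n", (void *) b);                  /* typically reuses the space */
  return 0;
}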