/* Control flow graph manipulation code for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
This file is part of GCC.
rtx insn, end, tmp;
/* If the head of this block is a CODE_LABEL, then it might be the
- label for an exception handler which can't be reached.
-
- We need to remove the label from the exception_handler_label list
- and remove the associated NOTE_INSN_EH_REGION_BEG and
- NOTE_INSN_EH_REGION_END notes. */
-
+ label for an exception handler which can't be reached. We need
+ to remove the label from the exception_handler_label list. */
insn = BB_HEAD (b);
-
if (LABEL_P (insn))
maybe_remove_eh_handler (insn);
if (tablejump_p (end, NULL, &tmp))
end = tmp;
- /* Include any barrier that may follow the basic block. */
+ /* Include any barriers that may follow the basic block. */
tmp = next_nonnote_insn (end);
- if (tmp && BARRIER_P (tmp))
- end = tmp;
+ while (tmp && BARRIER_P (tmp))
+ {
+ end = tmp;
+ tmp = next_nonnote_insn (end);
+ }
/* Selectively delete the entire chain. */
BB_HEAD (b) = NULL;
if (bb->global_live_at_start)
{
- new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
- new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
+ new_bb->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
+ new_bb->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
COPY_REG_SET (new_bb->global_live_at_end, bb->global_live_at_end);
/* We now have to calculate which registers are live at the end
return false;
/* There must be exactly one edge in between the blocks. */
- return (EDGE_COUNT (a->succs) == 1
- && EDGE_SUCC (a, 0)->dest == b
- && EDGE_COUNT (b->preds) == 1
+ return (single_succ_p (a)
+ && single_succ (a) == b
+ && single_pred_p (b)
&& a != b
/* Must be simple edge. */
- && !(EDGE_SUCC (a, 0)->flags & EDGE_COMPLEX)
+ && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
&& a->next_bb == b
&& a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
/* If the jump insn has side effects,
{
basic_block src = e->src;
rtx insn = BB_END (src), kill_from;
- edge tmp;
rtx set;
int fallthru = 0;
- edge_iterator ei;
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
|| BB_PARTITION (src) != BB_PARTITION (target)))
return NULL;
- /* Verify that all targets will be TARGET. */
- FOR_EACH_EDGE (tmp, ei, src->succs)
- if (tmp->dest != target && tmp != e)
- break;
+ /* We can replace or remove a complex jump only when we have exactly
+ two edges. Also, if we have exactly one outgoing edge, we can
+ redirect that. */
+ if (EDGE_COUNT (src->succs) >= 3
+ /* Verify that all targets will be TARGET. Specifically, the
+ edge that is not E must also go to TARGET. */
+ || (EDGE_COUNT (src->succs) == 2
+ && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
+ return NULL;
- if (tmp || !onlyjump_p (insn))
+ if (!onlyjump_p (insn))
return NULL;
if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
return NULL;
}
/* Keep only one edge out and set proper flags. */
- while (EDGE_COUNT (src->succs) > 1)
+ if (!single_succ_p (src))
remove_edge (e);
+ gcc_assert (single_succ_p (src));
- e = EDGE_SUCC (src, 0);
+ e = single_succ_edge (src);
if (fallthru)
e->flags = EDGE_FALLTHRU;
else
if (target->global_live_at_start)
{
- jump_block->global_live_at_start
- = OBSTACK_ALLOC_REG_SET (&flow_obstack);
- jump_block->global_live_at_end
- = OBSTACK_ALLOC_REG_SET (&flow_obstack);
+ jump_block->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
+ jump_block->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
COPY_REG_SET (jump_block->global_live_at_start,
target->global_live_at_start);
COPY_REG_SET (jump_block->global_live_at_end,
}
if (JUMP_P (BB_END (jump_block))
&& !any_condjump_p (BB_END (jump_block))
- && (EDGE_SUCC (jump_block, 0)->flags & EDGE_CROSSING))
+ && (single_succ_edge (jump_block)->flags & EDGE_CROSSING))
REG_NOTES (BB_END (jump_block)) = gen_rtx_EXPR_LIST
(REG_CROSSING_JUMP, NULL_RTX,
REG_NOTES (BB_END (jump_block)));
{
rtx q;
basic_block b = e->src, c = b->next_bb;
- edge e2;
- edge_iterator ei;
-
- FOR_EACH_EDGE (e2, ei, b->succs)
- if (e == e2)
- break;
/* ??? In a late-running flow pass, other folks may have deleted basic
blocks by nopping out blocks, leaving multiple BARRIERs between here
if (JUMP_P (q)
&& onlyjump_p (q)
&& (any_uncondjump_p (q)
- || (EDGE_SUCC (b, 0) == e && ei.index == EDGE_COUNT (b->succs) - 1)))
+ || single_succ_p (b)))
{
#ifdef HAVE_cc0
/* If this was a conditional jump, we need to also delete
/* ??? This info is likely going to be out of date very soon. */
if (edge_in->dest->global_live_at_start)
{
- bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
- bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
+ bb->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
+ bb->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
COPY_REG_SET (bb->global_live_at_start,
edge_in->dest->global_live_at_start);
COPY_REG_SET (bb->global_live_at_end,
safe_insert_insn_on_edge (rtx insn, edge e)
{
rtx x;
- regset_head killed_head;
- regset killed = INITIALIZE_REG_SET (killed_head);
+ regset killed;
rtx save_regs = NULL_RTX;
unsigned regno;
int noccmode;
noccmode = false;
#endif
+ killed = ALLOC_REG_SET (&reg_obstack);
+
for (x = insn; x; x = NEXT_INSN (x))
if (INSN_P (x))
note_stores (PATTERN (x), mark_killed_regs, killed);
+
+ /* Mark all hard registers as killed. Register allocator/reload cannot
+ cope with the situation when life range of hard register spans operation
+ for that the appropriate register is needed, i.e. it would be unsafe to
+ extend the life ranges of hard registers. */
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (!fixed_regs[regno]
+ && !REGNO_PTR_FRAME_P (regno))
+ SET_REGNO_REG_SET (killed, regno);
+
bitmap_and_into (killed, e->dest->global_live_at_start);
EXECUTE_IF_SET_IN_REG_SET (killed, 0, regno, rsi)
insert_insn_on_edge (insn, e);
FREE_REG_SET (killed);
+
return true;
}
/* Special case -- avoid inserting code between call and storing
its return value. */
if (watch_calls && (e->flags & EDGE_FALLTHRU)
- && EDGE_COUNT (e->dest->preds) == 1
+ && single_pred_p (e->dest)
&& e->src != ENTRY_BLOCK_PTR
&& CALL_P (BB_END (e->src)))
{
{
/* Figure out where to put these things. If the destination has
one predecessor, insert there. Except for the exit block. */
- if (EDGE_COUNT (e->dest->preds) == 1 && e->dest != EXIT_BLOCK_PTR)
+ if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR)
{
bb = e->dest;
/* If the source has one successor and the edge is not abnormal,
insert there. Except for the entry block. */
else if ((e->flags & EDGE_ABNORMAL) == 0
- && EDGE_COUNT (e->src->succs) == 1
+ && single_succ_p (e->src)
&& e->src != ENTRY_BLOCK_PTR)
{
bb = e->src;
NOTE_BASIC_BLOCK (new_note) = bb;
if (JUMP_P (BB_END (bb))
&& !any_condjump_p (BB_END (bb))
- && (EDGE_SUCC (bb, 0)->flags & EDGE_CROSSING))
+ && (single_succ_edge (bb)->flags & EDGE_CROSSING))
REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST
(REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb)));
if (after == bb_note)
for the (single) epilogue, which already has a fallthru edge
to EXIT. */
- e = EDGE_SUCC (bb, 0);
+ e = single_succ_edge (bb);
gcc_assert (e->dest == EXIT_BLOCK_PTR
- && EDGE_COUNT (bb->succs) == 1 && (e->flags & EDGE_FALLTHRU));
+ && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
e->flags &= ~EDGE_FALLTHRU;
emit_barrier_after (last);
else
gcc_assert (!JUMP_P (last));
- /* Mark the basic block for find_sub_basic_blocks. */
+ /* Mark the basic block for find_many_sub_basic_blocks. */
bb->aux = &bb->aux;
}
basic_block *bb_info;
rtx x;
int err = 0;
- basic_block bb, last_bb_seen;
+ basic_block bb;
bb_info = xcalloc (max_uid, sizeof (basic_block));
- /* Check bb chain & numbers. */
- last_bb_seen = ENTRY_BLOCK_PTR;
-
FOR_EACH_BB_REVERSE (bb)
{
rtx head = BB_HEAD (bb);
rtx note;
edge_iterator ei;
- if (INSN_P (BB_END (bb))
+ if (JUMP_P (BB_END (bb))
&& (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
&& EDGE_COUNT (bb->succs) >= 2
&& any_condjump_p (BB_END (bb)))
else
for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
insn = NEXT_INSN (insn))
- if (BARRIER_P (insn)
-#ifndef CASE_DROPS_THROUGH
- || INSN_P (insn)
-#else
- || (INSN_P (insn) && ! JUMP_TABLE_DATA_P (insn))
-#endif
- )
+ if (BARRIER_P (insn) || INSN_P (insn))
{
error ("verify_flow_info: Incorrect fallthru %i->%i",
e->src->index, e->dest->index);
return purged;
/* Redistribute probabilities. */
- if (EDGE_COUNT (bb->succs) == 1)
+ if (single_succ_p (bb))
{
- EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE;
- EDGE_SUCC (bb, 0)->count = bb->count;
+ single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
+ single_succ_edge (bb)->count = bb->count;
}
else
{
from non-local gotos and the like. If there were, we shouldn't
have created the sibcall in the first place. Second, there
should of course never have been a fallthru edge. */
- gcc_assert (EDGE_COUNT (bb->succs) == 1);
- gcc_assert (EDGE_SUCC (bb, 0)->flags == (EDGE_SIBCALL | EDGE_ABNORMAL));
+ gcc_assert (single_succ_p (bb));
+ gcc_assert (single_succ_edge (bb)->flags
+ == (EDGE_SIBCALL | EDGE_ABNORMAL));
return 0;
}
ei_next (&ei);
}
- gcc_assert (EDGE_COUNT (bb->succs) == 1);
+ gcc_assert (single_succ_p (bb));
- EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE;
- EDGE_SUCC (bb, 0)->count = bb->count;
+ single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
+ single_succ_edge (bb)->count = bb->count;
if (dump_file)
fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
of conditional jump, remove it. */
if (EDGE_COUNT (src->succs) == 2)
{
- bool found = false;
- unsigned ix = 0;
- edge tmp, s;
- edge_iterator ei;
-
- FOR_EACH_EDGE (tmp, ei, src->succs)
- if (e == tmp)
- {
- found = true;
- ix = ei.index;
- break;
- }
-
- gcc_assert (found);
-
- if (EDGE_COUNT (src->succs) > (ix + 1))
- s = EDGE_SUCC (src, ix + 1);
- else
- s = EDGE_SUCC (src, 0);
+ /* Find the edge that is different from E. */
+ edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
if (s->dest == dest
&& any_condjump_p (BB_END (src))
return false;
/* There must be exactly one edge in between the blocks. */
- return (EDGE_COUNT (a->succs) == 1
- && EDGE_SUCC (a, 0)->dest == b
- && EDGE_COUNT (b->preds) == 1
+ return (single_succ_p (a)
+ && single_succ (a) == b
+ && single_pred_p (b)
&& a != b
/* Must be simple edge. */
- && !(EDGE_SUCC (a, 0)->flags & EDGE_COMPLEX)
+ && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
&& a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
/* If the jump insn has side effects,
we can't kill the edge. */
static basic_block
cfg_layout_split_edge (edge e)
{
- edge new_e;
basic_block new_bb =
create_basic_block (e->src != ENTRY_BLOCK_PTR
? NEXT_INSN (BB_END (e->src)) : get_insns (),
create it to avoid getting an ICE later. */
if (e->dest->global_live_at_start)
{
- new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
- new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
+ new_bb->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
+ new_bb->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
COPY_REG_SET (new_bb->global_live_at_start,
e->dest->global_live_at_start);
COPY_REG_SET (new_bb->global_live_at_end,
e->dest->global_live_at_start);
}
- new_e = make_edge (new_bb, e->dest, EDGE_FALLTHRU);
+ make_edge (new_bb, e->dest, EDGE_FALLTHRU);
redirect_edge_and_branch_force (e, new_bb);
return new_bb;
if ((CALL_P (insn)
&& !SIBLING_CALL_P (insn)
&& !find_reg_note (insn, REG_NORETURN, NULL)
- && !find_reg_note (insn, REG_ALWAYS_RETURN, NULL)
&& !CONST_OR_PURE_CALL_P (insn)))
return true;
if (need_fake_edge_p (insn))
{
edge e;
- edge_iterator ei;
- FOR_EACH_EDGE (e, ei, bb->succs)
- if (e->dest == EXIT_BLOCK_PTR)
- {
- insert_insn_on_edge (gen_rtx_USE (VOIDmode, const0_rtx), e);
- commit_edge_insertions ();
- break;
- }
+ e = find_edge (bb, EXIT_BLOCK_PTR);
+ if (e)
+ {
+ insert_insn_on_edge (gen_rtx_USE (VOIDmode, const0_rtx), e);
+ commit_edge_insertions ();
+ }
}
}
#ifdef ENABLE_CHECKING
if (split_at_insn == BB_END (bb))
{
- edge_iterator ei;
- FOR_EACH_EDGE (e, ei, bb->succs)
- gcc_assert (e->dest != EXIT_BLOCK_PTR);
+ e = find_edge (bb, EXIT_BLOCK_PTR);
+ gcc_assert (e == NULL);
}
#endif
rtl_tidy_fallthru_edge,
rtl_block_ends_with_call_p,
rtl_block_ends_with_condjump_p,
- rtl_flow_call_edges_add
+ rtl_flow_call_edges_add,
+ NULL, /* execute_on_growing_pred */
+ NULL /* execute_on_shrinking_pred */
};
/* Implementation of CFG manipulation for cfg layout RTL, where
NULL,
rtl_block_ends_with_call_p,
rtl_block_ends_with_condjump_p,
- rtl_flow_call_edges_add
+ rtl_flow_call_edges_add,
+ NULL, /* execute_on_growing_pred */
+ NULL /* execute_on_shrinking_pred */
};