/* Control flow graph manipulation code for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
This file is part of GCC.
#include "cfglayout.h"
#include "expr.h"
#include "target.h"
-
-
-/* The labels mentioned in non-jump rtl. Valid during find_basic_blocks. */
-/* ??? Should probably be using LABEL_NUSES instead. It would take a
- bit of surgery to be able to use or co-opt the routines in jump. */
-rtx label_value_list;
+#include "cfgloop.h"
static int can_delete_note_p (rtx);
static int can_delete_label_p (rtx);
static void commit_one_edge_insertion (edge, int);
static rtx last_loop_beg_note (rtx);
static bool back_edge_of_syntactic_loop_p (basic_block, basic_block);
-basic_block force_nonfallthru_and_redirect (edge, basic_block);
static basic_block rtl_split_edge (edge);
static bool rtl_move_block_after (basic_block, basic_block);
static int rtl_verify_flow_info (void);
can_delete_note_p (rtx note)
{
return (NOTE_LINE_NUMBER (note) == NOTE_INSN_DELETED
- || NOTE_LINE_NUMBER (note) == NOTE_INSN_BASIC_BLOCK
- || NOTE_LINE_NUMBER (note) == NOTE_INSN_UNLIKELY_EXECUTED_CODE);
+ || NOTE_LINE_NUMBER (note) == NOTE_INSN_BASIC_BLOCK);
}
/* True if a given label can be deleted. */
return (!LABEL_PRESERVE_P (label)
/* User declared labels must be preserved. */
&& LABEL_NAME (label) == 0
- && !in_expr_list_p (forced_labels, label)
- && !in_expr_list_p (label_value_list, label));
+ && !in_expr_list_p (forced_labels, label));
}
/* Delete INSN by patching it out. Return the next insn. */
if (really_delete)
{
/* If this insn has already been deleted, something is very wrong. */
- if (INSN_DELETED_P (insn))
- abort ();
+ gcc_assert (!INSN_DELETED_P (insn));
remove_insn (insn);
INSN_DELETED_P (insn) = 1;
}
link_block (bb, after);
BASIC_BLOCK (bb->index) = bb;
update_bb_for_insn (bb);
- bb->partition = UNPARTITIONED;
+ BB_SET_PARTITION (bb, BB_UNPARTITIONED);
/* Tag the block so that we know it has been used when considering
other basic block notes. */
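The partition bookkeeping above switches from a dedicated bb->partition field to the BB_PARTITION / BB_SET_PARTITION / BB_COPY_PARTITION accessors, which keep the hot/cold partition in basic-block flag bits. A self-contained sketch of how such flag-based accessors behave; the macro bodies are illustrative stand-ins, not GCC's real definitions:

    /* Toy model of the BB_PARTITION / BB_SET_PARTITION /
       BB_COPY_PARTITION accessors: the partition lives in flag bits
       on the block rather than a separate field.  The macro bodies
       are illustrative, not GCC's real definitions.  */
    #include <assert.h>

    #define BB_UNPARTITIONED  0
    #define BB_HOT_PARTITION  1
    #define BB_COLD_PARTITION 2
    #define BB_PARTITION_MASK (BB_HOT_PARTITION | BB_COLD_PARTITION)

    struct bb_model { int flags; };

    #define BB_PARTITION(bb) ((bb)->flags & BB_PARTITION_MASK)
    #define BB_SET_PARTITION(bb, part) \
      ((bb)->flags = ((bb)->flags & ~BB_PARTITION_MASK) | (part))
    #define BB_COPY_PARTITION(dst, src) \
      BB_SET_PARTITION (dst, BB_PARTITION (src))

    int
    main (void)
    {
      struct bb_model a = { 0 }, b = { 0 };

      BB_SET_PARTITION (&a, BB_COLD_PARTITION);
      BB_COPY_PARTITION (&b, &a);
      assert (BB_PARTITION (&b) == BB_COLD_PARTITION);
      return 0;
    }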
rtx insn, end, tmp;
/* If the head of this block is a CODE_LABEL, then it might be the
- label for an exception handler which can't be reached.
-
- We need to remove the label from the exception_handler_label list
- and remove the associated NOTE_INSN_EH_REGION_BEG and
- NOTE_INSN_EH_REGION_END notes. */
-
- /* Get rid of all NOTE_INSN_LOOP_CONTs hanging before the block. */
-
- for (insn = PREV_INSN (BB_HEAD (b)); insn; insn = PREV_INSN (insn))
- {
- if (!NOTE_P (insn))
- break;
- if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
- NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
- }
-
+ label for an exception handler which can't be reached. We need
+ to remove the label from the exception_handler_label list. */
insn = BB_HEAD (b);
-
if (LABEL_P (insn))
maybe_remove_eh_handler (insn);
if (tablejump_p (end, NULL, &tmp))
end = tmp;
- /* Include any barrier that may follow the basic block. */
+ /* Include any barriers that may follow the basic block. */
tmp = next_nonnote_insn (end);
- if (tmp && BARRIER_P (tmp))
- end = tmp;
+ while (tmp && BARRIER_P (tmp))
+ {
+ end = tmp;
+ tmp = next_nonnote_insn (end);
+ }
/* Selectively delete the entire chain. */
BB_HEAD (b) = NULL;
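The barrier-skipping change above turns an `if` into a `while` so that every trailing barrier is absorbed into the range to be deleted, not just the first. A standalone model of the loop over a toy insn chain; next_nonnote stands in for GCC's next_nonnote_insn:

    /* Standalone model of the loop above: absorb every barrier that
       follows the block, skipping notes, instead of only the first.
       next_nonnote stands in for GCC's next_nonnote_insn.  */
    #include <stdio.h>

    enum kind { INSN, NOTE, BARRIER };

    struct insn { enum kind kind; struct insn *next; };

    static struct insn *
    next_nonnote (struct insn *i)
    {
      for (i = i->next; i && i->kind == NOTE; i = i->next)
        ;
      return i;
    }

    int
    main (void)
    {
      struct insn chain[4] = {
        { INSN,    &chain[1] },  /* end of the block */
        { NOTE,    &chain[2] },
        { BARRIER, &chain[3] },
        { BARRIER, NULL },       /* second barrier, now included too */
      };
      struct insn *end = &chain[0], *tmp = next_nonnote (end);

      while (tmp && tmp->kind == BARRIER)
        {
          end = tmp;
          tmp = next_nonnote (end);
        }
      printf ("end is chain[%d]\n", (int) (end - chain));  /* 3 */
      return 0;
    }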
basic_block new_bb;
rtx insn = insnp;
edge e;
+ edge_iterator ei;
if (!insn)
{
/* Create the new basic block. */
new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
- new_bb->partition = bb->partition;
+ BB_COPY_PARTITION (new_bb, bb);
BB_END (bb) = insn;
/* Redirect the outgoing edges. */
- new_bb->succ = bb->succ;
- bb->succ = NULL;
- for (e = new_bb->succ; e; e = e->succ_next)
+ new_bb->succs = bb->succs;
+ bb->succs = NULL;
+ FOR_EACH_EDGE (e, ei, new_bb->succs)
e->src = new_bb;
if (bb->global_live_at_start)
{
- new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
- new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
+ new_bb->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
+ new_bb->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
COPY_REG_SET (new_bb->global_live_at_end, bb->global_live_at_end);
/* We now have to calculate which registers are live at the end
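The dominant mechanical change in this patch is visible in the hunk above: the succ/succ_next linked-list walk becomes FOR_EACH_EDGE over the vector-backed succs, driven by an edge_iterator. A self-contained toy model of the iteration idiom, with plain C arrays standing in for GCC's VEC-based edge vectors:

    /* Toy model of FOR_EACH_EDGE: an index-based iterator over an
       edge vector replaces the old succ_next pointer chase.  The
       names mirror GCC's, but the definitions are illustrative.  */
    #include <stdio.h>

    typedef struct edge_def { int dest; } *edge;

    typedef struct { edge *vec; unsigned count; } edge_vec;

    typedef struct { unsigned index; } edge_iterator;

    #define FOR_EACH_EDGE(E, EI, V) \
      for ((EI).index = 0; \
           (EI).index < (V).count && ((E) = (V).vec[(EI).index], 1); \
           (EI).index++)

    int
    main (void)
    {
      struct edge_def e0 = { 1 }, e1 = { 2 };
      edge list[] = { &e0, &e1 };
      edge_vec succs = { list, 2 };
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, succs)
        printf ("edge to block %d\n", e->dest);
      return 0;
    }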
static bool
rtl_can_merge_blocks (basic_block a, basic_block b)
{
- bool partitions_ok = true;
-
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
- and cold sections. */
-
- if (flag_reorder_blocks_and_partition
- && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
- || find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
- || a->partition != b->partition))
- partitions_ok = false;
+ and cold sections.
+
+ Basic block partitioning may result in some jumps that appear to
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
+ bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
+
+ if (BB_PARTITION (a) != BB_PARTITION (b))
+ return false;
/* There must be exactly one edge in between the blocks. */
- return (a->succ && !a->succ->succ_next && a->succ->dest == b
- && !b->pred->pred_next && a != b
+ return (single_succ_p (a)
+ && single_succ (a) == b
+ && single_pred_p (b)
+ && a != b
/* Must be simple edge. */
- && !(a->succ->flags & EDGE_COMPLEX)
- && partitions_ok
+ && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
&& a->next_bb == b
&& a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
/* If the jump insn has side effects,
{
basic_block src = e->src;
rtx insn = BB_END (src), kill_from;
- edge tmp;
rtx set;
int fallthru = 0;
-
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
- and cold sections. */
+ and cold sections.
+
+ Basic block partitioning may result in some jumps that appear to
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
+ bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
- if (flag_reorder_blocks_and_partition
- && (find_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX)
- || (src->partition != target->partition)))
+ if (find_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX)
+ || BB_PARTITION (src) != BB_PARTITION (target))
return NULL;
- /* Verify that all targets will be TARGET. */
- for (tmp = src->succ; tmp; tmp = tmp->succ_next)
- if (tmp->dest != target && tmp != e)
- break;
+ /* We can replace or remove a complex jump only when we have exactly
+ two edges. Also, if we have exactly one outgoing edge, we can
+ redirect that. */
+ if (EDGE_COUNT (src->succs) >= 3
+ /* Verify that all targets will be TARGET. Specifically, the
+ edge that is not E must also go to TARGET. */
+ || (EDGE_COUNT (src->succs) == 2
+ && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
+ return NULL;
- if (tmp || !onlyjump_p (insn))
+ if (!onlyjump_p (insn))
return NULL;
if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
return NULL;
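The EDGE_SUCC (src, EDGE_SUCC (src, 0) == e) expression above (it reappears later in cfg_layout_redirect_edge_and_branch) picks "the edge that is not E" out of exactly two: the == comparison yields 0 or 1, which directly indexes the two-element successor vector. A standalone demonstration of the idiom:

    /* Demonstrates the pick-the-other-of-two idiom used above: an ==
       comparison evaluates to 0 or 1, which indexes the two-element
       successor set directly.  */
    #include <assert.h>

    int
    main (void)
    {
      const char *edges[2] = { "e0", "e1" };
      const char *e = edges[0];

      /* edges[0] == e is 1, so this selects edges[1].  */
      assert (edges[edges[0] == e] == edges[1]);

      e = edges[1];
      /* edges[0] == e is 0, so this selects edges[0].  */
      assert (edges[edges[0] == e] == edges[0]);
      return 0;
    }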
INSN_UID (insn), e->dest->index, target->index);
if (!redirect_jump (insn, block_label (target), 0))
{
- if (target == EXIT_BLOCK_PTR)
- return NULL;
- abort ();
+ gcc_assert (target == EXIT_BLOCK_PTR);
+ return NULL;
}
}
rtx target_label = block_label (target);
rtx barrier, label, table;
- emit_jump_insn_after (gen_jump (target_label), insn);
+ emit_jump_insn_after_noloc (gen_jump (target_label), insn);
JUMP_LABEL (BB_END (src)) = target_label;
LABEL_NUSES (target_label)++;
if (dump_file)
}
/* Keep only one edge out and set proper flags. */
- while (src->succ->succ_next)
- remove_edge (src->succ);
- e = src->succ;
+ if (!single_succ_p (src))
+ remove_edge (e);
+ gcc_assert (single_succ_p (src));
+
+ e = single_succ_edge (src);
if (fallthru)
e->flags = EDGE_FALLTHRU;
else
&& GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF
&& XEXP (XEXP (SET_SRC (tmp), 2), 0) == old_label)
{
- XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (VOIDmode,
+ XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
new_label);
--LABEL_NUSES (old_label);
++LABEL_NUSES (new_label);
return NULL;
/* If the insn doesn't go where we think, we're confused. */
- if (JUMP_LABEL (insn) != old_label)
- abort ();
+ gcc_assert (JUMP_LABEL (insn) == old_label);
/* If the substitution doesn't succeed, die. This can happen
if the back end emitted unrecognizable instructions or if
target is exit block on some arches. */
if (!redirect_jump (insn, block_label (target), 0))
{
- if (target == EXIT_BLOCK_PTR)
- return NULL;
- abort ();
+ gcc_assert (target == EXIT_BLOCK_PTR);
+ return NULL;
}
}
/* Like force_nonfallthru below, but additionally performs redirection
Used by redirect_edge_and_branch_force. */
-basic_block
+static basic_block
force_nonfallthru_and_redirect (edge e, basic_block target)
{
basic_block jump_block, new_bb = NULL, src = e->src;
{
rtx note;
edge b = unchecked_make_edge (e->src, target, 0);
+ bool redirected;
- if (!redirect_jump (BB_END (e->src), block_label (target), 0))
- abort ();
+ redirected = redirect_jump (BB_END (e->src), block_label (target), 0);
+ gcc_assert (redirected);
+
note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
if (note)
{
We can't redirect abnormal edge, but we still can split the fallthru
one and create separate abnormal edge to original destination.
This allows bb-reorder to make such edge non-fallthru. */
- if (e->dest != target)
- abort ();
+ gcc_assert (e->dest == target);
abnormal_edge_flags = e->flags & ~(EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
e->flags &= EDGE_FALLTHRU | EDGE_CAN_FALLTHRU;
}
- else if (!(e->flags & EDGE_FALLTHRU))
- abort ();
- else if (e->src == ENTRY_BLOCK_PTR)
+ else
{
- /* We can't redirect the entry block. Create an empty block at the
- start of the function which we use to add the new jump. */
- edge *pe1;
- basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR);
-
- /* Change the existing edge's source to be the new block, and add
- a new edge from the entry block to the new block. */
- e->src = bb;
- for (pe1 = &ENTRY_BLOCK_PTR->succ; *pe1; pe1 = &(*pe1)->succ_next)
- if (*pe1 == e)
- {
- *pe1 = e->succ_next;
- break;
- }
- e->succ_next = 0;
- bb->succ = e;
- make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
+ gcc_assert (e->flags & EDGE_FALLTHRU);
+ if (e->src == ENTRY_BLOCK_PTR)
+ {
+ /* We can't redirect the entry block. Create an empty block
+ at the start of the function which we use to add the new
+ jump. */
+ edge tmp;
+ edge_iterator ei;
+ bool found = false;
+
+ basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR);
+
+ /* Change the existing edge's source to be the new block, and add
+ a new edge from the entry block to the new block. */
+ e->src = bb;
+ for (ei = ei_start (ENTRY_BLOCK_PTR->succs); (tmp = ei_safe_edge (ei)); )
+ {
+ if (tmp == e)
+ {
+ VEC_unordered_remove (edge, ENTRY_BLOCK_PTR->succs, ei.index);
+ found = true;
+ break;
+ }
+ else
+ ei_next (&ei);
+ }
+
+ gcc_assert (found);
+
+ VEC_safe_push (edge, gc, bb->succs, e);
+ make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
+ }
}
- if (e->src->succ->succ_next || abnormal_edge_flags)
+ if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags)
{
/* Create the new structures. */
if (target->global_live_at_start)
{
- jump_block->global_live_at_start
- = OBSTACK_ALLOC_REG_SET (&flow_obstack);
- jump_block->global_live_at_end
- = OBSTACK_ALLOC_REG_SET (&flow_obstack);
+ jump_block->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
+ jump_block->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
COPY_REG_SET (jump_block->global_live_at_start,
target->global_live_at_start);
COPY_REG_SET (jump_block->global_live_at_end,
/* Make sure new block ends up in correct hot/cold section. */
- jump_block->partition = e->src->partition;
+ BB_COPY_PARTITION (jump_block, e->src);
if (flag_reorder_blocks_and_partition
- && targetm.have_named_sections)
- {
- if (e->src->partition == COLD_PARTITION)
- {
- rtx bb_note, new_note;
- for (bb_note = BB_HEAD (jump_block);
- bb_note && bb_note != NEXT_INSN (BB_END (jump_block));
- bb_note = NEXT_INSN (bb_note))
- if (NOTE_P (bb_note)
- && NOTE_LINE_NUMBER (bb_note) == NOTE_INSN_BASIC_BLOCK)
- break;
- new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE,
- bb_note);
- NOTE_BASIC_BLOCK (new_note) = jump_block;
- jump_block->partition = COLD_PARTITION;
- }
- if (JUMP_P (BB_END (jump_block))
- && !any_condjump_p (BB_END (jump_block))
- && jump_block->succ->crossing_edge )
- REG_NOTES (BB_END (jump_block)) = gen_rtx_EXPR_LIST
- (REG_CROSSING_JUMP, NULL_RTX,
- REG_NOTES (BB_END (jump_block)));
- }
-
+ && targetm.have_named_sections
+ && JUMP_P (BB_END (jump_block))
+ && !any_condjump_p (BB_END (jump_block))
+ && (EDGE_SUCC (jump_block, 0)->flags & EDGE_CROSSING))
+ REG_NOTES (BB_END (jump_block)) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
+ NULL_RTX,
+ REG_NOTES
+ (BB_END
+ (jump_block)));
+
/* Wire edge in. */
new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
new_edge->probability = e->probability;
if (target == EXIT_BLOCK_PTR)
{
#ifdef HAVE_return
- emit_jump_insn_after (gen_return (), BB_END (jump_block));
+ emit_jump_insn_after_noloc (gen_return (), BB_END (jump_block));
#else
- abort ();
+ gcc_unreachable ();
#endif
}
else
{
rtx label = block_label (target);
- emit_jump_insn_after (gen_jump (label), BB_END (jump_block));
+ emit_jump_insn_after_noloc (gen_jump (label), BB_END (jump_block));
JUMP_LABEL (BB_END (jump_block)) = label;
LABEL_NUSES (label)++;
}
/* Redirect edge even at the expense of creating new jump insn or
basic block. Return new basic block if created, NULL otherwise.
- Abort if conversion is impossible. */
+ Conversion must be possible. */
static basic_block
rtl_redirect_edge_and_branch_force (edge e, basic_block target)
/* ??? In a late-running flow pass, other folks may have deleted basic
blocks by nopping out blocks, leaving multiple BARRIERs between here
- and the target label. They ought to be chastized and fixed.
+ and the target label. They ought to be chastised and fixed.
We can also wind up with a sequence of undeletable labels between
one block and the next.
if (JUMP_P (q)
&& onlyjump_p (q)
&& (any_uncondjump_p (q)
- || (b->succ == e && e->succ_next == NULL)))
+ || single_succ_p (b)))
{
#ifdef HAVE_cc0
/* If this was a conditional jump, we need to also delete
}
/* Split a (typically critical) edge. Return the new block.
- Abort on abnormal edges.
+ The edge must not be abnormal.
??? The code generally expects to be called on critical edges.
The case of a block ending in an unconditional jump to a
rtx before;
/* Abnormal edges cannot be split. */
- if ((edge_in->flags & EDGE_ABNORMAL) != 0)
- abort ();
+ gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
/* We are going to place the new block in front of edge destination.
Avoid existence of fallthru predecessors. */
if ((edge_in->flags & EDGE_FALLTHRU) == 0)
{
edge e;
+ edge_iterator ei;
- for (e = edge_in->dest->pred; e; e = e->pred_next)
+ FOR_EACH_EDGE (e, ei, edge_in->dest->preds)
if (e->flags & EDGE_FALLTHRU)
break;
&& NOTE_LINE_NUMBER (before) == NOTE_INSN_LOOP_END)
before = NEXT_INSN (before);
bb = create_basic_block (before, NULL, edge_in->src);
- bb->partition = edge_in->src->partition;
+ BB_COPY_PARTITION (bb, edge_in->src);
}
else
{
bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
- bb->partition = edge_in->dest->partition;
+ /* ??? Why not edge_in->dest->prev_bb here? */
+ BB_COPY_PARTITION (bb, edge_in->dest);
}
/* ??? This info is likely going to be out of date very soon. */
if (edge_in->dest->global_live_at_start)
{
- bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
- bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
+ bb->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
+ bb->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
COPY_REG_SET (bb->global_live_at_start,
edge_in->dest->global_live_at_start);
COPY_REG_SET (bb->global_live_at_end,
jump instruction to target our new block. */
if ((edge_in->flags & EDGE_FALLTHRU) == 0)
{
- if (!redirect_edge_and_branch (edge_in, bb))
- abort ();
+ edge redirected = redirect_edge_and_branch (edge_in, bb);
+ gcc_assert (redirected);
}
else
redirect_edge_succ (edge_in, bb);
{
/* We cannot insert instructions on an abnormal critical edge.
It will be easier to find the culprit if we die now. */
- if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
- abort ();
+ gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
if (e->insns.r == NULL_RTX)
start_sequence ();
safe_insert_insn_on_edge (rtx insn, edge e)
{
rtx x;
- regset_head killed_head;
- regset killed = INITIALIZE_REG_SET (killed_head);
+ regset killed;
rtx save_regs = NULL_RTX;
- int regno, noccmode;
+ unsigned regno;
+ int noccmode;
enum machine_mode mode;
+ reg_set_iterator rsi;
#ifdef AVOID_CCMODE_COPIES
noccmode = true;
noccmode = false;
#endif
+ killed = ALLOC_REG_SET (&reg_obstack);
+
for (x = insn; x; x = NEXT_INSN (x))
if (INSN_P (x))
note_stores (PATTERN (x), mark_killed_regs, killed);
- bitmap_operation (killed, killed, e->dest->global_live_at_start,
- BITMAP_AND);
- EXECUTE_IF_SET_IN_REG_SET (killed, 0, regno,
+ /* Mark all hard registers as killed. The register allocator and
+ reload cannot cope with a hard register whose life range spans an
+ operation that needs that same register, i.e. it would be unsafe
+ to extend the life ranges of hard registers. */
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (!fixed_regs[regno]
+ && !REGNO_PTR_FRAME_P (regno))
+ SET_REGNO_REG_SET (killed, regno);
+
+ bitmap_and_into (killed, e->dest->global_live_at_start);
+
+ EXECUTE_IF_SET_IN_REG_SET (killed, 0, regno, rsi)
{
mode = regno < FIRST_PSEUDO_REGISTER
? reg_raw_mode[regno]
gen_reg_rtx (mode),
gen_raw_REG (mode, regno)),
save_regs);
- });
+ }
if (save_regs)
{
insert_insn_on_edge (insn, e);
FREE_REG_SET (killed);
+
return true;
}
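Two smaller API conversions land in safe_insert_insn_on_edge above: bitmap_operation (..., BITMAP_AND) becomes bitmap_and_into, and EXECUTE_IF_SET_IN_REG_SET now takes a reg_set_iterator with a plain block body (hence the trailing "});" becoming "}"). The computation itself is unchanged; a standalone model of it, with unsigned longs standing in for regsets:

    /* Standalone model of the regset computation above: AND the set
       of killed registers against the registers live into the edge's
       destination, then walk the surviving bits.  Unsigned longs
       stand in for regsets.  */
    #include <stdio.h>

    int
    main (void)
    {
      unsigned long killed  = 0x2d;  /* regs 0, 2, 3, 5 are written */
      unsigned long live_in = 0x28;  /* regs 3, 5 live at the dest */

      killed &= live_in;             /* bitmap_and_into analogue */

      for (unsigned regno = 0; regno < 8 * sizeof killed; regno++)
        if (killed & (1UL << regno))
          printf ("save/restore reg %u\n", regno);  /* regs 3 and 5 */
      return 0;
    }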
/* Special case -- avoid inserting code between call and storing
its return value. */
- if (watch_calls && (e->flags & EDGE_FALLTHRU) && !e->dest->pred->pred_next
+ if (watch_calls && (e->flags & EDGE_FALLTHRU)
+ && single_pred_p (e->dest)
&& e->src != ENTRY_BLOCK_PTR
&& CALL_P (BB_END (e->src)))
{
{
/* Figure out where to put these things. If the destination has
one predecessor, insert there. Except for the exit block. */
- if (e->dest->pred->pred_next == NULL && e->dest != EXIT_BLOCK_PTR)
+ if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR)
{
bb = e->dest;
tmp = NEXT_INSN (tmp);
if (NOTE_INSN_BASIC_BLOCK_P (tmp))
tmp = NEXT_INSN (tmp);
- if (tmp
- && NOTE_P (tmp)
- && NOTE_LINE_NUMBER (tmp) == NOTE_INSN_UNLIKELY_EXECUTED_CODE)
- tmp = NEXT_INSN (tmp);
if (tmp == BB_HEAD (bb))
before = tmp;
else if (tmp)
/* If the source has one successor and the edge is not abnormal,
insert there. Except for the entry block. */
else if ((e->flags & EDGE_ABNORMAL) == 0
- && e->src->succ->succ_next == NULL
+ && single_succ_p (e->src)
&& e->src != ENTRY_BLOCK_PTR)
{
bb = e->src;
;
else
{
- /* We'd better be fallthru, or we've lost track of what's what. */
- if ((e->flags & EDGE_FALLTHRU) == 0)
- abort ();
+ /* We'd better be fallthru, or we've lost track of
+ what's what. */
+ gcc_assert (e->flags & EDGE_FALLTHRU);
after = BB_END (bb);
}
if (flag_reorder_blocks_and_partition
&& targetm.have_named_sections
&& e->src != ENTRY_BLOCK_PTR
- && e->src->partition == COLD_PARTITION
- && !e->crossing_edge)
+ && BB_PARTITION (e->src) == BB_COLD_PARTITION
+ && !(e->flags & EDGE_CROSSING))
{
- rtx bb_note, new_note, cur_insn;
+ rtx bb_note, cur_insn;
bb_note = NULL_RTX;
for (cur_insn = BB_HEAD (bb); cur_insn != NEXT_INSN (BB_END (bb));
break;
}
- new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE,
- bb_note);
- NOTE_BASIC_BLOCK (new_note) = bb;
if (JUMP_P (BB_END (bb))
&& !any_condjump_p (BB_END (bb))
- && bb->succ->crossing_edge )
+ && (single_succ_edge (bb)->flags & EDGE_CROSSING))
REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST
(REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb)));
- if (after == bb_note)
- after = new_note;
}
}
}
if (before)
{
- emit_insn_before (insns, before);
+ emit_insn_before_noloc (insns, before);
last = prev_nonnote_insn (before);
}
else
- last = emit_insn_after (insns, after);
+ last = emit_insn_after_noloc (insns, after);
if (returnjump_p (last))
{
for the (single) epilogue, which already has a fallthru edge
to EXIT. */
- e = bb->succ;
- if (e->dest != EXIT_BLOCK_PTR
- || e->succ_next != NULL || (e->flags & EDGE_FALLTHRU) == 0)
- abort ();
+ e = single_succ_edge (bb);
+ gcc_assert (e->dest == EXIT_BLOCK_PTR
+ && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
e->flags &= ~EDGE_FALLTHRU;
emit_barrier_after (last);
if (before)
delete_insn (before);
}
- else if (JUMP_P (last))
- abort ();
+ else
+ gcc_assert (!JUMP_P (last));
- /* Mark the basic block for find_sub_basic_blocks. */
+ /* Mark the basic block for find_many_sub_basic_blocks. */
bb->aux = &bb->aux;
}
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
{
- edge e, next;
+ edge e;
+ edge_iterator ei;
- for (e = bb->succ; e; e = next)
- {
- next = e->succ_next;
- if (e->insns.r)
- {
- changed = true;
- commit_one_edge_insertion (e, false);
- }
- }
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (e->insns.r)
+ {
+ changed = true;
+ commit_one_edge_insertion (e, false);
+ }
}
if (!changed)
SET_BIT (blocks, bb->index);
/* Check for forgotten bb->aux values before commit_edge_insertions
call. */
- if (bb->aux != &bb->aux)
- abort ();
+ gcc_assert (bb->aux == &bb->aux);
bb->aux = NULL;
}
find_many_sub_basic_blocks (blocks);
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
{
- edge e, next;
+ edge e;
+ edge_iterator ei;
- for (e = bb->succ; e; e = next)
- {
- next = e->succ_next;
- if (e->insns.r)
- {
- changed = true;
- commit_one_edge_insertion (e, true);
- }
- }
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (e->insns.r)
+ {
+ changed = true;
+ commit_one_edge_insertion (e, true);
+ }
}
if (!changed)
SET_BIT (blocks, bb->index);
/* Check for forgotten bb->aux values before commit_edge_insertions
call. */
- if (bb->aux != &bb->aux)
- abort ();
+ gcc_assert (bb->aux == &bb->aux);
bb->aux = NULL;
}
find_many_sub_basic_blocks (blocks);
basic_block *bb_info;
rtx x;
int err = 0;
- basic_block bb, last_bb_seen;
+ basic_block bb;
bb_info = xcalloc (max_uid, sizeof (basic_block));
- /* Check bb chain & numbers. */
- last_bb_seen = ENTRY_BLOCK_PTR;
-
FOR_EACH_BB_REVERSE (bb)
{
rtx head = BB_HEAD (bb);
int n_fallthru = 0, n_eh = 0, n_call = 0, n_abnormal = 0, n_branch = 0;
edge e, fallthru = NULL;
rtx note;
+ edge_iterator ei;
- if (INSN_P (BB_END (bb))
+ if (JUMP_P (BB_END (bb))
&& (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
- && bb->succ && bb->succ->succ_next
+ && EDGE_COUNT (bb->succs) >= 2
&& any_condjump_p (BB_END (bb)))
{
- if (INTVAL (XEXP (note, 0)) != BRANCH_EDGE (bb)->probability)
+ if (INTVAL (XEXP (note, 0)) != BRANCH_EDGE (bb)->probability
+ && profile_status != PROFILE_ABSENT)
{
error ("verify_flow_info: REG_BR_PROB does not match cfg %wi %i",
INTVAL (XEXP (note, 0)), BRANCH_EDGE (bb)->probability);
err = 1;
}
}
- for (e = bb->succ; e; e = e->succ_next)
+ FOR_EACH_EDGE (e, ei, bb->succs)
{
if (e->flags & EDGE_FALLTHRU)
{
n_fallthru++, fallthru = e;
- if (e->crossing_edge
- || (e->src->partition != e->dest->partition
+ if ((e->flags & EDGE_CROSSING)
+ || (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
&& e->src != ENTRY_BLOCK_PTR
&& e->dest != EXIT_BLOCK_PTR))
{
if ((e->flags & ~(EDGE_DFS_BACK
| EDGE_CAN_FALLTHRU
| EDGE_IRREDUCIBLE_LOOP
- | EDGE_LOOP_EXIT)) == 0)
+ | EDGE_LOOP_EXIT
+ | EDGE_CROSSING)) == 0)
n_branch++;
if (e->flags & EDGE_ABNORMAL_CALL)
err = 1;
}
if (n_branch != 1 && any_condjump_p (BB_END (bb))
- && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
+ && JUMP_LABEL (BB_END (bb)) == BB_HEAD (fallthru->dest))
{
error ("Wrong amount of branch edges after conditional jump %i", bb->index);
err = 1;
}
for (x = BB_HEAD (bb); x != NEXT_INSN (BB_END (bb)); x = NEXT_INSN (x))
- if (BLOCK_FOR_INSN (x) != bb)
+ /* We may have a barrier inside a basic block before dead code
+ elimination. There is no BLOCK_FOR_INSN field in a barrier. */
+ if (!BARRIER_P (x) && BLOCK_FOR_INSN (x) != bb)
{
debug_rtx (x);
if (! BLOCK_FOR_INSN (x))
}
if (BB_END (bb) == x)
- /* Do checks for empty blocks her. e */
+ /* Do checks for empty blocks here. */
;
else
for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
FOR_EACH_BB_REVERSE (bb)
{
edge e;
- for (e = bb->succ; e; e = e->succ_next)
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
if (e->flags & EDGE_FALLTHRU)
break;
if (!e)
else
for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
insn = NEXT_INSN (insn))
- if (BARRIER_P (insn)
-#ifndef CASE_DROPS_THROUGH
- || INSN_P (insn)
-#else
- || (INSN_P (insn) && ! JUMP_TABLE_DATA_P (insn))
-#endif
- )
+ if (BARRIER_P (insn) || INSN_P (insn))
{
error ("verify_flow_info: Incorrect fallthru %i->%i",
e->src->index, e->dest->index);
}
}
- if (INSN_P (x)
- && JUMP_P (x)
+ if (JUMP_P (x)
&& returnjump_p (x) && ! condjump_p (x)
&& ! (NEXT_INSN (x) && BARRIER_P (NEXT_INSN (x))))
fatal_insn ("return not followed by barrier", x);
bool
purge_dead_edges (basic_block bb)
{
- edge e, next;
+ edge e;
rtx insn = BB_END (bb), note;
bool purged = false;
+ bool found;
+ edge_iterator ei;
/* If this instruction cannot trap, remove REG_EH_REGION notes. */
if (NONJUMP_INSN_P (insn)
}
/* Cleanup abnormal edges caused by exceptions or non-local gotos. */
- for (e = bb->succ; e; e = next)
+ for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
{
- next = e->succ_next;
if (e->flags & EDGE_EH)
{
if (can_throw_internal (BB_END (bb)))
- continue;
+ {
+ ei_next (&ei);
+ continue;
+ }
}
else if (e->flags & EDGE_ABNORMAL_CALL)
{
if (CALL_P (BB_END (bb))
&& (! (note = find_reg_note (insn, REG_EH_REGION, NULL))
|| INTVAL (XEXP (note, 0)) >= 0))
- continue;
+ {
+ ei_next (&ei);
+ continue;
+ }
}
else
- continue;
+ {
+ ei_next (&ei);
+ continue;
+ }
remove_edge (e);
bb->flags |= BB_DIRTY;
{
rtx note;
edge b, f;
+ edge_iterator ei;
/* We do care only about conditional jumps and simplejumps. */
if (!any_condjump_p (insn)
remove_note (insn, note);
}
- for (e = bb->succ; e; e = next)
+ for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
{
- next = e->succ_next;
-
/* Avoid abnormal flags to leak from computed jumps turned
into simplejumps. */
if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
/* A conditional jump can fall through into the next
block, so we should keep the edge. */
- continue;
+ {
+ ei_next (&ei);
+ continue;
+ }
else if (e->dest != EXIT_BLOCK_PTR
&& BB_HEAD (e->dest) == JUMP_LABEL (insn))
/* If the destination block is the target of the jump,
keep the edge. */
- continue;
+ {
+ ei_next (&ei);
+ continue;
+ }
else if (e->dest == EXIT_BLOCK_PTR && returnjump_p (insn))
/* If the destination block is the exit block, and this
instruction is a return, then keep the edge. */
- continue;
+ {
+ ei_next (&ei);
+ continue;
+ }
else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
/* Keep the edges that correspond to exceptions thrown by
this instruction and rematerialize the EDGE_ABNORMAL
flag we just cleared above. */
{
e->flags |= EDGE_ABNORMAL;
+ ei_next (&ei);
continue;
}
remove_edge (e);
}
- if (!bb->succ || !purged)
+ if (EDGE_COUNT (bb->succs) == 0 || !purged)
return purged;
if (dump_file)
return purged;
/* Redistribute probabilities. */
- if (!bb->succ->succ_next)
+ if (single_succ_p (bb))
{
- bb->succ->probability = REG_BR_PROB_BASE;
- bb->succ->count = bb->count;
+ single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
+ single_succ_edge (bb)->count = bb->count;
}
else
{
from non-local gotos and the like. If there were, we shouldn't
have created the sibcall in the first place. Second, there
should of course never have been a fallthru edge. */
- if (!bb->succ || bb->succ->succ_next)
- abort ();
- if (bb->succ->flags != (EDGE_SIBCALL | EDGE_ABNORMAL))
- abort ();
+ gcc_assert (single_succ_p (bb));
+ gcc_assert (single_succ_edge (bb)->flags
+ == (EDGE_SIBCALL | EDGE_ABNORMAL));
return 0;
}
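The probability redistribution earlier in purge_dead_edges relies on GCC's fixed-point convention: the probabilities of a block's outgoing edges sum to REG_BR_PROB_BASE, so a lone surviving successor simply takes the whole base. A small standalone illustration:

    /* Illustrates the REG_BR_PROB_BASE fixed-point scale: a block's
       outgoing probabilities sum to the base, so after purging all
       but one edge the survivor takes the whole base.  */
    #include <stdio.h>

    #define REG_BR_PROB_BASE 10000  /* GCC's value */

    int
    main (void)
    {
      int taken = 7000;                         /* 70% of the base */
      int fallthru = REG_BR_PROB_BASE - taken;  /* remaining 30% */

      printf ("taken %.0f%%, fallthru %.0f%%\n",
              100.0 * taken / REG_BR_PROB_BASE,
              100.0 * fallthru / REG_BR_PROB_BASE);

      /* After the branch edge is purged, the single successor gets
         probability REG_BR_PROB_BASE, i.e. 100%.  */
      printf ("after purge: %d\n", REG_BR_PROB_BASE);
      return 0;
    }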
as these are only created by conditional branches. If we find such an
edge we know that there used to be a jump here and can then safely
remove all non-fallthru edges. */
- for (e = bb->succ; e && (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU));
- e = e->succ_next)
- ;
+ found = false;
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
+ {
+ found = true;
+ break;
+ }
- if (!e)
+ if (!found)
return purged;
- for (e = bb->succ; e; e = next)
+ /* Remove all but the fake and fallthru edges. The fake edge may be
+ the only successor for this block in the case of noreturn
+ calls. */
+ for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
{
- next = e->succ_next;
- if (!(e->flags & EDGE_FALLTHRU))
+ if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
{
bb->flags |= BB_DIRTY;
remove_edge (e);
purged = true;
}
+ else
+ ei_next (&ei);
}
- if (!bb->succ || bb->succ->succ_next)
- abort ();
+ gcc_assert (single_succ_p (bb));
- bb->succ->probability = REG_BR_PROB_BASE;
- bb->succ->count = bb->count;
+ single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
+ single_succ_edge (bb)->count = bb->count;
if (dump_file)
fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
true if some edge has been eliminated. */
bool
-purge_all_dead_edges (int update_life_p)
+purge_all_dead_edges (void)
{
int purged = false;
- sbitmap blocks = 0;
basic_block bb;
- if (update_life_p)
- {
- blocks = sbitmap_alloc (last_basic_block);
- sbitmap_zero (blocks);
- }
-
FOR_EACH_BB (bb)
{
bool purged_here = purge_dead_edges (bb);
purged |= purged_here;
- if (purged_here && update_life_p)
- SET_BIT (blocks, bb->index);
}
- if (update_life_p && purged)
- update_life_info (blocks, UPDATE_LIFE_GLOBAL,
- PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
- | PROP_KILL_DEAD_CODE);
-
- if (update_life_p)
- sbitmap_free (blocks);
return purged;
}
&& label_is_jump_target_p (BB_HEAD (e->dest),
BB_END (src)))
{
+ edge redirected;
+
if (dump_file)
fprintf (dump_file, "Fallthru edge unified with branch "
"%i->%i redirected to %i\n",
e->src->index, e->dest->index, dest->index);
e->flags &= ~EDGE_FALLTHRU;
- if (!redirect_branch_edge (e, dest))
- abort ();
+ redirected = redirect_branch_edge (e, dest);
+ gcc_assert (redirected);
e->flags |= EDGE_FALLTHRU;
e->src->flags |= BB_DIRTY;
return e;
}
/* In case we are redirecting fallthru edge to the branch edge
of conditional jump, remove it. */
- if (src->succ->succ_next
- && !src->succ->succ_next->succ_next)
+ if (EDGE_COUNT (src->succs) == 2)
{
- edge s = e->succ_next ? e->succ_next : src->succ;
+ /* Find the edge that is different from E. */
+ edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
+
if (s->dest == dest
&& any_condjump_p (BB_END (src))
&& onlyjump_p (BB_END (src)))
ret = redirect_branch_edge (e, dest);
/* We don't want simplejumps in the insn stream during cfglayout. */
- if (simplejump_p (BB_END (src)))
- abort ();
+ gcc_assert (!simplejump_p (BB_END (src)));
src->flags |= BB_DIRTY;
return ret;
static basic_block
cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
{
- if (!cfg_layout_redirect_edge_and_branch (e, dest))
- abort ();
+ edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
+
+ gcc_assert (redirected);
return NULL;
}
to = &bb->next_bb->rbi->header;
else
to = &cfg_layout_function_footer;
+
+ bb->rbi = NULL;
+
rtl_delete_block (bb);
if (prev)
static bool
cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
{
- bool partitions_ok = true;
-
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
- and cold sections. */
-
- if (flag_reorder_blocks_and_partition
- && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
- || find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
- || a->partition != b->partition))
- partitions_ok = false;
+ and cold sections.
+
+ Basic block partitioning may result in some jumps that appear to
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
+ bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
+
+ if (BB_PARTITION (a) != BB_PARTITION (b))
+ return false;
/* There must be exactly one edge in between the blocks. */
- return (a->succ && !a->succ->succ_next && a->succ->dest == b
- && !b->pred->pred_next && a != b
+ return (single_succ_p (a)
+ && single_succ (a) == b
+ && single_pred_p (b)
+ && a != b
/* Must be simple edge. */
- && !(a->succ->flags & EDGE_COMPLEX)
- && partitions_ok
+ && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
&& a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
/* If the jump insn has side effects,
we can't kill the edge. */
? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
}
-/* Merge block A and B, abort when it is not possible. */
+/* Merge block A and B. The blocks must be mergeable. */
+
static void
cfg_layout_merge_blocks (basic_block a, basic_block b)
{
#ifdef ENABLE_CHECKING
- if (!cfg_layout_can_merge_blocks_p (a, b))
- abort ();
+ gcc_assert (cfg_layout_can_merge_blocks_p (a, b));
#endif
/* If there was a CODE_LABEL beginning B, delete it. */
/* We should have a fallthru edge in A, or we can do a dummy redirection
to get it cleaned up. */
if (JUMP_P (BB_END (a)))
- try_redirect_by_replacing_jump (a->succ, b, true);
- if (JUMP_P (BB_END (a)))
- abort ();
+ try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
+ gcc_assert (!JUMP_P (BB_END (a)));
/* Possible line number notes should appear in between. */
if (b->rbi->header)
{
rtx first = BB_END (a), last;
- last = emit_insn_after (b->rbi->header, BB_END (a));
+ last = emit_insn_after_noloc (b->rbi->header, BB_END (a));
delete_insn_chain (NEXT_INSN (first), last);
b->rbi->header = NULL;
}
{
rtx first = unlink_insn_chain (BB_HEAD (b), BB_END (b));
- emit_insn_after (first, BB_END (a));
+ emit_insn_after_noloc (first, BB_END (a));
/* Skip possible DELETED_LABEL insn. */
if (!NOTE_INSN_BASIC_BLOCK_P (first))
first = NEXT_INSN (first);
- if (!NOTE_INSN_BASIC_BLOCK_P (first))
- abort ();
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (first));
BB_HEAD (b) = NULL;
delete_insn (first);
}
/* Skip possible DELETED_LABEL insn. */
if (!NOTE_INSN_BASIC_BLOCK_P (insn))
insn = NEXT_INSN (insn);
- if (!NOTE_INSN_BASIC_BLOCK_P (insn))
- abort ();
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
BB_HEAD (b) = NULL;
BB_END (a) = BB_END (b);
delete_insn (insn);
static basic_block
cfg_layout_split_edge (edge e)
{
- edge new_e;
basic_block new_bb =
create_basic_block (e->src != ENTRY_BLOCK_PTR
? NEXT_INSN (BB_END (e->src)) : get_insns (),
create it to avoid getting an ICE later. */
if (e->dest->global_live_at_start)
{
- new_bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
- new_bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
+ new_bb->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
+ new_bb->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
COPY_REG_SET (new_bb->global_live_at_start,
e->dest->global_live_at_start);
COPY_REG_SET (new_bb->global_live_at_end,
e->dest->global_live_at_start);
}
- new_e = make_edge (new_bb, e->dest, EDGE_FALLTHRU);
+ make_edge (new_bb, e->dest, EDGE_FALLTHRU);
redirect_edge_and_branch_force (e, new_bb);
return new_bb;
if ((CALL_P (insn)
&& !SIBLING_CALL_P (insn)
&& !find_reg_note (insn, REG_NORETURN, NULL)
- && !find_reg_note (insn, REG_ALWAYS_RETURN, NULL)
&& !CONST_OR_PURE_CALL_P (insn)))
return true;
{
edge e;
- for (e = bb->succ; e; e = e->succ_next)
- if (e->dest == EXIT_BLOCK_PTR)
- {
- insert_insn_on_edge (gen_rtx_USE (VOIDmode, const0_rtx), e);
- commit_edge_insertions ();
- break;
- }
+ e = find_edge (bb, EXIT_BLOCK_PTR);
+ if (e)
+ {
+ insert_insn_on_edge (gen_rtx_USE (VOIDmode, const0_rtx), e);
+ commit_edge_insertions ();
+ }
}
}
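The conversion above replaces a hand-written successor scan with find_edge, which returns the edge from one block to another or NULL when none exists. A self-contained toy analogue of that lookup:

    /* Toy analogue of find_edge: return the successor edge reaching
       DEST, or NULL when there is none.  */
    #include <stddef.h>
    #include <stdio.h>

    typedef struct edge_def { int dest; } *edge;

    static edge
    find_edge_model (edge *succs, unsigned count, int dest)
    {
      for (unsigned i = 0; i < count; i++)
        if (succs[i]->dest == dest)
          return succs[i];
      return NULL;
    }

    int
    main (void)
    {
      struct edge_def e0 = { 7 }, e1 = { 99 };  /* 99: toy exit block */
      edge succs[] = { &e0, &e1 };

      edge e = find_edge_model (succs, 2, 99);
      printf ("%s\n", e ? "found edge to exit" : "no edge to exit");
      return 0;
    }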
#ifdef ENABLE_CHECKING
if (split_at_insn == BB_END (bb))
- for (e = bb->succ; e; e = e->succ_next)
- if (e->dest == EXIT_BLOCK_PTR)
- abort ();
+ {
+ e = find_edge (bb, EXIT_BLOCK_PTR);
+ gcc_assert (e == NULL);
+ }
#endif
/* Note that the following may create a new basic block
return blocks_split;
}
+/* Add COMP_RTX as a condition at end of COND_BB.  FIRST_HEAD is
+ the conditional branch target, SECOND_HEAD should be the fall-thru;
+ there is no need to handle the fall-thru here because the loop
+ versioning code handles it.  SECOND_HEAD is still passed because the
+ tree-level version of this hook needs the condition's fall-thru, and
+ the two hooks must share a type.  */
+static void
+rtl_lv_add_condition_to_bb (basic_block first_head,
+ basic_block second_head ATTRIBUTE_UNUSED,
+ basic_block cond_bb, void *comp_rtx)
+{
+ rtx label, seq, jump;
+ rtx op0 = XEXP ((rtx)comp_rtx, 0);
+ rtx op1 = XEXP ((rtx)comp_rtx, 1);
+ enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
+ enum machine_mode mode;
+
+
+ label = block_label (first_head);
+ mode = GET_MODE (op0);
+ if (mode == VOIDmode)
+ mode = GET_MODE (op1);
+
+ start_sequence ();
+ op0 = force_operand (op0, NULL_RTX);
+ op1 = force_operand (op1, NULL_RTX);
+ do_compare_rtx_and_jump (op0, op1, comp, 0,
+ mode, NULL_RTX, NULL_RTX, label);
+ jump = get_last_insn ();
+ JUMP_LABEL (jump) = label;
+ LABEL_NUSES (label)++;
+ seq = get_insns ();
+ end_sequence ();
+
+ /* Add the new cond, in the new head. */
+ emit_insn_after (seq, BB_END (cond_bb));
+}
+
+
+/* Given a block B with a conditional branch at its end, return the
+ branch edge and the fall-thru edge in BRANCH_EDGE and FALLTHRU_EDGE
+ respectively. */
+static void
+rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
+ edge *fallthru_edge)
+{
+ edge e = EDGE_SUCC (b, 0);
+
+ if (e->flags & EDGE_FALLTHRU)
+ {
+ *fallthru_edge = e;
+ *branch_edge = EDGE_SUCC (b, 1);
+ }
+ else
+ {
+ *branch_edge = e;
+ *fallthru_edge = EDGE_SUCC (b, 1);
+ }
+}
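The two new hook bodies above are wired into the lv_add_condition_to_bb and extract_cond_bb_edges slots of cfg_layout_rtl_cfg_hooks below, so loop-versioning code reaches them only through the hooks table and the same call site serves both the tree and RTL implementations. A minimal standalone model of that dispatch shape; the struct and names are illustrative, not GCC's actual declarations:

    /* Toy model of dispatch through a cfg_hooks-style table: generic
       code calls through the table and never names the RTL or tree
       implementation directly.  */
    #include <stdio.h>

    struct cfg_hooks_model
    {
      const char *name;
      void (*extract_cond_bb_edges) (int block);
    };

    static void
    rtl_extract_model (int block)
    {
      printf ("rtl: classifying the two successors of block %d\n", block);
    }

    static struct cfg_hooks_model rtl_hooks_model = {
      "rtl", rtl_extract_model
    };

    int
    main (void)
    {
      struct cfg_hooks_model *cfg_hooks = &rtl_hooks_model;

      cfg_hooks->extract_cond_bb_edges (42);  /* as cfghooks.c would */
      return 0;
    }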
+
+
/* Implementation of CFG manipulation for linearized RTL. */
struct cfg_hooks rtl_cfg_hooks = {
"rtl",
rtl_tidy_fallthru_edge,
rtl_block_ends_with_call_p,
rtl_block_ends_with_condjump_p,
- rtl_flow_call_edges_add
+ rtl_flow_call_edges_add,
+ NULL, /* execute_on_growing_pred */
+ NULL, /* execute_on_shrinking_pred */
+ NULL, /* duplicate loop for trees */
+ NULL, /* lv_add_condition_to_bb */
+ NULL, /* lv_adjust_loop_header_phi */
+ NULL, /* extract_cond_bb_edges */
+ NULL /* flush_pending_stmts */
};
/* Implementation of CFG manipulation for cfg layout RTL, where
NULL,
rtl_block_ends_with_call_p,
rtl_block_ends_with_condjump_p,
- rtl_flow_call_edges_add
+ rtl_flow_call_edges_add,
+ NULL, /* execute_on_growing_pred */
+ NULL, /* execute_on_shrinking_pred */
+ duplicate_loop_to_header_edge, /* duplicate loop for trees */
+ rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
+ NULL, /* lv_adjust_loop_header_phi */
+ rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
+ NULL /* flush_pending_stmts */
};