- CFG-aware instruction chain manipulation
delete_insn, delete_insn_chain
- Basic block manipulation
- create_basic_block, flow_delete_block, split_block,
+ create_basic_block, rtl_delete_block, rtl_split_block,
merge_blocks_nomove
- Infrastructure to quickly determine the basic block for an insn
compute_bb_for_insn, update_bb_for_insn, set_block_for_insn,
- Edge redirection with updating and optimizing of the insn chain
block_label, redirect_edge_and_branch,
redirect_edge_and_branch_force, tidy_fallthru_edge, force_nonfallthru
- - Edge splitting and commiting to edges
+ - Edge splitting and committing to edges
split_edge, insert_insn_on_edge, commit_edge_insertions
- Dumping and debugging
print_rtl_with_bb, dump_bb, debug_bb, debug_bb_n
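+ - CFG manipulation hook tables for the two RTL representations
+ rtl_cfg_hooks, cfg_layout_rtl_cfg_hooks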
\f
#include "config.h"
#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "tm_p.h"
#include "obstack.h"
#include "insn-config.h"
+#include "cfglayout.h"
/* Stubs in case we don't have a return insn. */
#ifndef HAVE_return
#define gen_return() NULL_RTX
#endif
-/* The basic block structure for every insn, indexed by uid. */
-varray_type basic_block_for_insn;
-
/* The labels mentioned in non-jump rtl. Valid during find_basic_blocks. */
/* ??? Should probably be using LABEL_NUSES instead. It would take a
bit of surgery to be able to use or co-opt the routines in jump. */
static bool try_redirect_by_replacing_jump PARAMS ((edge, basic_block));
static rtx last_loop_beg_note PARAMS ((rtx));
static bool back_edge_of_syntactic_loop_p PARAMS ((basic_block, basic_block));
-static basic_block force_nonfallthru_and_redirect PARAMS ((edge, basic_block));
+basic_block force_nonfallthru_and_redirect PARAMS ((edge, basic_block));
+static basic_block rtl_split_edge PARAMS ((edge));
+static void rtl_verify_flow_info PARAMS ((void));
+static edge cfg_layout_split_block PARAMS ((basic_block, void *));
+static bool cfg_layout_redirect_edge_and_branch PARAMS ((edge, basic_block));
+static basic_block cfg_layout_redirect_edge_and_branch_force PARAMS ((edge, basic_block));
+static void cfg_layout_delete_block PARAMS ((basic_block));
+static void rtl_delete_block PARAMS ((basic_block));
+static basic_block rtl_redirect_edge_and_branch_force PARAMS ((edge, basic_block));
+static bool rtl_redirect_edge_and_branch PARAMS ((edge, basic_block));
+static edge rtl_split_block PARAMS ((basic_block, void *));
\f
/* Return true if NOTE is not one of the ones that must be kept paired,
so that we may simply delete it. */
rtx x;
bool purge = false;
- if (basic_block_for_insn
- && INSN_P (insn)
- && (unsigned int)INSN_UID (insn) < basic_block_for_insn->num_elements
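+ /* Deleting the insn that ends a basic block can leave dead edges
+ hanging off that block; remember to purge them. */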
+ if (INSN_P (insn)
&& BLOCK_FOR_INSN (insn)
&& BLOCK_FOR_INSN (insn)->end == insn)
purge = true;
{
bool purge = false;
- if (basic_block_for_insn
- && INSN_P (last)
- && (unsigned int)INSN_UID (last) < basic_block_for_insn->num_elements
+ if (INSN_P (last)
&& BLOCK_FOR_INSN (last)
&& BLOCK_FOR_INSN (last)->end == last)
purge = true;
AFTER is the basic block the new block should be put after. */
basic_block
-create_basic_block_structure (index, head, end, bb_note, after)
- int index;
+create_basic_block_structure (head, end, bb_note, after)
rtx head, end, bb_note;
basic_block after;
{
}
if (after != bb_note && NEXT_INSN (after) != bb_note)
- reorder_insns (bb_note, bb_note, after);
+ reorder_insns_nobb (bb_note, bb_note, after);
}
else
{
bb->head = head;
bb->end = end;
- bb->index = index;
+ bb->index = last_basic_block++;
bb->flags = BB_NEW;
link_block (bb, after);
- BASIC_BLOCK (index) = bb;
- if (basic_block_for_insn)
- update_bb_for_insn (bb);
+ BASIC_BLOCK (bb->index) = bb;
+ update_bb_for_insn (bb);
/* Tag the block so that we know it has been used when considering
other basic block notes. */
basic_block after;
{
basic_block bb;
- int i;
- int index = after->index + 1;
- /* Place the new block just after the block being split. */
- VARRAY_GROW (basic_block_info, ++n_basic_blocks);
+ /* The new block will get the first free index, at the end of the
+ basic_block_info array, so grow the array first. */
+ VARRAY_GROW (basic_block_info, last_basic_block + 1);
- /* Some parts of the compiler expect blocks to be number in
- sequential order so insert the new block immediately after the
- block being split.. */
- for (i = n_basic_blocks - 1; i > index; --i)
- {
- basic_block tmp = BASIC_BLOCK (i - 1);
+ n_basic_blocks++;
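+
+ /* n_basic_blocks counts the blocks actually present, while
+ last_basic_block is the first free index; indices of live blocks
+ no longer have to be contiguous. */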
- BASIC_BLOCK (i) = tmp;
- tmp->index = i;
- }
-
- bb = create_basic_block_structure (index, head, end, NULL, after);
+ bb = create_basic_block_structure (head, end, NULL, after);
bb->aux = NULL;
return bb;
}
/* ??? Preserving all such notes strikes me as wrong. It would be nice
to post-process the stream to remove empty blocks, loops, ranges, etc. */
-int
+void
flow_delete_block_noexpunge (b)
basic_block b;
{
- int deleted_handler = 0;
rtx insn, end, tmp;
/* If the head of this block is a CODE_LABEL, then it might be the
and remove the associated NOTE_INSN_EH_REGION_BEG and
NOTE_INSN_EH_REGION_END notes. */
- /* Get rid of all NOTE_INSN_PREDICTIONs hanging before the block. */
+ /* Get rid of all NOTE_INSN_PREDICTIONs and NOTE_INSN_LOOP_CONTs
+ hanging before the block. */
for (insn = PREV_INSN (b->head); insn; insn = PREV_INSN (insn))
{
if (GET_CODE (insn) != NOTE)
break;
- if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PREDICTION)
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PREDICTION
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
}
/* Include any jump table following the basic block. */
end = b->end;
- if (GET_CODE (end) == JUMP_INSN
- && (tmp = JUMP_LABEL (end)) != NULL_RTX
- && (tmp = NEXT_INSN (tmp)) != NULL_RTX
- && GET_CODE (tmp) == JUMP_INSN
- && (GET_CODE (PATTERN (tmp)) == ADDR_VEC
- || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC))
+ if (tablejump_p (end, NULL, &tmp))
end = tmp;
/* Include any barrier that may follow the basic block. */
b->pred = NULL;
b->succ = NULL;
-
- return deleted_handler;
}
-int
-flow_delete_block (b)
+static void
+rtl_delete_block (b)
basic_block b;
{
- int deleted_handler = flow_delete_block_noexpunge (b);
+ flow_delete_block_noexpunge (b);
- /* Remove the basic block from the array, and compact behind it. */
+ /* Remove the basic block from the array. */
expunge_block (b);
-
- return deleted_handler;
}
\f
-/* Records the basic block struct in BB_FOR_INSN, for every instruction
- indexed by INSN_UID. MAX is the size of the array. */
+/* Records the basic block struct in BLOCK_FOR_INSN for every insn. */
void
-compute_bb_for_insn (max)
- int max;
+compute_bb_for_insn ()
{
basic_block bb;
- if (basic_block_for_insn)
- VARRAY_FREE (basic_block_for_insn);
-
- VARRAY_BB_INIT (basic_block_for_insn, max, "basic_block_for_insn");
-
FOR_EACH_BB (bb)
{
rtx end = bb->end;
for (insn = bb->head; ; insn = NEXT_INSN (insn))
{
- if (INSN_UID (insn) < max)
- VARRAY_BB (basic_block_for_insn, INSN_UID (insn)) = bb;
-
+ BLOCK_FOR_INSN (insn) = bb;
if (insn == end)
break;
}
void
free_bb_for_insn ()
{
- if (basic_block_for_insn)
- VARRAY_FREE (basic_block_for_insn);
-
- basic_block_for_insn = 0;
+ rtx insn;
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) != BARRIER)
+ BLOCK_FOR_INSN (insn) = NULL;
}
/* Update BLOCK_FOR_INSN for the insns within basic block BB. */
{
rtx insn;
- if (! basic_block_for_insn)
- return;
-
for (insn = bb->head; ; insn = NEXT_INSN (insn))
{
- set_block_for_insn (insn, bb);
+ if (GET_CODE (insn) != BARRIER)
+ set_block_for_insn (insn, bb);
if (insn == bb->end)
break;
}
}
-
-/* Record INSN's block as BB. */
-
-void
-set_block_for_insn (insn, bb)
- rtx insn;
- basic_block bb;
-{
- size_t uid = INSN_UID (insn);
-
- if (uid >= basic_block_for_insn->num_elements)
- {
- /* Add one-eighth the size so we don't keep calling xrealloc. */
- size_t new_size = uid + (uid + 7) / 8;
-
- VARRAY_GROW (basic_block_for_insn, new_size);
- }
-
- VARRAY_BB (basic_block_for_insn, uid) = bb;
-}
\f
/* Split a block BB after insn INSN creating a new fallthru edge.
Return the new edge. The new basic block is placed immediately after
the block being split. */
-edge
-split_block (bb, insn)
+static edge
+rtl_split_block (bb, insnp)
basic_block bb;
- rtx insn;
+ void *insnp;
{
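+ /* The insn to split after arrives as a void pointer so that this
+ function matches the split_block member of struct cfg_hooks; see
+ rtl_cfg_hooks at the end of this file. */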
basic_block new_bb;
edge new_edge;
edge e;
+ rtx insn = insnp;
/* There is no point splitting the block after its end. */
if (bb->end == insn)
/* Reassociate the insns of B with A. */
if (!b_empty)
{
- if (basic_block_for_insn)
- {
- rtx x;
+ rtx x;
- for (x = a_end; x != b_end; x = NEXT_INSN (x))
- set_block_for_insn (x, a);
+ for (x = a_end; x != b_end; x = NEXT_INSN (x))
+ set_block_for_insn (x, a);
- set_block_for_insn (b_end, a);
- }
+ set_block_for_insn (b_end, a);
a_end = b_end;
}
if (GET_CODE (block->head) != CODE_LABEL)
{
block->head = emit_label_before (gen_label_rtx (), block->head);
- if (basic_block_for_insn)
- set_block_for_insn (block->head, block);
}
return block->head;
basic_block src = e->src;
rtx insn = src->end, kill_from;
edge tmp;
- rtx set, table;
+ rtx set;
int fallthru = 0;
/* Verify that all targets will be TARGET. */
if (tmp || !onlyjump_p (insn))
return false;
- if (reload_completed && JUMP_LABEL (insn)
- && (table = NEXT_INSN (JUMP_LABEL (insn))) != NULL_RTX
- && GET_CODE (table) == JUMP_INSN
- && (GET_CODE (PATTERN (table)) == ADDR_VEC
- || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
+ if ((!optimize || flow2_completed) && tablejump_p (insn, NULL, NULL))
return false;
/* Avoid removing a branch with side effects. */
else
{
rtx target_label = block_label (target);
- rtx barrier, tmp;
+ rtx barrier, label, table;
emit_jump_insn_after (gen_jump (target_label), insn);
JUMP_LABEL (src->end) = target_label;
/* Recognize a tablejump that we are converting to a
simple jump and remove its associated CODE_LABEL
and ADDR_VEC or ADDR_DIFF_VEC. */
- if ((tmp = JUMP_LABEL (insn)) != NULL_RTX
- && (tmp = NEXT_INSN (tmp)) != NULL_RTX
- && GET_CODE (tmp) == JUMP_INSN
- && (GET_CODE (PATTERN (tmp)) == ADDR_VEC
- || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC))
- {
- delete_insn_chain (JUMP_LABEL (insn), tmp);
- }
+ if (tablejump_p (insn, &label, &table))
+ delete_insn_chain (label, table);
barrier = next_nonnote_insn (src->end);
if (!barrier || GET_CODE (barrier) != BARRIER)
/* Return last loop_beg note appearing after INSN, before start of next
basic block. Return INSN if there are no such notes.
- When emitting jump to redirect an fallthru edge, it should always appear
+ When emitting jump to redirect a fallthru edge, it should always appear
after the LOOP_BEG notes, as the loop optimizer expects the loop to start
either with a fallthru edge or with a jump, following the LOOP_BEG note,
to the loop exit test. */
was already destined for TARGET and we didn't manage to simplify the
instruction stream. */
-bool
-redirect_edge_and_branch (e, target)
+static bool
+rtl_redirect_edge_and_branch (e, target)
edge e;
basic_block target;
{
return false;
/* Recognize a tablejump and adjust all matching cases. */
- if ((tmp = JUMP_LABEL (insn)) != NULL_RTX
- && (tmp = NEXT_INSN (tmp)) != NULL_RTX
- && GET_CODE (tmp) == JUMP_INSN
- && (GET_CODE (PATTERN (tmp)) == ADDR_VEC
- || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC))
+ if (tablejump_p (insn, NULL, &tmp))
{
rtvec vec;
int j;
/* Like force_nonfallthru below, but additionally performs redirection.
Used by redirect_edge_and_branch_force. */
-static basic_block
+basic_block
force_nonfallthru_and_redirect (e, target)
edge e;
basic_block target;
{
- basic_block jump_block, new_bb = NULL;
+ basic_block jump_block, new_bb = NULL, src = e->src;
rtx note;
edge new_edge;
+ int abnormal_edge_flags = 0;
+
+ /* In the case the last instruction is a conditional jump to the next
+ instruction, first redirect the jump itself and then continue
+ by creating a basic block afterwards to redirect the fallthru edge. */
+ if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR
+ && any_condjump_p (e->src->end)
+ /* When called from cfglayout, fallthru edges do not
+ necessarily go to the next block. */
+ && e->src->next_bb == e->dest
+ && JUMP_LABEL (e->src->end) == e->dest->head)
+ {
+ rtx note;
+ edge b = unchecked_make_edge (e->src, target, 0);
+
+ if (!redirect_jump (e->src->end, block_label (target), 0))
+ abort ();
+ note = find_reg_note (e->src->end, REG_BR_PROB, NULL_RTX);
+ if (note)
+ {
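+ /* Divide the original probability and count between the new
+ branch edge B and the remaining (to-be-redirected) edge E. */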
+ int prob = INTVAL (XEXP (note, 0));
+
+ b->probability = prob;
+ b->count = e->count * prob / REG_BR_PROB_BASE;
+ e->probability -= b->probability;
+ e->count -= b->count;
+ if (e->probability < 0)
+ e->probability = 0;
+ if (e->count < 0)
+ e->count = 0;
+ }
+ }
if (e->flags & EDGE_ABNORMAL)
- abort ();
+ {
+ /* Irritating special case - fallthru edge to the same block as an
+ abnormal edge.
+ We can't redirect the abnormal edge, but we can still split the
+ fallthru one and create a separate abnormal edge to the original
+ destination. This allows bb-reorder to make such an edge
+ non-fallthru. */
+ if (e->dest != target)
+ abort ();
+ abnormal_edge_flags = e->flags & ~(EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
+ e->flags &= EDGE_FALLTHRU | EDGE_CAN_FALLTHRU;
+ }
else if (!(e->flags & EDGE_FALLTHRU))
abort ();
else if (e->src == ENTRY_BLOCK_PTR)
make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
}
- if (e->src->succ->succ_next)
+ if (e->src->succ->succ_next || abnormal_edge_flags)
{
/* Create the new structures. */
+
+ /* Position the new block correctly relative to loop notes. */
note = last_loop_beg_note (e->src->end);
- jump_block
- = create_basic_block (NEXT_INSN (note), NULL, e->src);
+ note = NEXT_INSN (note);
+
+ /* ... and ADDR_VECs. */
+ if (note != NULL
+ && GET_CODE (note) == CODE_LABEL
+ && NEXT_INSN (note)
+ && GET_CODE (NEXT_INSN (note)) == JUMP_INSN
+ && (GET_CODE (PATTERN (NEXT_INSN (note))) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (NEXT_INSN (note))) == ADDR_VEC))
+ note = NEXT_INSN (NEXT_INSN (note));
+
+ jump_block = create_basic_block (note, NULL, e->src);
jump_block->count = e->count;
jump_block->frequency = EDGE_FREQUENCY (e);
jump_block->loop_depth = target->loop_depth;
emit_barrier_after (jump_block->end);
redirect_edge_succ_nodup (e, target);
+ if (abnormal_edge_flags)
+ make_edge (src, target, abnormal_edge_flags);
+
return new_bb;
}
basic block. Return new basic block if created, NULL otherwise.
Abort if conversion is impossible. */
-basic_block
-redirect_edge_and_branch_force (e, target)
+static basic_block
+rtl_redirect_edge_and_branch_force (e, target)
edge e;
basic_block target;
{
{
rtx insn;
int count = 0;
+ basic_block bb;
- if (bb1->index > bb2->index)
- return false;
- else if (bb1->index == bb2->index)
+ if (bb1 == bb2)
return true;
+ /* ??? Could we guarantee that bb indices are monotone, so that we could
+ just compare them? */
+ for (bb = bb1; bb && bb != bb2; bb = bb->next_bb)
+ continue;
+
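+ /* BB2 was never reached from BB1, so BB1 does not syntactically
+ precede BB2 and this cannot be a back edge of a syntactic loop. */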
+ if (!bb)
+ return false;
+
for (insn = bb1->end; insn != bb2->head && count >= 0;
insn = NEXT_INSN (insn))
if (GET_CODE (insn) == NOTE)
block with multiple predecessors is not handled optimally. */
basic_block
-split_edge (edge_in)
+rtl_split_edge (edge_in)
edge edge_in;
{
basic_block bb;
- edge edge_out;
rtx before;
/* Abnormal edges cannot be split. */
edge_in->dest->global_live_at_start);
}
- edge_out = make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
+ make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
/* For non-fallthru edges, we must adjust the predecessor's
jump instruction to target our new block. */
int watch_calls;
{
rtx before = NULL_RTX, after = NULL_RTX, insns, tmp, last;
- basic_block bb;
+ basic_block bb = NULL;
/* Pull the insns off the edge now since the edge might go away. */
insns = e->insns;
if (before)
{
- emit_insns_before (insns, before);
+ emit_insn_before (insns, before);
last = prev_nonnote_insn (before);
}
else
- last = emit_insns_after (insns, after);
+ last = emit_insn_after (insns, after);
if (returnjump_p (last))
{
else if (GET_CODE (last) == JUMP_INSN)
abort ();
- find_sub_basic_blocks (bb);
+ /* Mark the basic block for find_many_sub_basic_blocks. */
+ bb->aux = &bb->aux;
}
/* Update the CFG for all queued instructions. */
commit_edge_insertions ()
{
basic_block bb;
+ sbitmap blocks;
+ bool changed = false;
#ifdef ENABLE_CHECKING
verify_flow_info ();
{
next = e->succ_next;
if (e->insns)
- commit_one_edge_insertion (e, false);
+ {
+ changed = true;
+ commit_one_edge_insertion (e, false);
+ }
}
}
+
+ if (!changed)
+ return;
+
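+ /* Blocks marked via bb->aux by commit_one_edge_insertion may now
+ contain new control flow insns; split them all in one pass. */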
+ blocks = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (blocks);
+ FOR_EACH_BB (bb)
+ if (bb->aux)
+ {
+ SET_BIT (blocks, bb->index);
+ /* Check for forgotten bb->aux values before commit_edge_insertions
+ call. */
+ if (bb->aux != &bb->aux)
+ abort ();
+ bb->aux = NULL;
+ }
+ find_many_sub_basic_blocks (blocks);
+ sbitmap_free (blocks);
}
\f
/* Update the CFG for all queued instructions, taking special care of inserting
commit_edge_insertions_watch_calls ()
{
basic_block bb;
+ sbitmap blocks;
+ bool changed = false;
#ifdef ENABLE_CHECKING
verify_flow_info ();
{
next = e->succ_next;
if (e->insns)
- commit_one_edge_insertion (e, true);
+ {
+ changed = true;
+ commit_one_edge_insertion (e, true);
+ }
}
}
+
+ if (!changed)
+ return;
+
+ blocks = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (blocks);
+ FOR_EACH_BB (bb)
+ if (bb->aux)
+ {
+ SET_BIT (blocks, bb->index);
+ /* Check for forgotten bb->aux values before commit_edge_insertions
+ call. */
+ if (bb->aux != &bb->aux)
+ abort ();
+ bb->aux = NULL;
+ }
+ find_many_sub_basic_blocks (blocks);
+ sbitmap_free (blocks);
}
\f
/* Print out one basic block with live information at start and end. */
dump_bb (bb, stderr);
}
-void
+basic_block
debug_bb_n (n)
int n;
{
- dump_bb (BASIC_BLOCK (n), stderr);
+ basic_block bb = BASIC_BLOCK (n);
+ dump_bb (bb, stderr);
+ return bb;
}
\f
/* Like print_rtl, but also print out live information for the start of each
(reachability of basic blocks, life information, etc. etc.). */
void
-verify_flow_info ()
+rtl_verify_flow_info ()
{
const int max_uid = get_max_uid ();
const rtx rtx_first = get_insns ();
basic_block *bb_info, *last_visited;
size_t *edge_checksum;
rtx x;
- int i, num_bb_notes, err = 0;
+ int num_bb_notes, err = 0;
basic_block bb, last_bb_seen;
bb_info = (basic_block *) xcalloc (max_uid, sizeof (basic_block));
- last_visited = (basic_block *) xcalloc (n_basic_blocks + 2,
+ last_visited = (basic_block *) xcalloc (last_basic_block + 2,
sizeof (basic_block));
- edge_checksum = (size_t *) xcalloc (n_basic_blocks + 2, sizeof (size_t));
+ edge_checksum = (size_t *) xcalloc (last_basic_block + 2, sizeof (size_t));
/* Check bb chain & numbers. */
last_bb_seen = ENTRY_BLOCK_PTR;
err = 1;
}
- /* For now, also check that we didn't change the order. */
- if (bb != EXIT_BLOCK_PTR && bb->index != last_bb_seen->index + 1)
- {
- error ("Wrong order of blocks %d and %d",
- last_bb_seen->index, bb->index);
- err = 1;
- }
-
- if (bb == EXIT_BLOCK_PTR && last_bb_seen->index != n_basic_blocks - 1)
- {
- error ("Only %d of %d blocks in chain",
- last_bb_seen->index + 1, n_basic_blocks);
- err = 1;
- }
-
last_bb_seen = bb;
}
if (e->flags & EDGE_FALLTHRU)
n_fallthru++;
- if ((e->flags & ~EDGE_DFS_BACK) == 0)
+ if ((e->flags & ~(EDGE_DFS_BACK | EDGE_CAN_FALLTHRU | EDGE_IRREDUCIBLE_LOOP)) == 0)
n_branch++;
if (e->flags & EDGE_ABNORMAL_CALL)
}
for (x = bb->head; x != NEXT_INSN (bb->end); x = NEXT_INSN (x))
- if (basic_block_for_insn && BLOCK_FOR_INSN (x) != bb)
+ if (BLOCK_FOR_INSN (x) != bb)
{
debug_rtx (x);
if (! BLOCK_FOR_INSN (x))
if (x == bb->end)
break;
- if (GET_CODE (x) == JUMP_INSN
- || GET_CODE (x) == CODE_LABEL
- || GET_CODE (x) == BARRIER)
+ if (control_flow_insn_p (x))
{
error ("in basic block %d:", bb->index);
fatal_insn ("flow control insn inside a basic block", x);
edge_checksum[e->dest->index + 2] -= (size_t) e;
}
- for (i = -2; i < n_basic_blocks; ++i)
- if (edge_checksum[i + 2])
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ if (edge_checksum[bb->index + 2])
{
- error ("basic block %i edge lists are corrupted", i);
+ error ("basic block %i edge lists are corrupted", bb->index);
err = 1;
}
{
if (NOTE_INSN_BASIC_BLOCK_P (x))
{
- basic_block bb = NOTE_BASIC_BLOCK (x);
+ bb = NOTE_BASIC_BLOCK (x);
num_bb_notes++;
if (bb != last_bb_seen->next_bb)
if (update_life_p)
{
- blocks = sbitmap_alloc (n_basic_blocks);
+ blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (blocks);
}
sbitmap_free (blocks);
return purged;
}
+
+/* Same as rtl_split_block but also updates cfg_layout structures. */
+static edge
+cfg_layout_split_block (bb, insnp)
+ basic_block bb;
+ void *insnp;
+{
+ rtx insn = insnp;
+
+ edge fallthru = rtl_split_block (bb, insn);
+
+ alloc_aux_for_block (fallthru->dest, sizeof (struct reorder_block_def));
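+ /* Insns kept in the cfglayout footer (such as barriers) follow the
+ end of the original block, so they move to the new block. */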
+ RBI (fallthru->dest)->footer = RBI (fallthru->src)->footer;
+ RBI (fallthru->src)->footer = NULL;
+ return fallthru;
+}
+
+
+/* Redirect edge E to basic block DEST. */
+static bool
+cfg_layout_redirect_edge_and_branch (e, dest)
+ edge e;
+ basic_block dest;
+{
+ basic_block src = e->src;
+ basic_block old_next_bb = src->next_bb;
+ bool ret;
+
+ /* Redirect_edge_and_branch may decide to turn the branch into a fallthru
+ edge when the basic blocks appear to be in sequence. Avoid this
+ transformation. */
+
+ src->next_bb = NULL;
+ if (e->flags & EDGE_FALLTHRU)
+ {
+ /* Redirect any branch edges unified with the fallthru one. */
+ if (GET_CODE (src->end) == JUMP_INSN
+ && JUMP_LABEL (src->end) == e->dest->head)
+ {
+ if (!redirect_jump (src->end, block_label (dest), 0))
+ abort ();
+ }
+ /* In case we are redirecting fallthru edge to the branch edge
+ of conditional jump, remove it. */
+ if (src->succ->succ_next
+ && !src->succ->succ_next->succ_next)
+ {
+ edge s = e->succ_next ? e->succ_next : src->succ;
+ if (s->dest == dest
+ && any_condjump_p (src->end)
+ && onlyjump_p (src->end))
+ delete_insn (src->end);
+ }
+ redirect_edge_succ_nodup (e, dest);
+
+ ret = true;
+ }
+ else
+ ret = rtl_redirect_edge_and_branch (e, dest);
+
+ /* We don't want simplejumps in the insn stream during cfglayout. */
+ if (simplejump_p (src->end))
+ {
+ delete_insn (src->end);
+ delete_barrier (NEXT_INSN (src->end));
+ src->succ->flags |= EDGE_FALLTHRU;
+ }
+ src->next_bb = old_next_bb;
+
+ return ret;
+}
+
+/* Simple wrapper, as we can always redirect fallthru edges. */
+static basic_block
+cfg_layout_redirect_edge_and_branch_force (e, dest)
+ edge e;
+ basic_block dest;
+{
+ if (!cfg_layout_redirect_edge_and_branch (e, dest))
+ abort ();
+ return NULL;
+}
+
+/* Same as rtl_delete_block but also updates cfg_layout structures. */
+static void
+cfg_layout_delete_block (bb)
+ basic_block bb;
+{
+ rtx insn, next, prev = PREV_INSN (bb->head), *to, remains;
+
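+ /* Splice the cfglayout header insns of BB back into the main insn
+ chain in front of the block. */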
+ if (RBI (bb)->header)
+ {
+ next = bb->head;
+ if (prev)
+ NEXT_INSN (prev) = RBI (bb)->header;
+ else
+ set_first_insn (RBI (bb)->header);
+ PREV_INSN (RBI (bb)->header) = prev;
+ insn = RBI (bb)->header;
+ while (NEXT_INSN (insn))
+ insn = NEXT_INSN (insn);
+ NEXT_INSN (insn) = next;
+ PREV_INSN (next) = insn;
+ }
+ next = NEXT_INSN (bb->end);
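+ /* Likewise splice the cfglayout footer back in after the block's
+ last insn. */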
+ if (RBI (bb)->footer)
+ {
+ insn = bb->end;
+ NEXT_INSN (insn) = RBI (bb)->footer;
+ PREV_INSN (RBI (bb)->footer) = insn;
+ while (NEXT_INSN (insn))
+ insn = NEXT_INSN (insn);
+ NEXT_INSN (insn) = next;
+ if (next)
+ PREV_INSN (next) = insn;
+ else
+ set_last_insn (insn);
+ }
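+ /* Whatever survives the deletion (notes, for example) will be
+ reattached as the header of the next block, or to the function
+ footer when BB was the last block. */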
+ if (bb->next_bb != EXIT_BLOCK_PTR)
+ to = &RBI (bb->next_bb)->header;
+ else
+ to = &cfg_layout_function_footer;
+ rtl_delete_block (bb);
+
+ if (prev)
+ prev = NEXT_INSN (prev);
+ else
+ prev = get_insns ();
+ if (next)
+ next = PREV_INSN (next);
+ else
+ next = get_last_insn ();
+
+ if (next && NEXT_INSN (next) != prev)
+ {
+ remains = unlink_insn_chain (prev, next);
+ insn = remains;
+ while (NEXT_INSN (insn))
+ insn = NEXT_INSN (insn);
+ NEXT_INSN (insn) = *to;
+ if (*to)
+ PREV_INSN (*to) = insn;
+ *to = remains;
+ }
+}
+
+/* Implementation of CFG manipulation for linearized RTL. */
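+/* NB: the initializers below are positional and must be kept in sync
+ with the field order of struct cfg_hooks. */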
+struct cfg_hooks rtl_cfg_hooks = {
+ rtl_verify_flow_info,
+ rtl_redirect_edge_and_branch,
+ rtl_redirect_edge_and_branch_force,
+ rtl_delete_block,
+ rtl_split_block,
+ rtl_split_edge
+};
+
+/* Implementation of CFG manipulation for cfg layout RTL, where
+ basic blocks connected via fallthru edges do not have to be adjacent.
+ This representation will hopefully become the default one in a future
+ version of the compiler. */
+struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
+ NULL, /* verify_flow_info. */
+ cfg_layout_redirect_edge_and_branch,
+ cfg_layout_redirect_edge_and_branch_force,
+ cfg_layout_delete_block,
+ cfg_layout_split_block,
+ NULL /* split_edge. */
+};