- CFG-aware instruction chain manipulation
delete_insn, delete_insn_chain
- Basic block manipulation
- create_basic_block, flow_delete_block, split_block,
+ create_basic_block, rtl_delete_block, rtl_split_block,
merge_blocks_nomove
- Infrastructure to determine quickly basic block for insn
compute_bb_for_insn, update_bb_for_insn, set_block_for_insn,
- Edge redirection with updating and optimizing of insn chain
block_label, redirect_edge_and_branch,
redirect_edge_and_branch_force, tidy_fallthru_edge, force_nonfallthru
- - Edge splitting and commiting to edges
+ - Edge splitting and committing to edges
split_edge, insert_insn_on_edge, commit_edge_insertions
- Dumping and debugging
print_rtl_with_bb, dump_bb, debug_bb, debug_bb_n
\f
#include "config.h"
#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "tm_p.h"
#include "obstack.h"
#include "insn-config.h"
+#include "cfglayout.h"
/* Stubs in case we don't have a return insn. */
#ifndef HAVE_return
static bool try_redirect_by_replacing_jump PARAMS ((edge, basic_block));
static rtx last_loop_beg_note PARAMS ((rtx));
static bool back_edge_of_syntactic_loop_p PARAMS ((basic_block, basic_block));
-static basic_block force_nonfallthru_and_redirect PARAMS ((edge, basic_block));
+basic_block force_nonfallthru_and_redirect PARAMS ((edge, basic_block));
+static basic_block rtl_split_edge PARAMS ((edge));
+static void rtl_verify_flow_info PARAMS ((void));
+static edge cfg_layout_split_block PARAMS ((basic_block, void *));
+static bool cfg_layout_redirect_edge_and_branch PARAMS ((edge, basic_block));
+static basic_block cfg_layout_redirect_edge_and_branch_force PARAMS ((edge, basic_block));
+static void cfg_layout_delete_block PARAMS ((basic_block));
+static void rtl_delete_block PARAMS ((basic_block));
+static basic_block rtl_redirect_edge_and_branch_force PARAMS ((edge, basic_block));
+static bool rtl_redirect_edge_and_branch PARAMS ((edge, basic_block));
+static edge rtl_split_block PARAMS ((basic_block, void *));
\f
/* Return true if NOTE is not one of the ones that must be kept paired,
so that we may simply delete it. */
/* ??? Preserving all such notes strikes me as wrong. It would be nice
to post-process the stream to remove empty blocks, loops, ranges, etc. */
-int
+void
flow_delete_block_noexpunge (b)
basic_block b;
{
- int deleted_handler = 0;
rtx insn, end, tmp;
/* If the head of this block is a CODE_LABEL, then it might be the
and remove the associated NOTE_INSN_EH_REGION_BEG and
NOTE_INSN_EH_REGION_END notes. */
- /* Get rid of all NOTE_INSN_PREDICTIONs hanging before the block. */
+ /* Get rid of all NOTE_INSN_PREDICTIONs and NOTE_INSN_LOOP_CONTs
+ hanging before the block. */
for (insn = PREV_INSN (b->head); insn; insn = PREV_INSN (insn))
{
if (GET_CODE (insn) != NOTE)
break;
- if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PREDICTION)
+ if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_PREDICTION
+ || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_CONT)
NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
}
/* Include any jump table following the basic block. */
end = b->end;
- if (GET_CODE (end) == JUMP_INSN
- && (tmp = JUMP_LABEL (end)) != NULL_RTX
- && (tmp = NEXT_INSN (tmp)) != NULL_RTX
- && GET_CODE (tmp) == JUMP_INSN
- && (GET_CODE (PATTERN (tmp)) == ADDR_VEC
- || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC))
+ if (tablejump_p (end, NULL, &tmp))
end = tmp;
/* Include any barrier that may follow the basic block. */
b->pred = NULL;
b->succ = NULL;
-
- return deleted_handler;
}
-int
-flow_delete_block (b)
+/* Delete basic block B: remove its insns via flow_delete_block_noexpunge,
+   then expunge B itself from the basic block array.  Static hook
+   replacement for the old exported flow_delete_block; unlike it, no
+   deleted-handler value is returned.  */
+static void
+rtl_delete_block (b)
     basic_block b;
{
-  int deleted_handler = flow_delete_block_noexpunge (b);
+  flow_delete_block_noexpunge (b);
  /* Remove the basic block from the array. */
  expunge_block (b);
-
-  return deleted_handler;
}
\f
/* Records the basic block struct in BLOCK_FOR_INSN for every insn. */
for (insn = bb->head; ; insn = NEXT_INSN (insn))
{
- set_block_for_insn (insn, bb);
+ if (GET_CODE (insn) != BARRIER)
+ set_block_for_insn (insn, bb);
if (insn == bb->end)
break;
}
this function renumbers all the basic blocks so that the new
one has a number one greater than the block split. */
-edge
-split_block (bb, insn)
+static edge
+rtl_split_block (bb, insnp)
basic_block bb;
- rtx insn;
+ void *insnp;
{
basic_block new_bb;
edge new_edge;
edge e;
+ rtx insn = insnp;
/* There is no point splitting the block after its end. */
if (bb->end == insn)
basic_block src = e->src;
rtx insn = src->end, kill_from;
edge tmp;
- rtx set, table;
+ rtx set;
int fallthru = 0;
/* Verify that all targets will be TARGET. */
if (tmp || !onlyjump_p (insn))
return false;
- if (reload_completed && JUMP_LABEL (insn)
- && (table = NEXT_INSN (JUMP_LABEL (insn))) != NULL_RTX
- && GET_CODE (table) == JUMP_INSN
- && (GET_CODE (PATTERN (table)) == ADDR_VEC
- || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
+ if ((!optimize || flow2_completed) && tablejump_p (insn, NULL, NULL))
return false;
/* Avoid removing branch with side effects. */
else
{
rtx target_label = block_label (target);
- rtx barrier, tmp;
+ rtx barrier, label, table;
emit_jump_insn_after (gen_jump (target_label), insn);
JUMP_LABEL (src->end) = target_label;
/* Recognize a tablejump that we are converting to a
simple jump and remove its associated CODE_LABEL
and ADDR_VEC or ADDR_DIFF_VEC. */
- if ((tmp = JUMP_LABEL (insn)) != NULL_RTX
- && (tmp = NEXT_INSN (tmp)) != NULL_RTX
- && GET_CODE (tmp) == JUMP_INSN
- && (GET_CODE (PATTERN (tmp)) == ADDR_VEC
- || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC))
- {
- delete_insn_chain (JUMP_LABEL (insn), tmp);
- }
+ if (tablejump_p (insn, &label, &table))
+ delete_insn_chain (label, table);
barrier = next_nonnote_insn (src->end);
if (!barrier || GET_CODE (barrier) != BARRIER)
/* Return last loop_beg note appearing after INSN, before start of next
basic block. Return INSN if there are no such notes.
- When emitting jump to redirect an fallthru edge, it should always appear
+ When emitting jump to redirect a fallthru edge, it should always appear
after the LOOP_BEG notes, as loop optimizer expect loop to either start by
fallthru edge or jump following the LOOP_BEG note jumping to the loop exit
test. */
already destinated TARGET and we didn't managed to simplify instruction
stream. */
-bool
-redirect_edge_and_branch (e, target)
+static bool
+rtl_redirect_edge_and_branch (e, target)
edge e;
basic_block target;
{
return false;
/* Recognize a tablejump and adjust all matching cases. */
- if ((tmp = JUMP_LABEL (insn)) != NULL_RTX
- && (tmp = NEXT_INSN (tmp)) != NULL_RTX
- && GET_CODE (tmp) == JUMP_INSN
- && (GET_CODE (PATTERN (tmp)) == ADDR_VEC
- || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC))
+ if (tablejump_p (insn, NULL, &tmp))
{
rtvec vec;
int j;
/* Like force_nonfallthru below, but additionally performs redirection
Used by redirect_edge_and_branch_force. */
-static basic_block
+basic_block
force_nonfallthru_and_redirect (e, target)
edge e;
basic_block target;
{
- basic_block jump_block, new_bb = NULL;
+ basic_block jump_block, new_bb = NULL, src = e->src;
rtx note;
edge new_edge;
+ int abnormal_edge_flags = 0;
+
+  /* In the case the last instruction is a conditional jump to the next
+     instruction, first redirect the jump itself and then continue
+     by creating a basic block afterwards to redirect the fallthru edge.  */
+ if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR
+ && any_condjump_p (e->src->end)
+      /* When called from cfglayout, fallthru edges do not
+         necessarily go to the next block.  */
+ && e->src->next_bb == e->dest
+ && JUMP_LABEL (e->src->end) == e->dest->head)
+ {
+ rtx note;
+ edge b = unchecked_make_edge (e->src, target, 0);
+
+ if (!redirect_jump (e->src->end, block_label (target), 0))
+ abort ();
+ note = find_reg_note (e->src->end, REG_BR_PROB, NULL_RTX);
+ if (note)
+ {
+ int prob = INTVAL (XEXP (note, 0));
+
+ b->probability = prob;
+ b->count = e->count * prob / REG_BR_PROB_BASE;
+ e->probability -= e->probability;
+ e->count -= b->count;
+ if (e->probability < 0)
+ e->probability = 0;
+ if (e->count < 0)
+ e->count = 0;
+ }
+ }
if (e->flags & EDGE_ABNORMAL)
- abort ();
+ {
+ /* Irritating special case - fallthru edge to the same block as abnormal
+ edge.
+ We can't redirect abnormal edge, but we still can split the fallthru
+ one and create separate abnormal edge to original destination.
+ This allows bb-reorder to make such edge non-fallthru. */
+ if (e->dest != target)
+ abort ();
+ abnormal_edge_flags = e->flags & ~(EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
+ e->flags &= EDGE_FALLTHRU | EDGE_CAN_FALLTHRU;
+ }
else if (!(e->flags & EDGE_FALLTHRU))
abort ();
else if (e->src == ENTRY_BLOCK_PTR)
make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
}
- if (e->src->succ->succ_next)
+ if (e->src->succ->succ_next || abnormal_edge_flags)
{
/* Create the new structures. */
emit_barrier_after (jump_block->end);
redirect_edge_succ_nodup (e, target);
+ if (abnormal_edge_flags)
+ make_edge (src, target, abnormal_edge_flags);
+
return new_bb;
}
basic block. Return new basic block if created, NULL otherwise.
Abort if conversion is impossible. */
-basic_block
-redirect_edge_and_branch_force (e, target)
+static basic_block
+rtl_redirect_edge_and_branch_force (e, target)
edge e;
basic_block target;
{
block with multiple predecessors is not handled optimally. */
basic_block
-split_edge (edge_in)
+rtl_split_edge (edge_in)
edge edge_in;
{
basic_block bb;
- edge edge_out;
rtx before;
/* Abnormal edges cannot be split. */
edge_in->dest->global_live_at_start);
}
- edge_out = make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
+ make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
/* For non-fallthry edges, we must adjust the predecessor's
jump instruction to target our new block. */
else if (GET_CODE (last) == JUMP_INSN)
abort ();
- find_sub_basic_blocks (bb);
+ /* Mark the basic block for find_sub_basic_blocks. */
+ bb->aux = &bb->aux;
}
/* Update the CFG for all queued instructions. */
commit_edge_insertions ()
{
basic_block bb;
+ sbitmap blocks;
+ bool changed = false;
#ifdef ENABLE_CHECKING
verify_flow_info ();
{
next = e->succ_next;
if (e->insns)
- commit_one_edge_insertion (e, false);
+ {
+ changed = true;
+ commit_one_edge_insertion (e, false);
+ }
}
}
+
+ if (!changed)
+ return;
+
+ blocks = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (blocks);
+ FOR_EACH_BB (bb)
+ if (bb->aux)
+ {
+ SET_BIT (blocks, bb->index);
+ /* Check for forgotten bb->aux values before commit_edge_insertions
+ call. */
+ if (bb->aux != &bb->aux)
+ abort ();
+ bb->aux = NULL;
+ }
+ find_many_sub_basic_blocks (blocks);
+ sbitmap_free (blocks);
}
\f
/* Update the CFG for all queued instructions, taking special care of inserting
commit_edge_insertions_watch_calls ()
{
basic_block bb;
+ sbitmap blocks;
+ bool changed = false;
#ifdef ENABLE_CHECKING
verify_flow_info ();
{
next = e->succ_next;
if (e->insns)
- commit_one_edge_insertion (e, true);
+ {
+ changed = true;
+ commit_one_edge_insertion (e, true);
+ }
}
}
+
+ if (!changed)
+ return;
+
+ blocks = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (blocks);
+ FOR_EACH_BB (bb)
+ if (bb->aux)
+ {
+ SET_BIT (blocks, bb->index);
+ /* Check for forgotten bb->aux values before commit_edge_insertions
+ call. */
+ if (bb->aux != &bb->aux)
+ abort ();
+ bb->aux = NULL;
+ }
+ find_many_sub_basic_blocks (blocks);
+ sbitmap_free (blocks);
}
\f
/* Print out one basic block with live information at start and end. */
dump_bb (bb, stderr);
}
-void
+basic_block
debug_bb_n (n)
     int n;
{
-  dump_bb (BASIC_BLOCK (n), stderr);
+  basic_block bb = BASIC_BLOCK (n);
+  dump_bb (bb, stderr);
+  /* Return the block so it can be inspected further from the debugger.  */
+  return bb;
}
\f
/* Like print_rtl, but also print out live information for the start of each
(reachability of basic blocks, life information, etc. etc.). */
void
-verify_flow_info ()
+rtl_verify_flow_info ()
{
const int max_uid = get_max_uid ();
const rtx rtx_first = get_insns ();
if (e->flags & EDGE_FALLTHRU)
n_fallthru++;
- if ((e->flags & ~EDGE_DFS_BACK) == 0)
+ if ((e->flags & ~(EDGE_DFS_BACK | EDGE_CAN_FALLTHRU | EDGE_IRREDUCIBLE_LOOP)) == 0)
n_branch++;
if (e->flags & EDGE_ABNORMAL_CALL)
if (x == bb->end)
break;
- if (GET_CODE (x) == JUMP_INSN
- || GET_CODE (x) == CODE_LABEL
- || GET_CODE (x) == BARRIER)
+ if (control_flow_insn_p (x))
{
error ("in basic block %d:", bb->index);
fatal_insn ("flow control insn inside a basic block", x);
sbitmap_free (blocks);
return purged;
}
+
+/* Same as split_block but update cfg_layout structures. */
+static edge
+cfg_layout_split_block (bb, insnp)
+     basic_block bb;
+     void *insnp;
+{
+  rtx insn = insnp;
+
+  edge fallthru = rtl_split_block (bb, insn);
+
+  /* Give the new (second) half its own reorder_block_def and move the
+     footer from the first half to it, so the footer insns stay after the
+     tail of the original block (footer semantics per cfglayout.h).  */
+  alloc_aux_for_block (fallthru->dest, sizeof (struct reorder_block_def));
+  RBI (fallthru->dest)->footer = RBI (fallthru->src)->footer;
+  RBI (fallthru->src)->footer = NULL;
+  return fallthru;
+}
+
+
+/* Redirect edge E to basic block DEST (cfglayout-mode hook).  */
+static bool
+cfg_layout_redirect_edge_and_branch (e, dest)
+     edge e;
+     basic_block dest;
+{
+  basic_block src = e->src;
+  basic_block old_next_bb = src->next_bb;
+  bool ret;
+
+  /* Redirect_edge_and_branch may decide to turn branch into fallthru edge
+     in the case the basic block appears to be in sequence.  Avoid this
+     transformation by temporarily hiding the layout successor.  */
+
+  src->next_bb = NULL;
+  if (e->flags & EDGE_FALLTHRU)
+    {
+      /* Redirect any branch edges unified with the fallthru one.  */
+      if (GET_CODE (src->end) == JUMP_INSN
+	  && JUMP_LABEL (src->end) == e->dest->head)
+	{
+	  if (!redirect_jump (src->end, block_label (dest), 0))
+	    abort ();
+	}
+      /* If we are redirecting the fallthru edge to the destination of the
+	 branch edge of a conditional jump, the jump becomes redundant —
+	 remove it.  */
+      if (src->succ->succ_next
+	  && !src->succ->succ_next->succ_next)
+	{
+	  /* S is the other of SRC's exactly two outgoing edges,
+	     i.e. the one that is not E.  */
+	  edge s = e->succ_next ? e->succ_next : src->succ;
+	  if (s->dest == dest
+	      && any_condjump_p (src->end)
+	      && onlyjump_p (src->end))
+	    delete_insn (src->end);
+	}
+      redirect_edge_succ_nodup (e, dest);
+
+      ret = true;
+    }
+  else
+    ret = rtl_redirect_edge_and_branch (e, dest);
+
+  /* We don't want simplejumps in the insn stream during cfglayout.  */
+  if (simplejump_p (src->end))
+    {
+      delete_insn (src->end);
+      /* NOTE(review): relies on delete_insn keeping the removed insn's
+	 NEXT_INSN link intact so the trailing barrier is still reachable
+	 here — confirm against delete_insn/remove_insn.  */
+      delete_barrier (NEXT_INSN (src->end));
+      src->succ->flags |= EDGE_FALLTHRU;
+    }
+  /* Restore the layout successor hidden above.  */
+  src->next_bb = old_next_bb;
+
+  return ret;
+}
+
+/* Simple wrapper as we can always redirect fallthru edges.  */
+static basic_block
+cfg_layout_redirect_edge_and_branch_force (e, dest)
+     edge e;
+     basic_block dest;
+{
+  /* In cfglayout mode redirection is expected to always succeed; no new
+     basic block is ever created, hence the NULL return.  */
+  if (!cfg_layout_redirect_edge_and_branch (e, dest))
+    abort ();
+  return NULL;
+}
+
+/* Same as flow_delete_block but update cfg_layout structures.  Insns kept
+   out of line in BB's reorder header/footer are first spliced back into
+   the insn chain; whatever survives deletion between PREV and NEXT is
+   reattached to the next block's header (or the function footer).  */
+static void
+cfg_layout_delete_block (bb)
+     basic_block bb;
+{
+  /* "remaints" (sic) holds the leftover insn chain below.  */
+  rtx insn, next, prev = PREV_INSN (bb->head), *to, remaints;
+
+  /* Splice BB's out-of-line header back into the chain just before
+     BB's first insn, fixing up the first-insn pointer if needed.  */
+  if (RBI (bb)->header)
+    {
+      next = bb->head;
+      if (prev)
+	NEXT_INSN (prev) = RBI (bb)->header;
+      else
+	set_first_insn (RBI (bb)->header);
+      PREV_INSN (RBI (bb)->header) = prev;
+      insn = RBI (bb)->header;
+      while (NEXT_INSN (insn))
+	insn = NEXT_INSN (insn);
+      NEXT_INSN (insn) = next;
+      PREV_INSN (next) = insn;
+    }
+  /* Likewise splice the footer in right after BB's last insn.  */
+  next = NEXT_INSN (bb->end);
+  if (RBI (bb)->footer)
+    {
+      insn = bb->end;
+      NEXT_INSN (insn) = RBI (bb)->footer;
+      PREV_INSN (RBI (bb)->footer) = insn;
+      while (NEXT_INSN (insn))
+	insn = NEXT_INSN (insn);
+      NEXT_INSN (insn) = next;
+      if (next)
+	PREV_INSN (next) = insn;
+      else
+	set_last_insn (insn);
+    }
+  /* Decide where leftover insns go: the following block's header, or the
+     function footer when BB was the last block.  */
+  if (bb->next_bb != EXIT_BLOCK_PTR)
+    to = &RBI(bb->next_bb)->header;
+  else
+    to = &cfg_layout_function_footer;
+  rtl_delete_block (bb);
+
+  /* Re-derive the boundaries of what remains between the old PREV and
+     NEXT, falling back to the chain ends when BB was first/last.  */
+  if (prev)
+    prev = NEXT_INSN (prev);
+  else
+    prev = get_insns ();
+  if (next)
+    next = PREV_INSN (next);
+  else
+    next = get_last_insn ();
+
+  if (next && NEXT_INSN (next) != prev)
+    {
+      /* Unlink the surviving insns and prepend them to *TO.  */
+      remaints = unlink_insn_chain (prev, next);
+      insn = remaints;
+      while (NEXT_INSN (insn))
+	insn = NEXT_INSN (insn);
+      NEXT_INSN (insn) = *to;
+      if (*to)
+	PREV_INSN (*to) = insn;
+      *to = remaints;
+    }
+}
+
+/* Implementation of CFG manipulation for linearized RTL. */
+struct cfg_hooks rtl_cfg_hooks = {
+  rtl_verify_flow_info,			/* verify_flow_info.  */
+  rtl_redirect_edge_and_branch,		/* redirect_edge_and_branch.  */
+  rtl_redirect_edge_and_branch_force,	/* redirect_edge_and_branch_force.  */
+  rtl_delete_block,			/* delete_block.  */
+  rtl_split_block,			/* split_block.  */
+  rtl_split_edge			/* split_edge.  */
+};
+
+/* Implementation of CFG manipulation for cfg layout RTL, where
+   basic blocks connected via fallthru edges do not have to be adjacent.
+   This representation will hopefully become the default one in future
+   versions of the compiler.  */
+struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
+  NULL,  /* verify_flow_info. */
+  cfg_layout_redirect_edge_and_branch,		/* redirect_edge_and_branch.  */
+  cfg_layout_redirect_edge_and_branch_force,	/* redirect_edge_and_branch_force.  */
+  cfg_layout_delete_block,			/* delete_block.  */
+  cfg_layout_split_block,			/* split_block.  */
+  NULL  /* split_edge. */
+};