/* Control flow graph manipulation code for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of GCC.
that are aware of the RTL intermediate language.
Available functionality:
+ - Basic CFG/RTL manipulation API documented in cfghooks.h
- CFG-aware instruction chain manipulation
delete_insn, delete_insn_chain
- - Basic block manipulation
- create_basic_block, rtl_delete_block,rtl_split_block,
- merge_blocks_nomove
+ - Edge splitting and committing to edges
+ insert_insn_on_edge, commit_edge_insertions
+ - CFG updating after insn simplification
+ purge_dead_edges, purge_all_dead_edges
+
+ Functions not supposed for generic use:
- Infrastructure to determine quickly basic block for insn
compute_bb_for_insn, update_bb_for_insn, set_block_for_insn,
- Edge redirection with updating and optimizing of insn chain
- block_label, redirect_edge_and_branch,
- redirect_edge_and_branch_force, tidy_fallthru_edge, force_nonfallthru
- - Edge splitting and committing to edges
- split_edge, insert_insn_on_edge, commit_edge_insertions
- - CFG updating after constant propagation
- purge_dead_edges, purge_all_dead_edges */
+ block_label, tidy_fallthru_edge, force_nonfallthru */
\f
#include "config.h"
#include "system.h"
#include "obstack.h"
#include "insn-config.h"
#include "cfglayout.h"
+#include "expr.h"
/* Stubs in case we don't have a return insn. */
#ifndef HAVE_return
rtx label_value_list;
rtx tail_recursion_label_list;
-static int can_delete_note_p PARAMS ((rtx));
-static int can_delete_label_p PARAMS ((rtx));
-static void commit_one_edge_insertion PARAMS ((edge, int));
-static bool try_redirect_by_replacing_jump PARAMS ((edge, basic_block));
-static rtx last_loop_beg_note PARAMS ((rtx));
-static bool back_edge_of_syntactic_loop_p PARAMS ((basic_block, basic_block));
-basic_block force_nonfallthru_and_redirect PARAMS ((edge, basic_block));
-static basic_block rtl_split_edge PARAMS ((edge));
-static int rtl_verify_flow_info PARAMS ((void));
-static edge cfg_layout_split_block PARAMS ((basic_block, void *));
-static bool cfg_layout_redirect_edge_and_branch PARAMS ((edge, basic_block));
-static basic_block cfg_layout_redirect_edge_and_branch_force PARAMS ((edge, basic_block));
-static void cfg_layout_delete_block PARAMS ((basic_block));
-static void rtl_delete_block PARAMS ((basic_block));
-static basic_block rtl_redirect_edge_and_branch_force PARAMS ((edge, basic_block));
-static bool rtl_redirect_edge_and_branch PARAMS ((edge, basic_block));
-static edge rtl_split_block PARAMS ((basic_block, void *));
-static void rtl_dump_bb PARAMS ((basic_block, FILE *));
-static int rtl_verify_flow_info_1 PARAMS ((void));
+static int can_delete_note_p (rtx);
+static int can_delete_label_p (rtx);
+static void commit_one_edge_insertion (edge, int);
+static rtx last_loop_beg_note (rtx);
+static bool back_edge_of_syntactic_loop_p (basic_block, basic_block);
+basic_block force_nonfallthru_and_redirect (edge, basic_block);
+static basic_block rtl_split_edge (edge);
+static int rtl_verify_flow_info (void);
+static edge cfg_layout_split_block (basic_block, void *);
+static bool cfg_layout_redirect_edge_and_branch (edge, basic_block);
+static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
+static void cfg_layout_delete_block (basic_block);
+static void rtl_delete_block (basic_block);
+static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
+static bool rtl_redirect_edge_and_branch (edge, basic_block);
+static edge rtl_split_block (basic_block, void *);
+static void rtl_dump_bb (basic_block, FILE *);
+static int rtl_verify_flow_info_1 (void);
+static void mark_killed_regs (rtx, rtx, void *);
\f
/* Return true if NOTE is not one of the ones that must be kept paired,
so that we may simply delete it. */
static int
-can_delete_note_p (note)
- rtx note;
+can_delete_note_p (rtx note)
{
return (NOTE_LINE_NUMBER (note) == NOTE_INSN_DELETED
|| NOTE_LINE_NUMBER (note) == NOTE_INSN_BASIC_BLOCK
/* True if a given label can be deleted. */
static int
-can_delete_label_p (label)
- rtx label;
+can_delete_label_p (rtx label)
{
return (!LABEL_PRESERVE_P (label)
/* User declared labels must be preserved. */
/* Delete INSN by patching it out. Return the next insn. */
rtx
-delete_insn (insn)
- rtx insn;
+delete_insn (rtx insn)
{
rtx next = NEXT_INSN (insn);
rtx note;
/* Like delete_insn but also purge dead edges from BB. */
rtx
-delete_insn_and_edges (insn)
- rtx insn;
+delete_insn_and_edges (rtx insn)
{
rtx x;
bool purge = false;
that must be paired. */
void
-delete_insn_chain (start, finish)
- rtx start, finish;
+delete_insn_chain (rtx start, rtx finish)
{
rtx next;
/* Like delete_insn but also purge dead edges from BB. */
void
-delete_insn_chain_and_edges (first, last)
- rtx first, last;
+delete_insn_chain_and_edges (rtx first, rtx last)
{
bool purge = false;
AFTER is the basic block we should be put after. */
basic_block
-create_basic_block_structure (head, end, bb_note, after)
- rtx head, end, bb_note;
- basic_block after;
+create_basic_block_structure (rtx head, rtx end, rtx bb_note, basic_block after)
{
basic_block bb;
create new empty basic block before HEAD. Both END and HEAD can be NULL to
create basic block at the end of INSN chain. */
-basic_block
-create_basic_block (head, end, after)
- rtx head, end;
- basic_block after;
+static basic_block
+rtl_create_basic_block (void *headp, void *endp, basic_block after)
{
+ rtx head = headp, end = endp;
basic_block bb;
/* Place the new block just after the end. */
bb->aux = NULL;
return bb;
}
+
+/* Like rtl_create_basic_block, but additionally initialize the
+   cfglayout-specific reorder_block_def (rbi) data of the new block,
+   which is required whenever the CFG is in cfglayout mode.  */
+static basic_block
+cfg_layout_create_basic_block (void *head, void *end, basic_block after)
+{
+  basic_block newbb = rtl_create_basic_block (head, end, after);
+
+  cfg_layout_initialize_rbi (newbb);
+  return newbb;
+}
\f
/* Delete the insns in a (non-live) block. We physically delete every
non-deleted-note insn, and update the flow graph appropriately.
to post-process the stream to remove empty blocks, loops, ranges, etc. */
static void
-rtl_delete_block (b)
- basic_block b;
+rtl_delete_block (basic_block b)
{
rtx insn, end, tmp;
/* Records the basic block struct in BLOCK_FOR_INSN for every insn. */
void
-compute_bb_for_insn ()
+compute_bb_for_insn (void)
{
basic_block bb;
/* Release the basic_block_for_insn array. */
void
-free_bb_for_insn ()
+free_bb_for_insn (void)
{
rtx insn;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
/* Update insns block within BB. */
void
-update_bb_for_insn (bb)
- basic_block bb;
+update_bb_for_insn (basic_block bb)
{
rtx insn;
one has a number one greater than the block split. */
static edge
-rtl_split_block (bb, insnp)
- basic_block bb;
- void *insnp;
+rtl_split_block (basic_block bb, void *insnp)
{
basic_block new_bb;
edge new_edge;
return new_edge;
}
+/* Assume that the code of basic block B has been merged into A.
+   Do corresponding CFG updates:  redirect edges accordingly etc.  */
+static void
+update_cfg_after_block_merging (basic_block a, basic_block b)
+{
+  edge e;
+
+  /* Normally there should only be one successor of A and that is B, but
+     partway though the merge of blocks for conditional_execution we'll
+     be merging a TEST block with THEN and ELSE successors.  Free the
+     whole lot of them and hope the caller knows what they're doing.  */
+  while (a->succ)
+    remove_edge (a->succ);
+
+  /* Adjust the edges out of B for the new owner.  */
+  for (e = b->succ; e; e = e->succ_next)
+    e->src = a;
+  a->succ = b->succ;
+  a->flags |= b->flags;
+
+  /* B hasn't quite yet ceased to exist.  Attempt to prevent mishap.  */
+  b->pred = b->succ = NULL;
+
+  /* A now ends where B used to end; take over B's exit liveness info.  */
+  a->global_live_at_end = b->global_live_at_end;
+
+  expunge_block (b);
+}
+
/* Blocks A and B are to be merged into a single block A. The insns
- are already contiguous, hence `nomove'. */
+ are already contiguous. */
-void
-merge_blocks_nomove (a, b)
- basic_block a, b;
+static void
+rtl_merge_blocks (basic_block a, basic_block b)
{
rtx b_head = b->head, b_end = b->end, a_end = a->end;
rtx del_first = NULL_RTX, del_last = NULL_RTX;
int b_empty = 0;
- edge e;
/* If there was a CODE_LABEL beginning B, delete it. */
if (GET_CODE (b_head) == CODE_LABEL)
else if (GET_CODE (NEXT_INSN (a_end)) == BARRIER)
del_first = NEXT_INSN (a_end);
- /* Normally there should only be one successor of A and that is B, but
- partway though the merge of blocks for conditional_execution we'll
- be merging a TEST block with THEN and ELSE successors. Free the
- whole lot of them and hope the caller knows what they're doing. */
- while (a->succ)
- remove_edge (a->succ);
-
- /* Adjust the edges out of B for the new owner. */
- for (e = b->succ; e; e = e->succ_next)
- e->src = a;
- a->succ = b->succ;
- a->flags |= b->flags;
-
- /* B hasn't quite yet ceased to exist. Attempt to prevent mishap. */
- b->pred = b->succ = NULL;
- a->global_live_at_end = b->global_live_at_end;
-
- expunge_block (b);
+ update_cfg_after_block_merging (a, b);
/* Delete everything marked above as well as crap that might be
hanging out between the two blocks. */
a->end = a_end;
}
+
+/* Return true when blocks A and B can be merged by rtl_merge_blocks,
+   i.e. they are connected by a single simple edge, are adjacent in the
+   insn stream, and any jump ending A can safely be removed.  */
+static bool
+rtl_can_merge_blocks (basic_block a, basic_block b)
+{
+  /* There must be exactly one edge in between the blocks.  */
+  return (a->succ && !a->succ->succ_next && a->succ->dest == b
+	  && !b->pred->pred_next && a != b
+	  /* Must be simple edge.  */
+	  && !(a->succ->flags & EDGE_COMPLEX)
+	  && a->next_bb == b
+	  && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
+	  /* If the jump insn has side effects,
+	     we can't kill the edge.  */
+	  && (GET_CODE (a->end) != JUMP_INSN
+	      || (flow2_completed
+		  ? simplejump_p (a->end) : onlyjump_p (a->end))));
+}
\f
/* Return the label in the head of basic block BLOCK. Create one if it doesn't
exist. */
rtx
-block_label (block)
- basic_block block;
+block_label (basic_block block)
{
if (block == EXIT_BLOCK_PTR)
return NULL_RTX;
return values are equivalent to redirect_edge_and_branch. */
static bool
-try_redirect_by_replacing_jump (e, target)
- edge e;
- basic_block target;
+try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
{
basic_block src = e->src;
rtx insn = src->end, kill_from;
#endif
/* See if we can create the fallthru edge. */
- if (can_fallthru (src, target))
+ if (in_cfglayout || can_fallthru (src, target))
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Removing jump %i.\n", INSN_UID (insn));
fallthru = 1;
/* Selectively unlink whole insn chain. */
- delete_insn_chain (kill_from, PREV_INSN (target->head));
+ if (in_cfglayout)
+ {
+ rtx insn = src->rbi->footer;
+
+ delete_insn_chain (kill_from, src->end);
+
+ /* Remove barriers but keep jumptables. */
+ while (insn)
+ {
+ if (GET_CODE (insn) == BARRIER)
+ {
+ if (PREV_INSN (insn))
+ NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
+ else
+ src->rbi->footer = NEXT_INSN (insn);
+ if (NEXT_INSN (insn))
+ PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
+ }
+ if (GET_CODE (insn) == CODE_LABEL)
+ break;
+ insn = NEXT_INSN (insn);
+ }
+ }
+ else
+ delete_insn_chain (kill_from, PREV_INSN (target->head));
}
/* If this already is simplejump, redirect it. */
test. */
static rtx
-last_loop_beg_note (insn)
- rtx insn;
+last_loop_beg_note (rtx insn)
{
rtx last = insn;
return last;
}
-/* Attempt to change code to redirect edge E to TARGET. Don't do that on
- expense of adding new instructions or reordering basic blocks.
-
- Function can be also called with edge destination equivalent to the TARGET.
- Then it should try the simplifications and do nothing if none is possible.
-
- Return true if transformation succeeded. We still return false in case E
- already destinated TARGET and we didn't managed to simplify instruction
- stream. */
-
+/* Redirect edge representing branch of (un)conditional jump or tablejump. */
static bool
-rtl_redirect_edge_and_branch (e, target)
- edge e;
- basic_block target;
+redirect_branch_edge (edge e, basic_block target)
{
rtx tmp;
rtx old_label = e->dest->head;
basic_block src = e->src;
rtx insn = src->end;
- if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
- return false;
-
- if (try_redirect_by_replacing_jump (e, target))
- return true;
-
- /* Do this fast path late, as we want above code to simplify for cases
- where called on single edge leaving basic block containing nontrivial
- jump insn. */
- else if (e->dest == target)
- return false;
-
/* We can only redirect non-fallthru edges of jump insn. */
if (e->flags & EDGE_FALLTHRU)
return false;
++LABEL_NUSES (new_label);
}
- /* Handle casesi dispatch insns */
+ /* Handle casesi dispatch insns. */
if ((tmp = single_set (insn)) != NULL
&& SET_DEST (tmp) == pc_rtx
&& GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
if (e->dest != target)
redirect_edge_succ_nodup (e, target);
+ return true;
+}
+
+/* Attempt to change code to redirect edge E to TARGET.  Do not do that at
+   the expense of adding new instructions or reordering basic blocks.
+
+   This function can also be called with edge destination equivalent to the
+   TARGET.  Then it should try the simplifications and do nothing if none
+   is possible.
+
+   Return true if the transformation succeeded.  We still return false in
+   case E already pointed to TARGET and we did not manage to simplify the
+   instruction stream.  */
+
+static bool
+rtl_redirect_edge_and_branch (edge e, basic_block target)
+{
+  /* Abnormal call and EH edges cannot be redirected by patching jumps.  */
+  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
+    return false;
+
+  if (try_redirect_by_replacing_jump (e, target, false))
+    return true;
+
+  /* Do this fast path late, as we want above code to simplify for cases
+     where called on single edge leaving basic block containing nontrivial
+     jump insn.  */
+  else if (e->dest == target)
+    return false;
+  else if (!redirect_branch_edge (e, target))
+    return false;
  return true;
}
Used by redirect_edge_and_branch_force. */
basic_block
-force_nonfallthru_and_redirect (e, target)
- edge e;
- basic_block target;
+force_nonfallthru_and_redirect (edge e, basic_block target)
{
basic_block jump_block, new_bb = NULL, src = e->src;
rtx note;
/* In the case the last instruction is conditional jump to the next
instruction, first redirect the jump itself and then continue
- by creating an basic block afterwards to redirect fallthru edge. */
+ by creating a basic block afterwards to redirect fallthru edge. */
if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR
&& any_condjump_p (e->src->end)
/* When called from cfglayout, fallthru edges do not
- neccessarily go to the next block. */
+ necessarily go to the next block. */
&& e->src->next_bb == e->dest
&& JUMP_LABEL (e->src->end) == e->dest->head)
{
/* Irritating special case - fallthru edge to the same block as abnormal
edge.
We can't redirect abnormal edge, but we still can split the fallthru
- one and create separate abnormal edge to original destination.
+ one and create separate abnormal edge to original destination.
This allows bb-reorder to make such edge non-fallthru. */
if (e->dest != target)
abort ();
{
/* Create the new structures. */
+ /* If the old block ended with a tablejump, skip its table
+ by searching forward from there. Otherwise start searching
+ forward from the last instruction of the old block. */
+ if (!tablejump_p (e->src->end, NULL, ¬e))
+ note = e->src->end;
+
/* Position the new block correctly relative to loop notes. */
- note = last_loop_beg_note (e->src->end);
+ note = last_loop_beg_note (note);
note = NEXT_INSN (note);
- /* ... and ADDR_VECs. */
- if (note != NULL
- && GET_CODE (note) == CODE_LABEL
- && NEXT_INSN (note)
- && GET_CODE (NEXT_INSN (note)) == JUMP_INSN
- && (GET_CODE (PATTERN (NEXT_INSN (note))) == ADDR_DIFF_VEC
- || GET_CODE (PATTERN (NEXT_INSN (note))) == ADDR_VEC))
- note = NEXT_INSN (NEXT_INSN (note));
-
jump_block = create_basic_block (note, NULL, e->src);
jump_block->count = e->count;
jump_block->frequency = EDGE_FREQUENCY (e);
Return newly created BB or NULL if none. */
basic_block
-force_nonfallthru (e)
- edge e;
+force_nonfallthru (edge e)
{
return force_nonfallthru_and_redirect (e, e->dest);
}
Abort if conversion is impossible. */
static basic_block
-rtl_redirect_edge_and_branch_force (e, target)
- edge e;
- basic_block target;
+rtl_redirect_edge_and_branch_force (edge e, basic_block target)
{
if (redirect_edge_and_branch (e, target)
|| e->dest == target)
fact true, delete the jump and barriers that are in the way. */
void
-tidy_fallthru_edge (e, b, c)
- edge e;
- basic_block b, c;
+tidy_fallthru_edge (edge e, basic_block b, basic_block c)
{
rtx q;
is how find_basic_blocks created them. */
void
-tidy_fallthru_edges ()
+tidy_fallthru_edges (void)
{
basic_block b, c;
is back edge of syntactic loop. */
static bool
-back_edge_of_syntactic_loop_p (bb1, bb2)
- basic_block bb1, bb2;
+back_edge_of_syntactic_loop_p (basic_block bb1, basic_block bb2)
{
rtx insn;
int count = 0;
The case of a block ending in an unconditional jump to a
block with multiple predecessors is not handled optimally. */
-basic_block
-rtl_split_edge (edge_in)
- edge edge_in;
+static basic_block
+rtl_split_edge (edge edge_in)
{
basic_block bb;
rtx before;
make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
- /* For non-fallthry edges, we must adjust the predecessor's
+ /* For non-fallthru edges, we must adjust the predecessor's
jump instruction to target our new block. */
if ((edge_in->flags & EDGE_FALLTHRU) == 0)
{
CFG until commit_edge_insertions is called. */
void
-insert_insn_on_edge (pattern, e)
- rtx pattern;
- edge e;
+insert_insn_on_edge (rtx pattern, edge e)
{
/* We cannot insert instructions on an abnormal critical edge.
It will be easier to find the culprit if we die now. */
end_sequence ();
}
+/* Called from safe_insert_insn_on_edge through note_stores, marks live
+   registers that are killed by the store.  REG is the destination being
+   stored to, SET is unused, and DATA is the regset in which the killed
+   register numbers are recorded.  */
+static void
+mark_killed_regs (rtx reg, rtx set ATTRIBUTE_UNUSED, void *data)
+{
+  regset killed = data;
+  int regno, i;
+
+  /* A store through a SUBREG clobbers (part of) the inner register.  */
+  if (GET_CODE (reg) == SUBREG)
+    reg = SUBREG_REG (reg);
+  /* Only register destinations matter; stores to MEM etc. are ignored.  */
+  if (!REG_P (reg))
+    return;
+  regno = REGNO (reg);
+  if (regno >= FIRST_PSEUDO_REGISTER)
+    SET_REGNO_REG_SET (killed, regno);
+  else
+    {
+      /* A hard register may occupy several consecutive register numbers;
+	 mark every one of them as killed.  */
+      for (i = 0; i < (int) HARD_REGNO_NREGS (regno, GET_MODE (reg)); i++)
+	SET_REGNO_REG_SET (killed, regno + i);
+    }
+}
+
+/* Similar to insert_insn_on_edge, tries to put INSN to edge E.  Additionally
+   it checks whether this will not clobber the registers that are live on the
+   edge (i.e. it requires liveness information to be up-to-date) and if there
+   are some, then it tries to save and restore them.  Returns true if
+   successful.  */
+bool
+safe_insert_insn_on_edge (rtx insn, edge e)
+{
+  rtx x;
+  regset_head killed_head;
+  regset killed = INITIALIZE_REG_SET (killed_head);
+  rtx save_regs = NULL_RTX;
+  int regno, noccmode;
+  enum machine_mode mode;
+  bool ret = false;
+
+#ifdef AVOID_CCMODE_COPIES
+  noccmode = true;
+#else
+  noccmode = false;
+#endif
+
+  /* Collect the registers INSN stores to ...  */
+  for (x = insn; x; x = NEXT_INSN (x))
+    if (INSN_P (x))
+      note_stores (PATTERN (x), mark_killed_regs, killed);
+
+  /* ... and restrict them to those live at the destination of E; only
+     these need saving around INSN.  */
+  bitmap_operation (killed, killed, e->dest->global_live_at_start,
+		    BITMAP_AND);
+
+  /* For each endangered register allocate a pseudo to save it in.  Fail
+     if some register has no mode we can copy it in.  Use goto so that
+     the regset and any partial save list are released on failure, too.  */
+  EXECUTE_IF_SET_IN_REG_SET (killed, 0, regno,
+    {
+      mode = regno < FIRST_PSEUDO_REGISTER
+	     ? reg_raw_mode[regno]
+	     : GET_MODE (regno_reg_rtx[regno]);
+      if (mode == VOIDmode)
+	goto fail;
+
+      if (noccmode && mode == CCmode)
+	goto fail;
+
+      save_regs = alloc_EXPR_LIST (0,
+				   alloc_EXPR_LIST (0,
+						    gen_reg_rtx (mode),
+						    gen_raw_REG (mode, regno)),
+				   save_regs);
+    });
+
+  if (save_regs)
+    {
+      rtx from, to;
+
+      /* Wrap INSN into a sequence that first saves the endangered
+	 registers into the pseudos and restores them afterwards.  */
+      start_sequence ();
+      for (x = save_regs; x; x = XEXP (x, 1))
+	{
+	  from = XEXP (XEXP (x, 0), 1);
+	  to = XEXP (XEXP (x, 0), 0);
+	  emit_move_insn (to, from);
+	}
+      emit_insn (insn);
+      for (x = save_regs; x; x = XEXP (x, 1))
+	{
+	  from = XEXP (XEXP (x, 0), 0);
+	  to = XEXP (XEXP (x, 0), 1);
+	  emit_move_insn (to, from);
+	}
+      insn = get_insns ();
+      end_sequence ();
+    }
+  insert_insn_on_edge (insn, e);
+  ret = true;
+
+fail:
+  /* Release resources on every path; the early failure returns used to
+     leak the KILLED regset and a partially built SAVE_REGS list.  */
+  free_EXPR_LIST_list (&save_regs);
+  FREE_REG_SET (killed);
+  return ret;
+}
+
+
/* Update the CFG for the instructions queued on edge E. */
static void
-commit_one_edge_insertion (e, watch_calls)
- edge e;
- int watch_calls;
+commit_one_edge_insertion (edge e, int watch_calls)
{
rtx before = NULL_RTX, after = NULL_RTX, insns, tmp, last;
basic_block bb = NULL;
/* Update the CFG for all queued instructions. */
void
-commit_edge_insertions ()
+commit_edge_insertions (void)
{
basic_block bb;
sbitmap blocks;
code on edges between call and storing its return value. */
void
-commit_edge_insertions_watch_calls ()
+commit_edge_insertions_watch_calls (void)
{
basic_block bb;
sbitmap blocks;
/* Print out one basic block with live information at start and end. */
static void
-rtl_dump_bb (bb, outf)
- basic_block bb;
- FILE *outf;
+rtl_dump_bb (basic_block bb, FILE *outf)
{
rtx insn;
rtx last;
basic block. */
void
-print_rtl_with_bb (outf, rtx_first)
- FILE *outf;
- rtx rtx_first;
+print_rtl_with_bb (FILE *outf, rtx rtx_first)
{
rtx tmp_rtx;
{
enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
int max_uid = get_max_uid ();
- basic_block *start
- = (basic_block *) xcalloc (max_uid, sizeof (basic_block));
- basic_block *end
- = (basic_block *) xcalloc (max_uid, sizeof (basic_block));
- enum bb_state *in_bb_p
- = (enum bb_state *) xcalloc (max_uid, sizeof (enum bb_state));
+ basic_block *start = xcalloc (max_uid, sizeof (basic_block));
+ basic_block *end = xcalloc (max_uid, sizeof (basic_block));
+ enum bb_state *in_bb_p = xcalloc (max_uid, sizeof (enum bb_state));
basic_block bb;
}
\f
void
-update_br_prob_note (bb)
- basic_block bb;
+update_br_prob_note (basic_block bb)
{
rtx note;
if (GET_CODE (bb->end) != JUMP_INSN)
In future it can be extended check a lot of other stuff as well
(reachability of basic blocks, life information, etc. etc.). */
static int
-rtl_verify_flow_info_1 ()
+rtl_verify_flow_info_1 (void)
{
const int max_uid = get_max_uid ();
rtx last_head = get_last_insn ();
int err = 0;
basic_block bb, last_bb_seen;
- bb_info = (basic_block *) xcalloc (max_uid, sizeof (basic_block));
+ bb_info = xcalloc (max_uid, sizeof (basic_block));
/* Check bb chain & numbers. */
last_bb_seen = ENTRY_BLOCK_PTR;
FOR_EACH_BB_REVERSE (bb)
{
int n_fallthru = 0, n_eh = 0, n_call = 0, n_abnormal = 0, n_branch = 0;
- edge e;
+ edge e, fallthru = NULL;
rtx note;
if (INSN_P (bb->end)
{
if (INTVAL (XEXP (note, 0)) != BRANCH_EDGE (bb)->probability)
{
- error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
+ error ("verify_flow_info: REG_BR_PROB does not match cfg %wi %i",
INTVAL (XEXP (note, 0)), BRANCH_EDGE (bb)->probability);
err = 1;
}
for (e = bb->succ; e; e = e->succ_next)
{
if (e->flags & EDGE_FALLTHRU)
- n_fallthru++;
+ n_fallthru++, fallthru = e;
- if ((e->flags & ~(EDGE_DFS_BACK | EDGE_CAN_FALLTHRU | EDGE_IRREDUCIBLE_LOOP)) == 0)
+ if ((e->flags & ~(EDGE_DFS_BACK
+ | EDGE_CAN_FALLTHRU
+ | EDGE_IRREDUCIBLE_LOOP
+ | EDGE_LOOP_EXIT)) == 0)
n_branch++;
if (e->flags & EDGE_ABNORMAL_CALL)
err = 1;
}
if (n_branch != 1 && any_condjump_p (bb->end)
- && JUMP_LABEL (bb->end) != bb->next_bb->head)
+ && JUMP_LABEL (bb->end) != fallthru->dest->head)
{
error ("Wrong amount of branch edges after conditional jump %i", bb->index);
err = 1;
- check that all returns are followed by barriers
- check that all fallthru edge points to the adjacent blocks. */
static int
-rtl_verify_flow_info ()
+rtl_verify_flow_info (void)
{
basic_block bb;
int err = rtl_verify_flow_info_1 ();
Return true if any edges are eliminated. */
bool
-purge_dead_edges (bb)
- basic_block bb;
+purge_dead_edges (basic_block bb)
{
edge e, next;
rtx insn = bb->end, note;
true if some edge has been eliminated. */
bool
-purge_all_dead_edges (update_life_p)
- int update_life_p;
+purge_all_dead_edges (int update_life_p)
{
int purged = false;
sbitmap blocks = 0;
/* Same as split_block but update cfg_layout structures. */
static edge
-cfg_layout_split_block (bb, insnp)
- basic_block bb;
- void *insnp;
+cfg_layout_split_block (basic_block bb, void *insnp)
{
rtx insn = insnp;
edge fallthru = rtl_split_block (bb, insn);
- alloc_aux_for_block (fallthru->dest, sizeof (struct reorder_block_def));
- RBI (fallthru->dest)->footer = RBI (fallthru->src)->footer;
- RBI (fallthru->src)->footer = NULL;
+ fallthru->dest->rbi->footer = fallthru->src->rbi->footer;
+ fallthru->src->rbi->footer = NULL;
return fallthru;
}
/* Redirect Edge to DEST. */
static bool
-cfg_layout_redirect_edge_and_branch (e, dest)
- edge e;
- basic_block dest;
+cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
{
basic_block src = e->src;
- basic_block old_next_bb = src->next_bb;
bool ret;
+ if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
+ return false;
+
+ if (e->src != ENTRY_BLOCK_PTR
+ && try_redirect_by_replacing_jump (e, dest, true))
+ return true;
+
+ if (e->dest == dest)
+ return true;
+
+ if (e->src == ENTRY_BLOCK_PTR
+ && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
+ {
+ if (rtl_dump_file)
+ fprintf (rtl_dump_file, "Redirecting entry edge from bb %i to %i\n",
+ e->src->index, dest->index);
+
+ redirect_edge_succ (e, dest);
+ return true;
+ }
+
/* Redirect_edge_and_branch may decide to turn branch into fallthru edge
in the case the basic block appears to be in sequence. Avoid this
transformation. */
- src->next_bb = NULL;
if (e->flags & EDGE_FALLTHRU)
{
/* Redirect any branch edges unified with the fallthru one. */
delete_insn (src->end);
}
redirect_edge_succ_nodup (e, dest);
+ if (rtl_dump_file)
+ fprintf (rtl_dump_file, "Fallthru edge %i->%i redirected to %i\n",
+ e->src->index, e->dest->index, dest->index);
ret = true;
}
else
- ret = rtl_redirect_edge_and_branch (e, dest);
+ ret = redirect_branch_edge (e, dest);
/* We don't want simplejumps in the insn stream during cfglayout. */
if (simplejump_p (src->end))
- {
- delete_insn (src->end);
- delete_barrier (NEXT_INSN (src->end));
- src->succ->flags |= EDGE_FALLTHRU;
- }
- src->next_bb = old_next_bb;
+ abort ();
return ret;
}
/* Simple wrapper as we always can redirect fallthru edges. */
static basic_block
-cfg_layout_redirect_edge_and_branch_force (e, dest)
- edge e;
- basic_block dest;
+cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
{
if (!cfg_layout_redirect_edge_and_branch (e, dest))
abort ();
/* Same as flow_delete_block but update cfg_layout structures. */
static void
-cfg_layout_delete_block (bb)
- basic_block bb;
+cfg_layout_delete_block (basic_block bb)
{
rtx insn, next, prev = PREV_INSN (bb->head), *to, remaints;
- if (RBI (bb)->header)
+ if (bb->rbi->header)
{
next = bb->head;
if (prev)
- NEXT_INSN (prev) = RBI (bb)->header;
+ NEXT_INSN (prev) = bb->rbi->header;
else
- set_first_insn (RBI (bb)->header);
- PREV_INSN (RBI (bb)->header) = prev;
- insn = RBI (bb)->header;
+ set_first_insn (bb->rbi->header);
+ PREV_INSN (bb->rbi->header) = prev;
+ insn = bb->rbi->header;
while (NEXT_INSN (insn))
insn = NEXT_INSN (insn);
NEXT_INSN (insn) = next;
PREV_INSN (next) = insn;
}
next = NEXT_INSN (bb->end);
- if (RBI (bb)->footer)
+ if (bb->rbi->footer)
{
- insn = bb->end;
- NEXT_INSN (insn) = RBI (bb)->footer;
- PREV_INSN (RBI (bb)->footer) = insn;
- while (NEXT_INSN (insn))
- insn = NEXT_INSN (insn);
- NEXT_INSN (insn) = next;
- if (next)
- PREV_INSN (next) = insn;
- else
- set_last_insn (insn);
+ insn = bb->rbi->footer;
+ while (insn)
+ {
+ if (GET_CODE (insn) == BARRIER)
+ {
+ if (PREV_INSN (insn))
+ NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
+ else
+ bb->rbi->footer = NEXT_INSN (insn);
+ if (NEXT_INSN (insn))
+ PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
+ }
+ if (GET_CODE (insn) == CODE_LABEL)
+ break;
+ insn = NEXT_INSN (insn);
+ }
+ if (bb->rbi->footer)
+ {
+ insn = bb->end;
+ NEXT_INSN (insn) = bb->rbi->footer;
+ PREV_INSN (bb->rbi->footer) = insn;
+ while (NEXT_INSN (insn))
+ insn = NEXT_INSN (insn);
+ NEXT_INSN (insn) = next;
+ if (next)
+ PREV_INSN (next) = insn;
+ else
+ set_last_insn (insn);
+ }
}
if (bb->next_bb != EXIT_BLOCK_PTR)
- to = &RBI(bb->next_bb)->header;
+ to = &bb->next_bb->rbi->header;
else
to = &cfg_layout_function_footer;
rtl_delete_block (bb);
if (prev)
prev = NEXT_INSN (prev);
- else
+ else
prev = get_insns ();
if (next)
next = PREV_INSN (next);
- else
+ else
next = get_last_insn ();
if (next && NEXT_INSN (next) != prev)
}
}
+/* Return true when blocks A and B can be safely merged.  Unlike
+   rtl_can_merge_blocks this does not require A and B to be adjacent in
+   the insn stream, since in cfglayout mode blocks may be reordered.  */
+static bool
+cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
+{
+  /* There must be exactly one edge in between the blocks.  */
+  return (a->succ && !a->succ->succ_next && a->succ->dest == b
+	  && !b->pred->pred_next && a != b
+	  /* Must be simple edge.  */
+	  && !(a->succ->flags & EDGE_COMPLEX)
+	  && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
+	  /* If the jump insn has side effects,
+	     we can't kill the edge.  */
+	  && (GET_CODE (a->end) != JUMP_INSN
+	      || (flow2_completed
+		  ? simplejump_p (a->end) : onlyjump_p (a->end))));
+}
+
+/* Merge block A and B, abort when it is not possible.  */
+static void
+cfg_layout_merge_blocks (basic_block a, basic_block b)
+{
+#ifdef ENABLE_CHECKING
+  if (!cfg_layout_can_merge_blocks_p (a, b))
+    abort ();
+#endif
+
+  /* If there was a CODE_LABEL beginning B, delete it.  */
+  if (GET_CODE (b->head) == CODE_LABEL)
+    delete_insn (b->head);
+
+  /* We should have fallthru edge in a, or we can do dummy redirection to get
+     it cleaned up.  */
+  if (GET_CODE (a->end) == JUMP_INSN)
+    redirect_edge_and_branch (a->succ, b);
+  /* The redirection above must have removed the jump; otherwise A's
+     tail is something we cannot merge across.  */
+  if (GET_CODE (a->end) == JUMP_INSN)
+    abort ();
+
+  /* Possible line number notes should appear in between.  */
+  if (b->rbi->header)
+    {
+      rtx first = a->end, last;
+
+      last = emit_insn_after (b->rbi->header, a->end);
+      delete_insn_chain (NEXT_INSN (first), last);
+      b->rbi->header = NULL;
+    }
+
+  /* In the case basic blocks are not adjacent, move them around.  */
+  if (NEXT_INSN (a->end) != b->head)
+    {
+      rtx first = unlink_insn_chain (b->head, b->end);
+
+      emit_insn_after (first, a->end);
+      /* Skip possible DELETED_LABEL insn.  */
+      if (!NOTE_INSN_BASIC_BLOCK_P (first))
+	first = NEXT_INSN (first);
+      if (!NOTE_INSN_BASIC_BLOCK_P (first))
+	abort ();
+      b->head = NULL;
+      /* B's basic block note is redundant now that its insns belong
+	 to A; delete it.  */
+      delete_insn (first);
+    }
+  /* Otherwise just re-associate the instructions.  */
+  else
+    {
+      rtx insn;
+
+      for (insn = b->head; insn != NEXT_INSN (b->end); insn = NEXT_INSN (insn))
+	set_block_for_insn (insn, a);
+      insn = b->head;
+      /* Skip possible DELETED_LABEL insn.  */
+      if (!NOTE_INSN_BASIC_BLOCK_P (insn))
+	insn = NEXT_INSN (insn);
+      if (!NOTE_INSN_BASIC_BLOCK_P (insn))
+	abort ();
+      b->head = NULL;
+      a->end = b->end;
+      /* Delete B's now-redundant basic block note.  */
+      delete_insn (insn);
+    }
+
+  /* Possible tablejumps and barriers should appear after the block.  */
+  if (b->rbi->footer)
+    {
+      if (!a->rbi->footer)
+	a->rbi->footer = b->rbi->footer;
+      else
+	{
+	  /* Append B's footer chain to the end of A's.  */
+	  rtx last = a->rbi->footer;
+
+	  while (NEXT_INSN (last))
+	    last = NEXT_INSN (last);
+	  NEXT_INSN (last) = b->rbi->footer;
+	  PREV_INSN (b->rbi->footer) = last;
+	}
+      b->rbi->footer = NULL;
+    }
+
+  if (rtl_dump_file)
+    fprintf (rtl_dump_file, "Merged blocks %d and %d.\n",
+	     a->index, b->index);
+
+  update_cfg_after_block_merging (a, b);
+}
+
+/* Split edge E: insert a new empty basic block on E, make it fall through
+   to E's original destination, and redirect E to the new block.  The new
+   block inherits E's count and frequency.  Return the new block.  */
+static basic_block
+cfg_layout_split_edge (edge e)
+{
+  edge new_e;
+  basic_block new_bb =
+    create_basic_block (e->src != ENTRY_BLOCK_PTR
+			? NEXT_INSN (e->src->end) : get_insns (),
+			NULL_RTX, e->src);
+
+  new_bb->count = e->count;
+  new_bb->frequency = EDGE_FREQUENCY (e);
+
+  new_e = make_edge (new_bb, e->dest, EDGE_FALLTHRU);
+  new_e->probability = REG_BR_PROB_BASE;
+  new_e->count = e->count;
+  redirect_edge_and_branch_force (e, new_bb);
+
+  return new_bb;
+}
+
/* Implementation of CFG manipulation for linearized RTL. */
struct cfg_hooks rtl_cfg_hooks = {
rtl_verify_flow_info,
rtl_dump_bb,
+ rtl_create_basic_block,
rtl_redirect_edge_and_branch,
rtl_redirect_edge_and_branch_force,
rtl_delete_block,
rtl_split_block,
+ rtl_can_merge_blocks, /* can_merge_blocks_p */
+ rtl_merge_blocks,
rtl_split_edge
};
This representation will hopefully become the default one in future
version of the compiler. */
struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
- rtl_verify_flow_info_1, /* verify_flow_info. */
+ rtl_verify_flow_info_1,
rtl_dump_bb,
+ cfg_layout_create_basic_block,
cfg_layout_redirect_edge_and_branch,
cfg_layout_redirect_edge_and_branch_force,
cfg_layout_delete_block,
cfg_layout_split_block,
- NULL /* split_edge. */
+ cfg_layout_can_merge_blocks_p,
+ cfg_layout_merge_blocks,
+ cfg_layout_split_edge
};