#include "cfglayout.h"
#include "expr.h"
-/* Stubs in case we don't have a return insn. */
-#ifndef HAVE_return
-#define HAVE_return 0
-#define gen_return() NULL_RTX
-#endif
/* The labels mentioned in non-jump rtl. Valid during find_basic_blocks. */
/* ??? Should probably be using LABEL_NUSES instead. It would take a
bit of surgery to be able to use or co-opt the routines in jump. */
rtx label_value_list;
-rtx tail_recursion_label_list;
static int can_delete_note_p (rtx);
static int can_delete_label_p (rtx);
static bool rtl_move_block_after (basic_block, basic_block);
static int rtl_verify_flow_info (void);
static basic_block cfg_layout_split_block (basic_block, void *);
-static bool cfg_layout_redirect_edge_and_branch (edge, basic_block);
+static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
static void cfg_layout_delete_block (basic_block);
static void rtl_delete_block (basic_block);
static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
-static bool rtl_redirect_edge_and_branch (edge, basic_block);
+static edge rtl_redirect_edge_and_branch (edge, basic_block);
static basic_block rtl_split_block (basic_block, void *);
static void rtl_dump_bb (basic_block, FILE *, int);
static int rtl_verify_flow_info_1 (void);
{
return (NOTE_LINE_NUMBER (note) == NOTE_INSN_DELETED
|| NOTE_LINE_NUMBER (note) == NOTE_INSN_BASIC_BLOCK
+ || NOTE_LINE_NUMBER (note) == NOTE_INSN_UNLIKELY_EXECUTED_CODE
|| NOTE_LINE_NUMBER (note) == NOTE_INSN_PREDICTION);
}
really_delete = false;
PUT_CODE (insn, NOTE);
NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED_LABEL;
- NOTE_SOURCE_FILE (insn) = name;
+ NOTE_DELETED_LABEL_NAME (insn) = name;
}
remove_node_from_expr_list (insn, &nonlocal_goto_handler_labels);
basic_block bb;
if (bb_note
- && ! RTX_INTEGRATED_P (bb_note)
&& (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
&& bb->aux == NULL)
{
link_block (bb, after);
BASIC_BLOCK (bb->index) = bb;
update_bb_for_insn (bb);
+ bb->partition = UNPARTITIONED;
/* Tag the block so that we know it has been used when considering
other basic block notes. */
rtx head = headp, end = endp;
basic_block bb;
- /* Place the new block just after the end. */
- VARRAY_GROW (basic_block_info, last_basic_block + 1);
+ /* Grow the basic block array if needed. */
+ if ((size_t) last_basic_block >= VARRAY_SIZE (basic_block_info))
+ {
+ size_t new_size = last_basic_block + (last_basic_block + 3) / 4;
+ VARRAY_GROW (basic_block_info, new_size);
+ }
n_basic_blocks++;
{
basic_block newbb = rtl_create_basic_block (head, end, after);
- cfg_layout_initialize_rbi (newbb);
+ initialize_bb_rbi (newbb);
return newbb;
}
\f
insn = BB_HEAD (b);
- never_reached_warning (insn, BB_END (b));
-
if (GET_CODE (insn) == CODE_LABEL)
maybe_remove_eh_handler (insn);
BLOCK_FOR_INSN (insn) = NULL;
}
+/* Return the insn after which new code should be emitted when inserting
+   code at the entry of the current function: the head of the first real
+   basic block when the CFG has one, otherwise the start of the insn
+   chain.  */
+rtx
+entry_of_function (void)
+{
+  return (n_basic_blocks ? BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ());
+}
+
/* Update insns block within BB. */
void
static bool
rtl_can_merge_blocks (basic_block a,basic_block b)
{
+ bool partitions_ok = true;
+
+ /* If we are partitioning hot/cold basic blocks, we don't want to
+ mess up unconditional or indirect jumps that cross between hot
+ and cold sections. */
+
+ if (flag_reorder_blocks_and_partition
+ && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
+ || find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
+ || a->partition != b->partition))
+ partitions_ok = false;
+
/* There must be exactly one edge in between the blocks. */
return (a->succ && !a->succ->succ_next && a->succ->dest == b
&& !b->pred->pred_next && a != b
/* Must be simple edge. */
&& !(a->succ->flags & EDGE_COMPLEX)
+ && partitions_ok
&& a->next_bb == b
&& a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
/* If the jump insn has side effects,
apply only if all edges now point to the same block. The parameters and
return values are equivalent to redirect_edge_and_branch. */
-bool
+edge
try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
{
basic_block src = e->src;
rtx set;
int fallthru = 0;
+
+ /* If we are partitioning hot/cold basic blocks, we don't want to
+ mess up unconditional or indirect jumps that cross between hot
+ and cold sections. */
+
+ if (flag_reorder_blocks_and_partition
+ && find_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX))
+ return NULL;
+
/* Verify that all targets will be TARGET. */
for (tmp = src->succ; tmp; tmp = tmp->succ_next)
if (tmp->dest != target && tmp != e)
break;
if (tmp || !onlyjump_p (insn))
- return false;
+ return NULL;
if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
- return false;
+ return NULL;
/* Avoid removing branch with side effects. */
set = single_set (insn);
if (!set || side_effects_p (set))
- return false;
+ return NULL;
/* In case we zap a conditional jump, we'll need to kill
the cc0 setter too. */
/* See if we can create the fallthru edge. */
if (in_cfglayout || can_fallthru (src, target))
{
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Removing jump %i.\n", INSN_UID (insn));
+ if (dump_file)
+ fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
fallthru = 1;
/* Selectively unlink whole insn chain. */
else if (simplejump_p (insn))
{
if (e->dest == target)
- return false;
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Redirecting jump %i from %i to %i.\n",
+ return NULL;
+ if (dump_file)
+ fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
INSN_UID (insn), e->dest->index, target->index);
if (!redirect_jump (insn, block_label (target), 0))
{
if (target == EXIT_BLOCK_PTR)
- return false;
+ return NULL;
abort ();
}
}
/* Cannot do anything for target exit block. */
else if (target == EXIT_BLOCK_PTR)
- return false;
+ return NULL;
/* Or replace possibly complicated jump insn by simple jump insn. */
else
emit_jump_insn_after (gen_jump (target_label), insn);
JUMP_LABEL (BB_END (src)) = target_label;
LABEL_NUSES (target_label)++;
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Replacing insn %i by jump %i\n",
+ if (dump_file)
+ fprintf (dump_file, "Replacing insn %i by jump %i\n",
INSN_UID (insn), INSN_UID (BB_END (src)));
if (e->dest != target)
redirect_edge_succ (e, target);
- return true;
+ return e;
}
/* Return last loop_beg note appearing after INSN, before start of next
return last;
}
-/* Redirect edge representing branch of (un)conditional jump or tablejump. */
-static bool
+/* Redirect edge E, representing the branch of an (un)conditional jump or
+   tablejump, to TARGET.  Return the redirected edge, or NULL on failure.  */
+static edge
redirect_branch_edge (edge e, basic_block target)
{
rtx tmp;
/* We can only redirect non-fallthru edges of jump insn. */
if (e->flags & EDGE_FALLTHRU)
- return false;
+ return NULL;
else if (GET_CODE (insn) != JUMP_INSN)
- return false;
+ return NULL;
/* Recognize a tablejump and adjust all matching cases. */
if (tablejump_p (insn, NULL, &tmp))
rtx new_label = block_label (target);
if (target == EXIT_BLOCK_PTR)
- return false;
+ return NULL;
if (GET_CODE (PATTERN (tmp)) == ADDR_VEC)
vec = XVEC (PATTERN (tmp), 0);
else
if (computed_jump_p (insn)
/* A return instruction can't be redirected. */
|| returnjump_p (insn))
- return false;
+ return NULL;
/* If the insn doesn't go where we think, we're confused. */
if (JUMP_LABEL (insn) != old_label)
if (!redirect_jump (insn, block_label (target), 0))
{
if (target == EXIT_BLOCK_PTR)
- return false;
+ return NULL;
abort ();
}
}
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Edge %i->%i redirected to %i\n",
+ if (dump_file)
+ fprintf (dump_file, "Edge %i->%i redirected to %i\n",
e->src->index, e->dest->index, target->index);
if (e->dest != target)
- redirect_edge_succ_nodup (e, target);
- return true;
+ e = redirect_edge_succ_nodup (e, target);
+ return e;
}
/* Attempt to change code to redirect edge E to TARGET. Don't do that on
Function can be also called with edge destination equivalent to the TARGET.
Then it should try the simplifications and do nothing if none is possible.
- Return true if transformation succeeded. We still return false in case E
- already destinated TARGET and we didn't managed to simplify instruction
- stream. */
+   Return an edge representing the branch if the transformation succeeded.
+   Return NULL on failure.
+   We still return NULL in case E was already destined for TARGET and we
+   did not manage to simplify the instruction stream.  */
-static bool
+static edge
rtl_redirect_edge_and_branch (edge e, basic_block target)
{
+ edge ret;
+ basic_block src = e->src;
+
if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
- return false;
+ return NULL;
if (e->dest == target)
- return true;
+ return e;
- if (try_redirect_by_replacing_jump (e, target, false))
- return true;
+ if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
+ {
+ src->flags |= BB_DIRTY;
+ return ret;
+ }
- if (!redirect_branch_edge (e, target))
- return false;
+ ret = redirect_branch_edge (e, target);
+ if (!ret)
+ return NULL;
- return true;
+ src->flags |= BB_DIRTY;
+ return ret;
}
/* Like force_nonfallthru below, but additionally performs redirection
target->global_live_at_start);
}
+ /* Make sure new block ends up in correct hot/cold section. */
+
+ jump_block->partition = e->src->partition;
+ if (flag_reorder_blocks_and_partition)
+ {
+ if (e->src->partition == COLD_PARTITION)
+ {
+ rtx bb_note, new_note;
+ for (bb_note = BB_HEAD (jump_block);
+ bb_note && bb_note != NEXT_INSN (BB_END (jump_block));
+ bb_note = NEXT_INSN (bb_note))
+ if (GET_CODE (bb_note) == NOTE
+ && NOTE_LINE_NUMBER (bb_note) == NOTE_INSN_BASIC_BLOCK)
+ break;
+ new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE,
+ bb_note);
+ NOTE_BASIC_BLOCK (new_note) = jump_block;
+ jump_block->partition = COLD_PARTITION;
+ }
+ if (GET_CODE (BB_END (jump_block)) == JUMP_INSN
+ && !any_condjump_p (BB_END (jump_block))
+ && jump_block->succ->crossing_edge )
+ REG_NOTES (BB_END (jump_block)) = gen_rtx_EXPR_LIST
+ (REG_CROSSING_JUMP, NULL_RTX,
+ REG_NOTES (BB_END (jump_block)));
+ }
+
/* Wire edge in. */
new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
new_edge->probability = e->probability;
e->flags &= ~EDGE_FALLTHRU;
if (target == EXIT_BLOCK_PTR)
{
- if (HAVE_return)
+#ifdef HAVE_return
emit_jump_insn_after (gen_return (), BB_END (jump_block));
- else
+#else
abort ();
+#endif
}
else
{
rtx q;
basic_block b = e->src, c = b->next_bb;
- /* If the jump insn has side effects, we can't tidy the edge. */
- if (GET_CODE (BB_END (b)) == JUMP_INSN
- && !onlyjump_p (BB_END (b)))
- return;
-
/* ??? In a late-running flow pass, other folks may have deleted basic
blocks by nopping out blocks, leaving multiple BARRIERs between here
and the target label. They ought to be chastized and fixed.
else
before = NULL_RTX;
- bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
+  /* If this is a fall through edge to the exit block, the blocks might
+     not be adjacent, and the right place is after the source. */
+ if (edge_in->flags & EDGE_FALLTHRU && edge_in->dest == EXIT_BLOCK_PTR)
+ {
+ before = NEXT_INSN (BB_END (edge_in->src));
+ if (before
+ && GET_CODE (before) == NOTE
+ && NOTE_LINE_NUMBER (before) == NOTE_INSN_LOOP_END)
+ before = NEXT_INSN (before);
+ bb = create_basic_block (before, NULL, edge_in->src);
+ }
+ else
+ bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
/* ??? This info is likely going to be out of date very soon. */
if (edge_in->dest->global_live_at_start)
if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
abort ();
- if (e->insns == NULL_RTX)
+ if (e->insns.r == NULL_RTX)
start_sequence ();
else
- push_to_sequence (e->insns);
+ push_to_sequence (e->insns.r);
emit_insn (pattern);
- e->insns = get_insns ();
+ e->insns.r = get_insns ();
end_sequence ();
}
SET_REGNO_REG_SET (killed, regno);
else
{
- for (i = 0; i < (int) HARD_REGNO_NREGS (regno, GET_MODE (reg)); i++)
+ for (i = 0; i < (int) hard_regno_nregs[regno][GET_MODE (reg)]; i++)
SET_REGNO_REG_SET (killed, regno + i);
}
}
basic_block bb = NULL;
/* Pull the insns off the edge now since the edge might go away. */
- insns = e->insns;
- e->insns = NULL_RTX;
+ insns = e->insns.r;
+ e->insns.r = NULL_RTX;
/* Special case -- avoid inserting code between call and storing
its return value. */
tmp = NEXT_INSN (tmp);
if (NOTE_INSN_BASIC_BLOCK_P (tmp))
tmp = NEXT_INSN (tmp);
+ if (tmp
+ && GET_CODE (tmp) == NOTE
+ && NOTE_LINE_NUMBER (tmp) == NOTE_INSN_UNLIKELY_EXECUTED_CODE)
+ tmp = NEXT_INSN (tmp);
if (tmp == BB_HEAD (bb))
before = tmp;
else if (tmp)
{
bb = split_edge (e);
after = BB_END (bb);
+
+ /* If we are partitioning hot/cold basic blocks, we must make sure
+ that the new basic block ends up in the correct section. */
+
+ bb->partition = e->src->partition;
+ if (flag_reorder_blocks_and_partition
+ && e->src != ENTRY_BLOCK_PTR
+ && e->src->partition == COLD_PARTITION)
+ {
+ rtx bb_note, new_note, cur_insn;
+
+ bb_note = NULL_RTX;
+ for (cur_insn = BB_HEAD (bb); cur_insn != NEXT_INSN (BB_END (bb));
+ cur_insn = NEXT_INSN (cur_insn))
+ if (GET_CODE (cur_insn) == NOTE
+ && NOTE_LINE_NUMBER (cur_insn) == NOTE_INSN_BASIC_BLOCK)
+ {
+ bb_note = cur_insn;
+ break;
+ }
+
+ new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE,
+ bb_note);
+ NOTE_BASIC_BLOCK (new_note) = bb;
+ if (GET_CODE (BB_END (bb)) == JUMP_INSN
+ && !any_condjump_p (BB_END (bb))
+ && bb->succ->crossing_edge )
+ REG_NOTES (BB_END (bb)) = gen_rtx_EXPR_LIST
+ (REG_CROSSING_JUMP, NULL_RTX, REG_NOTES (BB_END (bb)));
+ if (after == bb_note)
+ after = new_note;
+ }
}
}
for (e = bb->succ; e; e = next)
{
next = e->succ_next;
- if (e->insns)
+ if (e->insns.r)
{
- changed = true;
- commit_one_edge_insertion (e, false);
+ changed = true;
+ commit_one_edge_insertion (e, false);
}
}
}
for (e = bb->succ; e; e = next)
{
next = e->succ_next;
- if (e->insns)
+ if (e->insns.r)
{
changed = true;
commit_one_edge_insertion (e, true);
rtx last;
char *s_indent;
- s_indent = (char *) alloca ((size_t) indent + 1);
- memset ((void *) s_indent, ' ', (size_t) indent);
+ s_indent = alloca ((size_t) indent + 1);
+ memset (s_indent, ' ', (size_t) indent);
s_indent[indent] = '\0';
fprintf (outf, ";;%s Registers live at start: ", s_indent);
- tails of basic blocks (ensure that boundary is necessary)
- scans body of the basic block for JUMP_INSN, CODE_LABEL
and NOTE_INSN_BASIC_BLOCK
+ - verify that no fall_thru edge crosses hot/cold partition boundaries
In future it can be extended check a lot of other stuff as well
(reachability of basic blocks, life information, etc. etc.). */
for (e = bb->succ; e; e = e->succ_next)
{
if (e->flags & EDGE_FALLTHRU)
- n_fallthru++, fallthru = e;
+ {
+ n_fallthru++, fallthru = e;
+ if (e->crossing_edge)
+ {
+ error ("Fallthru edge crosses section boundary (bb %i)",
+ e->src->index);
+ err = 1;
+ }
+ }
if ((e->flags & ~(EDGE_DFS_BACK
| EDGE_CAN_FALLTHRU
break;
case CODE_LABEL:
- /* An addr_vec is placed outside any block block. */
+ /* An addr_vec is placed outside any basic block. */
if (NEXT_INSN (x)
&& GET_CODE (NEXT_INSN (x)) == JUMP_INSN
&& (GET_CODE (PATTERN (NEXT_INSN (x))) == ADDR_DIFF_VEC
if (!bb->succ || !purged)
return purged;
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Purged edges from bb %i\n", bb->index);
+ if (dump_file)
+ fprintf (dump_file, "Purged edges from bb %i\n", bb->index);
if (!optimize)
return purged;
bb->succ->probability = REG_BR_PROB_BASE;
bb->succ->count = bb->count;
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Purged non-fallthru edges from bb %i\n",
+ if (dump_file)
+ fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
bb->index);
return purged;
}
/* Redirect Edge to DEST. */
-static bool
+static edge
cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
{
basic_block src = e->src;
- bool ret;
+ edge ret;
if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
- return false;
+ return NULL;
if (e->dest == dest)
- return true;
+ return e;
if (e->src != ENTRY_BLOCK_PTR
- && try_redirect_by_replacing_jump (e, dest, true))
- return true;
+ && (ret = try_redirect_by_replacing_jump (e, dest, true)))
+ {
+ src->flags |= BB_DIRTY;
+ return ret;
+ }
if (e->src == ENTRY_BLOCK_PTR
&& (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
{
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Redirecting entry edge from bb %i to %i\n",
+ if (dump_file)
+ fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
e->src->index, dest->index);
+ e->src->flags |= BB_DIRTY;
redirect_edge_succ (e, dest);
- return true;
+ return e;
}
/* Redirect_edge_and_branch may decide to turn branch into fallthru edge
&& label_is_jump_target_p (BB_HEAD (e->dest),
BB_END (src)))
{
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Fallthru edge unified with branch "
+ if (dump_file)
+ fprintf (dump_file, "Fallthru edge unified with branch "
"%i->%i redirected to %i\n",
e->src->index, e->dest->index, dest->index);
e->flags &= ~EDGE_FALLTHRU;
if (!redirect_branch_edge (e, dest))
abort ();
e->flags |= EDGE_FALLTHRU;
- return true;
+ e->src->flags |= BB_DIRTY;
+ return e;
}
/* In case we are redirecting fallthru edge to the branch edge
of conditional jump, remove it. */
&& onlyjump_p (BB_END (src)))
delete_insn (BB_END (src));
}
- redirect_edge_succ_nodup (e, dest);
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Fallthru edge %i->%i redirected to %i\n",
+ ret = redirect_edge_succ_nodup (e, dest);
+ if (dump_file)
+ fprintf (dump_file, "Fallthru edge %i->%i redirected to %i\n",
e->src->index, e->dest->index, dest->index);
-
- ret = true;
}
else
ret = redirect_branch_edge (e, dest);
if (simplejump_p (BB_END (src)))
abort ();
+ src->flags |= BB_DIRTY;
return ret;
}
static bool
cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
{
+ bool partitions_ok = true;
+
+ /* If we are partitioning hot/cold basic blocks, we don't want to
+ mess up unconditional or indirect jumps that cross between hot
+ and cold sections. */
+
+ if (flag_reorder_blocks_and_partition
+ && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
+ || find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
+ || a->partition != b->partition))
+ partitions_ok = false;
+
/* There must be exactly one edge in between the blocks. */
return (a->succ && !a->succ->succ_next && a->succ->dest == b
&& !b->pred->pred_next && a != b
/* Must be simple edge. */
&& !(a->succ->flags & EDGE_COMPLEX)
+ && partitions_ok
&& a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
/* If the jump insn has side effects,
we can't kill the edge. */
b->rbi->footer = NULL;
}
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Merged blocks %d and %d.\n",
+ if (dump_file)
+ fprintf (dump_file, "Merged blocks %d and %d.\n",
a->index, b->index);
}
{
}
+/* Return 1 if BB ends with a call, possibly followed by some
+   instructions that must stay with the call, 0 otherwise. */
+
+static bool
+rtl_block_ends_with_call_p (basic_block bb)
+{
+  rtx insn = BB_END (bb);
+
+  /* Scan backwards from the end of the block, skipping any trailing
+     insns that must be kept together with a call, until we hit either
+     a call, the block head, or an insn that need not stay with a call.  */
+  while (GET_CODE (insn) != CALL_INSN
+	 && insn != BB_HEAD (bb)
+	 && keep_with_call_p (insn))
+    insn = PREV_INSN (insn);
+  return (GET_CODE (insn) == CALL_INSN);
+}
+
+/* Return 1 if BB ends with a conditional branch, 0 otherwise.
+   Implements the block_ends_with_condjump_p hook for plain RTL.  */
+
+static bool
+rtl_block_ends_with_condjump_p (basic_block bb)
+{
+  return any_condjump_p (BB_END (bb));
+}
+
+/* Return true if we need to add fake edge to exit.
+   Helper function for rtl_flow_call_edges_add. */
+
+static bool
+need_fake_edge_p (rtx insn)
+{
+  if (!INSN_P (insn))
+    return false;
+
+  /* A call needs a fake edge unless it is excluded by one of the checks
+     below: sibling calls, calls carrying a REG_NORETURN or
+     REG_ALWAYS_RETURN note, and const or pure calls.  */
+  if ((GET_CODE (insn) == CALL_INSN
+       && !SIBLING_CALL_P (insn)
+       && !find_reg_note (insn, REG_NORETURN, NULL)
+       && !find_reg_note (insn, REG_ALWAYS_RETURN, NULL)
+       && !CONST_OR_PURE_CALL_P (insn)))
+    return true;
+
+  /* Volatile asm statements (bare ASM_OPERANDS or wrapped in a
+     PARALLEL) and old-style ASM_INPUTs also need a fake edge.  */
+  return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
+	   && MEM_VOLATILE_P (PATTERN (insn)))
+	  || (GET_CODE (PATTERN (insn)) == PARALLEL
+	      && asm_noperands (insn) != -1
+	      && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
+	  || GET_CODE (PATTERN (insn)) == ASM_INPUT);
+}
+
+/* Add fake edges to the function exit for any non constant and non noreturn
+   calls, volatile inline assembly in the bitmap of blocks specified by
+   BLOCKS or to the whole CFG if BLOCKS is zero. Return the number of blocks
+   that were split.
+
+   The goal is to expose cases in which entering a basic block does not imply
+   that all subsequent instructions must be executed. */
+
+static int
+rtl_flow_call_edges_add (sbitmap blocks)
+{
+  int i;
+  int blocks_split = 0;
+  int last_bb = last_basic_block;
+  bool check_last_block = false;
+
+  if (n_basic_blocks == 0)
+    return 0;
+
+  if (! blocks)
+    check_last_block = true;
+  else
+    check_last_block = TEST_BIT (blocks, EXIT_BLOCK_PTR->prev_bb->index);
+
+  /* In the last basic block, before epilogue generation, there will be
+     a fallthru edge to EXIT. Special care is required if the last insn
+     of the last basic block is a call because make_edge folds duplicate
+     edges, which would result in the fallthru edge also being marked
+     fake, which would result in the fallthru edge being removed by
+     remove_fake_edges, which would result in an invalid CFG.
+
+     Moreover, we can't elide the outgoing fake edge, since the block
+     profiler needs to take this into account in order to solve the minimal
+     spanning tree in the case that the call doesn't return.
+
+     Handle this by adding a dummy instruction in a new last basic block. */
+  if (check_last_block)
+    {
+      basic_block bb = EXIT_BLOCK_PTR->prev_bb;
+      rtx insn = BB_END (bb);
+
+      /* Back up past insns that must be kept in the same block as a call. */
+      while (insn != BB_HEAD (bb)
+	     && keep_with_call_p (insn))
+	insn = PREV_INSN (insn);
+
+      if (need_fake_edge_p (insn))
+	{
+	  edge e;
+
+	  for (e = bb->succ; e; e = e->succ_next)
+	    if (e->dest == EXIT_BLOCK_PTR)
+	      {
+		insert_insn_on_edge (gen_rtx_USE (VOIDmode, const0_rtx), e);
+		commit_edge_insertions ();
+		break;
+	      }
+	}
+    }
+
+  /* Now add fake edges to the function exit for any non constant
+     calls since there is no way that we can determine if they will
+     return or not... */
+
+  for (i = 0; i < last_bb; i++)
+    {
+      basic_block bb = BASIC_BLOCK (i);
+      rtx insn;
+      rtx prev_insn;
+
+      /* BASIC_BLOCK (i) may be NULL for removed blocks; skip those.  */
+      if (!bb)
+	continue;
+
+      if (blocks && !TEST_BIT (blocks, i))
+	continue;
+
+      /* Walk the block's insns from last to first, adding a fake edge
+         (and possibly splitting the block) at each insn that needs one. */
+      for (insn = BB_END (bb); ; insn = prev_insn)
+	{
+	  prev_insn = PREV_INSN (insn);
+	  if (need_fake_edge_p (insn))
+	    {
+	      edge e;
+	      rtx split_at_insn = insn;
+
+	      /* Don't split the block between a call and an insn that should
+	         remain in the same block as the call. */
+	      if (GET_CODE (insn) == CALL_INSN)
+		while (split_at_insn != BB_END (bb)
+		       && keep_with_call_p (NEXT_INSN (split_at_insn)))
+		  split_at_insn = NEXT_INSN (split_at_insn);
+
+	      /* The handling above of the final block before the epilogue
+	         should be enough to verify that there is no edge to the exit
+		 block in CFG already. Calling make_edge in such case would
+		 cause us to mark that edge as fake and remove it later. */
+
+#ifdef ENABLE_CHECKING
+	      if (split_at_insn == BB_END (bb))
+		for (e = bb->succ; e; e = e->succ_next)
+		  if (e->dest == EXIT_BLOCK_PTR)
+		    abort ();
+#endif
+
+	      /* Note that the following may create a new basic block
+		 and renumber the existing basic blocks. */
+	      if (split_at_insn != BB_END (bb))
+		{
+		  e = split_block (bb, split_at_insn);
+		  if (e)
+		    blocks_split++;
+		}
+
+	      make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
+	    }
+
+	  if (insn == BB_HEAD (bb))
+	    break;
+	}
+    }
+
+  /* If we split any blocks, re-check CFG consistency.  */
+  if (blocks_split)
+    verify_flow_info ();
+
+  return blocks_split;
+}
+
/* Implementation of CFG manipulation for linearized RTL. */
struct cfg_hooks rtl_cfg_hooks = {
"rtl",
rtl_move_block_after,
rtl_can_merge_blocks, /* can_merge_blocks_p */
rtl_merge_blocks,
+ rtl_predict_edge,
+ rtl_predicted_by_p,
+ NULL, /* can_duplicate_block_p */
+ NULL, /* duplicate_block */
rtl_split_edge,
rtl_make_forwarder_block,
- rtl_tidy_fallthru_edge
+ rtl_tidy_fallthru_edge,
+ rtl_block_ends_with_call_p,
+ rtl_block_ends_with_condjump_p,
+ rtl_flow_call_edges_add
};
/* Implementation of CFG manipulation for cfg layout RTL, where
basic block connected via fallthru edges does not have to be adjacent.
This representation will hopefully become the default one in future
version of the compiler. */
+
+/* We do not want to declare these functions in a header file, since they
+ should only be used through the cfghooks interface, and we do not want to
+ move them here since it would require also moving quite a lot of related
+ code. */
+extern bool cfg_layout_can_duplicate_bb_p (basic_block);
+extern basic_block cfg_layout_duplicate_bb (basic_block);
+
struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
"cfglayout mode",
rtl_verify_flow_info_1,
rtl_move_block_after,
cfg_layout_can_merge_blocks_p,
cfg_layout_merge_blocks,
+ rtl_predict_edge,
+ rtl_predicted_by_p,
+ cfg_layout_can_duplicate_bb_p,
+ cfg_layout_duplicate_bb,
cfg_layout_split_edge,
rtl_make_forwarder_block,
- NULL
+ NULL,
+ rtl_block_ends_with_call_p,
+ rtl_block_ends_with_condjump_p,
+ rtl_flow_call_edges_add
};
+