/* If deleting a jump, decrement the use count of the label. Deleting
the label itself should happen in the normal course of block merging. */
- if (JUMP_P (insn)
- && JUMP_LABEL (insn)
- && LABEL_P (JUMP_LABEL (insn)))
+ if (JUMP_P (insn))
{
- LABEL_NUSES (JUMP_LABEL (insn))--;
- JUMP_LABEL (insn) = NULL;
- }
+ if (JUMP_LABEL (insn)
+ && LABEL_P (JUMP_LABEL (insn)))
+ LABEL_NUSES (JUMP_LABEL (insn))--;
-  /* Also if deleting an insn that references a label.  */
-  else
-    {
-      while ((note = find_reg_note (insn, REG_LABEL, NULL_RTX)) != NULL_RTX
+      /* If there are more targets, remove them too.  Each note must be
+	 removed from the insn as it is processed, otherwise find_reg_note
+	 returns the same note forever and the loop never terminates.  */
+      while ((note
+	      = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
	     && LABEL_P (XEXP (note, 0)))
	{
	  LABEL_NUSES (XEXP (note, 0))--;
+	  remove_note (insn, note);
	}
    }
+ /* Also if deleting any insn that references a label as an operand. */
+ while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
+ && LABEL_P (XEXP (note, 0)))
+ {
+ LABEL_NUSES (XEXP (note, 0))--;
+ remove_note (insn, note);
+ }
+
if (JUMP_P (insn)
&& (GET_CODE (PATTERN (insn)) == ADDR_VEC
|| GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
}
/* Like delete_insn but also purge dead edges from BB. */
+
rtx
delete_insn_and_edges (rtx insn)
{
start = next;
}
}
+
+/* Like delete_insn_chain but also purge dead edges from BB.  */
+
+void
+delete_insn_chain_and_edges (rtx first, rtx last)
+{
+  bool purge = false;
+
+  /* Decide up front whether LAST ends its basic block: if so, deleting
+     the chain may leave dead outgoing edges that must be purged.  */
+  if (INSN_P (last)
+      && BLOCK_FOR_INSN (last)
+      && BB_END (BLOCK_FOR_INSN (last)) == last)
+    purge = true;
+  delete_insn_chain (first, last, false);
+  if (purge)
+    /* NOTE(review): this reads BLOCK_FOR_INSN (last) after LAST has been
+       deleted -- assumes delete_insn_chain leaves that field intact;
+       confirm against delete_insn_chain's implementation.  */
+    purge_dead_edges (BLOCK_FOR_INSN (last));
+}
\f
/* Create a new basic block consisting of the instructions between HEAD and END
inclusive. This function is designed to allow fast BB construction - reuses
commit_edge_insertions ();
}
-/* Update insns block within BB. */
+/* Update BLOCK_FOR_INSN of insns between BEGIN and END
+   (or BARRIER if found) and notify df of the bb change.
+   The insn chain range is inclusive
+   (i.e. both BEGIN and END will be updated).  */
-void
-update_bb_for_insn (basic_block bb)
+static void
+update_bb_for_insn_chain (rtx begin, rtx end, basic_block bb)
 {
   rtx insn;
-  for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
+  end = NEXT_INSN (end);
+  for (insn = begin; insn != end; insn = NEXT_INSN (insn))
     {
       if (!BARRIER_P (insn))
	 {
	   set_block_for_insn (insn, bb);
	   df_insn_change_bb (insn);
	 }
-      if (insn == BB_END (bb))
-	 break;
     }
 }
+
+/* Update BLOCK_FOR_INSN of insns in BB to BB,
+ and notify df of the change. */
+
+void
+update_bb_for_insn (basic_block bb)
+{
+ update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
+}
+
\f
/* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
note associated with the BLOCK. */
/* Reassociate the insns of B with A. */
if (!b_empty)
{
- rtx x;
-
- for (x = a_end; x != b_end; x = NEXT_INSN (x))
- {
- set_block_for_insn (x, a);
- df_insn_change_bb (x);
- }
-
- set_block_for_insn (b_end, a);
- df_insn_change_bb (b_end);
+ update_bb_for_insn_chain (a_end, b_end, a);
a_end = b_end;
}
/* Return true when block A and B can be merged. */
-static bool
-rtl_can_merge_blocks (const_basic_block a, const_basic_block b)
+static bool
+rtl_can_merge_blocks (basic_block a, basic_block b)
{
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
{
rtx target_label = block_label (target);
rtx barrier, label, table;
- bool jump_p;
emit_jump_insn_after_noloc (gen_jump (target_label), insn);
JUMP_LABEL (BB_END (src)) = target_label;
INSN_UID (insn), INSN_UID (BB_END (src)));
+ delete_insn_chain (kill_from, insn, false);
+
/* Recognize a tablejump that we are converting to a
simple jump and remove its associated CODE_LABEL
and ADDR_VEC or ADDR_DIFF_VEC. */
- jump_p = tablejump_p (insn, &label, &table);
-
- delete_insn_chain (kill_from, insn, false);
- if (jump_p)
- delete_insn_chain (label, table, false);
+ if (tablejump_p (insn, &label, &table))
+ delete_insn_chain (label, table, false);
barrier = next_nonnote_insn (BB_END (src));
if (!barrier || !BARRIER_P (barrier))
which originally were or were created before jump table are
inside the basic block. */
rtx new_insn = BB_END (src);
- rtx tmp;
- for (tmp = NEXT_INSN (BB_END (src)); tmp != barrier;
- tmp = NEXT_INSN (tmp))
- {
- set_block_for_insn (tmp, src);
- df_insn_change_bb (tmp);
- }
+ update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
+ PREV_INSN (barrier), src);
NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);
}
/* Return true when blocks A and B can be safely merged. */
+
static bool
-cfg_layout_can_merge_blocks_p (const_basic_block a, const_basic_block b)
+cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
{
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
first = NEXT_INSN (first);
gcc_assert (NOTE_INSN_BASIC_BLOCK_P (first));
BB_HEAD (b) = NULL;
+
+ /* emit_insn_after_noloc doesn't call df_insn_change_bb.
+ We need to explicitly call. */
+ update_bb_for_insn_chain (NEXT_INSN (first),
+ BB_END (b),
+ a);
+
delete_insn (first);
}
/* Otherwise just re-associate the instructions. */
{
rtx insn;
- for (insn = BB_HEAD (b);
- insn != NEXT_INSN (BB_END (b));
- insn = NEXT_INSN (insn))
- {
- set_block_for_insn (insn, a);
- df_insn_change_bb (insn);
- }
+ update_bb_for_insn_chain (BB_HEAD (b), BB_END (b), a);
insn = BB_HEAD (b);
/* Skip possible DELETED_LABEL insn. */
? NEXT_INSN (BB_END (e->src)) : get_insns (),
NULL_RTX, e->src);
+ if (e->dest == EXIT_BLOCK_PTR)
+ BB_COPY_PARTITION (new_bb, e->src);
+ else
+ BB_COPY_PARTITION (new_bb, e->dest);
make_edge (new_bb, e->dest, EDGE_FALLTHRU);
redirect_edge_and_branch_force (e, new_bb);
instructions that must stay with the call, 0 otherwise. */
static bool
-rtl_block_ends_with_call_p (const_basic_block bb)
+rtl_block_ends_with_call_p (basic_block bb)
{
rtx insn = BB_END (bb);
while (!CALL_P (insn)
&& insn != BB_HEAD (bb)
- && keep_with_call_p (insn))
+ && (keep_with_call_p (insn)
+ || NOTE_P (insn)))
insn = PREV_INSN (insn);
return (CALL_P (insn));
}