/* Control flow optimization code for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
This file is part of GCC.
#include "params.h"
#include "tm_p.h"
#include "target.h"
+#include "regs.h"
/* cleanup_cfg maintains the following flags for each basic block. */
#define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
+/* Set to true when we are running the first pass of the try_optimize_cfg loop. */
+static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool insns_match_p (int, rtx, rtx);
-static bool label_is_jump_target_p (rtx, rtx);
static bool tail_recursion_label_p (rtx);
static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
/* Verify that we've got a normal conditional branch at the end
of the block. */
- cbranch_insn = cbranch_block->end;
+ cbranch_insn = BB_END (cbranch_block);
if (!any_condjump_p (cbranch_insn))
return false;
if (rtl_dump_file)
fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
- INSN_UID (cbranch_insn), INSN_UID (jump_block->end));
+ INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
/* Success. Update the CFG to match. Note that after this point
the edge variable names appear backwards; the redirection is done
cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
update_br_prob_note (cbranch_block);
- end = jump_block->end;
+ end = BB_END (jump_block);
/* Deleting a block may produce an unreachable code warning even when we are
- not deleting anything live. Supress it by moving all the line number
+ not deleting anything live. Suppress it by moving all the line number
notes out of the block. */
- for (insn = jump_block->head; insn != NEXT_INSN (jump_block->end);
+ for (insn = BB_HEAD (jump_block); insn != NEXT_INSN (BB_END (jump_block));
insn = next)
{
next = NEXT_INSN (insn);
if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
{
- reorder_insns (insn, insn, end);
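+ /* If the note is the last insn of the block, retreat BB_END before
+    moving the note so the block boundary stays valid.  */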
+ if (insn == BB_END (jump_block))
+ {
+ BB_END (jump_block) = PREV_INSN (insn);
+ if (insn == end)
+ break;
+ }
+ reorder_insns_nobb (insn, insn, end);
end = insn;
}
}
/* Delete the block with the unconditional jump, and clean up the mess. */
- delete_block (jump_block);
- tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);
+ delete_basic_block (jump_block);
+ tidy_fallthru_edge (cbranch_jump_edge);
return true;
}
CLEAR_REGNO_REG_SET (nonequal, regno);
if (regno < FIRST_PSEUDO_REGISTER)
{
- int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
+ int n = hard_regno_nregs[regno][GET_MODE (dest)];
while (--n > 0)
CLEAR_REGNO_REG_SET (nonequal, regno + n);
}
SET_REGNO_REG_SET (nonequal, regno);
if (regno < FIRST_PSEUDO_REGISTER)
{
- int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
+ int n = hard_regno_nregs[regno][GET_MODE (dest)];
while (--n > 0)
SET_REGNO_REG_SET (nonequal, regno + n);
}
return 1;
if (regno < FIRST_PSEUDO_REGISTER)
{
- int n = HARD_REGNO_NREGS (regno, GET_MODE (*x));
+ int n = hard_regno_nregs[regno][GET_MODE (*x)];
while (--n > 0)
if (REGNO_REG_SET_P (nonequal, regno + n))
return 1;
}
/* Second branch must end with onlyjump, as we will eliminate the jump. */
- if (!any_condjump_p (e->src->end))
+ if (!any_condjump_p (BB_END (e->src)))
return NULL;
- if (!any_condjump_p (b->end) || !onlyjump_p (b->end))
+ if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
{
BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
return NULL;
}
- set1 = pc_set (e->src->end);
- set2 = pc_set (b->end);
+ set1 = pc_set (BB_END (e->src));
+ set2 = pc_set (BB_END (b));
if (((e->flags & EDGE_FALLTHRU) != 0)
!= (XEXP (SET_SRC (set1), 1) == pc_rtx))
reverse1 = true;
cond1 = XEXP (SET_SRC (set1), 0);
cond2 = XEXP (SET_SRC (set2), 0);
if (reverse1)
- code1 = reversed_comparison_code (cond1, e->src->end);
+ code1 = reversed_comparison_code (cond1, BB_END (e->src));
else
code1 = GET_CODE (cond1);
code2 = GET_CODE (cond2);
- reversed_code2 = reversed_comparison_code (cond2, b->end);
+ reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
if (!comparison_dominates_p (code1, code2)
&& !comparison_dominates_p (code1, reversed_code2))
/* Short circuit cases where block B contains some side effects, as we can't
safely bypass it. */
- for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
+ for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
insn = NEXT_INSN (insn))
if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
{
cselib_init ();
/* First process all values computed in the source basic block. */
- for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
+ for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src));
insn = NEXT_INSN (insn))
if (INSN_P (insn))
cselib_process_insn (insn);
processing as if it were the same basic block.
Our goal is to prove that the whole block is a NOOP. */
- for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end) && !failed;
+ for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)) && !failed;
insn = NEXT_INSN (insn))
{
if (INSN_P (insn))
int counter;
bool threaded = false;
int nthreaded_edges = 0;
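+ /* Thread jumps only on the first pass or when the source or the target
+    block has changed (BB_DIRTY) since the previous pass.  */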
+ bool may_thread = first_pass | (b->flags & BB_DIRTY);
next = e->succ_next;
{
basic_block new_target = NULL;
bool new_target_threaded = false;
+ may_thread |= target->flags & BB_DIRTY;
if (FORWARDER_BLOCK_P (target)
&& target->succ->dest != EXIT_BLOCK_PTR)
/* Allow threading over only one edge at a time to simplify updating
of probabilities. */
- else if (mode & CLEANUP_THREADING)
+ else if ((mode & CLEANUP_THREADING) && may_thread)
{
edge t = thread_jump (mode, e, target);
if (t)
if ((mode & CLEANUP_PRE_LOOP) && optimize)
{
rtx insn = (target->succ->flags & EDGE_FALLTHRU
- ? target->head : prev_nonnote_insn (target->end));
+ ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));
if (GET_CODE (insn) != NOTE)
insn = NEXT_INSN (insn);
at this time; it can mess up the loop optimizer's
recognition of some patterns. */
- insn = PREV_INSN (target->head);
+ insn = PREV_INSN (BB_HEAD (target));
if (insn && GET_CODE (insn) == NOTE
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
break;
return changed;
}
\f
-/* Return true if LABEL is a target of JUMP_INSN. This applies only
- to non-complex jumps. That is, direct unconditional, conditional,
- and tablejumps, but not computed jumps or returns. It also does
- not apply to the fallthru case of a conditional jump. */
-
-static bool
-label_is_jump_target_p (rtx label, rtx jump_insn)
-{
- rtx tmp = JUMP_LABEL (jump_insn);
-
- if (label == tmp)
- return true;
-
- if (tablejump_p (jump_insn, NULL, &tmp))
- {
- rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
- int i, veclen = GET_NUM_ELEM (vec);
-
- for (i = 0; i < veclen; ++i)
- if (XEXP (RTVEC_ELT (vec, i), 0) == label)
- return true;
- }
-
- return false;
-}
-
/* Return true if LABEL is used for tail recursion. */
static bool
{
rtx barrier;
- barrier = next_nonnote_insn (a->end);
+ barrier = next_nonnote_insn (BB_END (a));
if (GET_CODE (barrier) != BARRIER)
abort ();
delete_insn (barrier);
and adjust the block trees appropriately. Even better would be to have
a tighter connection between block trees and rtl so that this is not
necessary. */
- if (squeeze_notes (&a->head, &a->end))
+ if (squeeze_notes (&BB_HEAD (a), &BB_END (a)))
abort ();
/* Scramble the insn chain. */
- if (a->end != PREV_INSN (b->head))
- reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
+ if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
+ reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
a->flags |= BB_DIRTY;
if (rtl_dump_file)
rtx barrier, real_b_end;
rtx label, table;
- real_b_end = b->end;
+ real_b_end = BB_END (b);
/* If there is a jump table following block B, temporarily add the jump table
to block B so that it will also be moved to the correct location. */
- if (tablejump_p (b->end, &label, &table)
- && prev_active_insn (label) == b->end)
+ if (tablejump_p (BB_END (b), &label, &table)
+ && prev_active_insn (label) == BB_END (b))
{
- b->end = table;
+ BB_END (b) = table;
}
/* There had better have been a barrier there. Delete it. */
- barrier = NEXT_INSN (b->end);
+ barrier = NEXT_INSN (BB_END (b));
if (barrier && GET_CODE (barrier) == BARRIER)
delete_insn (barrier);
and adjust the block trees appropriately. Even better would be to have
a tighter connection between block trees and rtl so that this is not
necessary. */
- if (squeeze_notes (&b->head, &b->end))
+ if (squeeze_notes (&BB_HEAD (b), &BB_END (b)))
abort ();
/* Scramble the insn chain. */
- reorder_insns_nobb (b->head, b->end, a->end);
+ reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
/* Restore the real end of b. */
- b->end = real_b_end;
+ BB_END (b) = real_b_end;
if (rtl_dump_file)
fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
that would make optimize_sibling_and_tail_recursive_calls more
complex for no gain. */
if ((mode & CLEANUP_PRE_SIBCALL)
- && GET_CODE (c->head) == CODE_LABEL
- && tail_recursion_label_p (c->head))
+ && GET_CODE (BB_HEAD (c)) == CODE_LABEL
+ && tail_recursion_label_p (BB_HEAD (c)))
return NULL;
/* If B has a fallthru edge to C, no need to move anything. */
/* Skip simple jumps at the end of the blocks. Complex jumps still
need to be compared for equivalence, which we'll do below. */
- i1 = bb1->end;
+ i1 = BB_END (bb1);
last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
if (onlyjump_p (i1)
|| (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
i1 = PREV_INSN (i1);
}
- i2 = bb2->end;
+ i2 = BB_END (bb2);
if (onlyjump_p (i2)
|| (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
{
while (true)
{
/* Ignore notes. */
- while (!INSN_P (i1) && i1 != bb1->head)
+ while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
i1 = PREV_INSN (i1);
- while (!INSN_P (i2) && i2 != bb2->head)
+ while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
i2 = PREV_INSN (i2);
- if (i1 == bb1->head || i2 == bb2->head)
+ if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
break;
if (!insns_match_p (mode, i1, i2))
Two, it keeps line number notes as matched as may be. */
if (ninsns)
{
- while (last1 != bb1->head && !INSN_P (PREV_INSN (last1)))
+ while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
last1 = PREV_INSN (last1);
- if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
+ if (last1 != BB_HEAD (bb1) && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
last1 = PREV_INSN (last1);
- while (last2 != bb2->head && !INSN_P (PREV_INSN (last2)))
+ while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
last2 = PREV_INSN (last2);
- if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
+ if (last2 != BB_HEAD (bb2) && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
last2 = PREV_INSN (last2);
*f1 = last1;
unconditional jump, or a fake edge to exit. */
if (bb1->succ && !bb1->succ->succ_next
&& (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
- && (GET_CODE (bb1->end) != JUMP_INSN || simplejump_p (bb1->end)))
+ && (GET_CODE (BB_END (bb1)) != JUMP_INSN || simplejump_p (BB_END (bb1))))
return (bb2->succ && !bb2->succ->succ_next
&& (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
- && (GET_CODE (bb2->end) != JUMP_INSN || simplejump_p (bb2->end)));
+ && (GET_CODE (BB_END (bb2)) != JUMP_INSN || simplejump_p (BB_END (bb2))));
/* Match conditional jumps - this may get tricky when fallthru and branch
edges are crossed. */
if (bb1->succ
&& bb1->succ->succ_next
&& !bb1->succ->succ_next->succ_next
- && any_condjump_p (bb1->end)
- && onlyjump_p (bb1->end))
+ && any_condjump_p (BB_END (bb1))
+ && onlyjump_p (BB_END (bb1)))
{
edge b1, f1, b2, f2;
bool reverse, match;
if (!bb2->succ
|| !bb2->succ->succ_next
|| bb2->succ->succ_next->succ_next
- || !any_condjump_p (bb2->end)
- || !onlyjump_p (bb2->end))
+ || !any_condjump_p (BB_END (bb2))
+ || !onlyjump_p (BB_END (bb2)))
return false;
b1 = BRANCH_EDGE (bb1);
else
return false;
- set1 = pc_set (bb1->end);
- set2 = pc_set (bb2->end);
+ set1 = pc_set (BB_END (bb1));
+ set2 = pc_set (BB_END (bb2));
if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
!= (XEXP (SET_SRC (set2), 1) == pc_rtx))
reverse = !reverse;
cond2 = XEXP (SET_SRC (set2), 0);
code1 = GET_CODE (cond1);
if (reverse)
- code2 = reversed_comparison_code (cond2, bb2->end);
+ code2 = reversed_comparison_code (cond2, BB_END (bb2));
else
code2 = GET_CODE (cond2);
rtx label1, label2;
rtx table1, table2;
- if (tablejump_p (bb1->end, &label1, &table1)
- && tablejump_p (bb2->end, &label2, &table2)
+ if (tablejump_p (BB_END (bb1), &label1, &table1)
+ && tablejump_p (BB_END (bb2), &label2, &table2)
&& GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
{
/* The labels should never be the same rtx. If they really are the same
/* If LABEL2 is referenced in BB1->END, do not do anything
because we would lose information when replacing
LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
- if (label1 != label2 && !rtx_referenced_p (label2, bb1->end))
+ if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
{
/* Set IDENTICAL to true when the tables are identical. */
bool identical = false;
rr.r1 = label1;
rr.r2 = label2;
rr.update_label_nuses = false;
- for_each_rtx (&bb1->end, replace_label, &rr);
+ for_each_rtx (&BB_END (bb1), replace_label, &rr);
- match = insns_match_p (mode, bb1->end, bb2->end);
+ match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
if (rtl_dump_file && match)
fprintf (rtl_dump_file,
"Tablejumps in bb %i and %i match.\n",
from the instruction is deleted too. */
rr.r1 = label2;
rr.r2 = label1;
- for_each_rtx (&bb1->end, replace_label, &rr);
+ for_each_rtx (&BB_END (bb1), replace_label, &rr);
return match;
}
/* First ensure that the instructions match. There may be many outgoing
edges so this test is generally cheaper. */
- if (!insns_match_p (mode, bb1->end, bb2->end))
+ if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
return false;
/* Search the outgoing edges, ensure that the counts do match, find possible
/* Ensure the same EH region. */
{
- rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
- rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);
+ rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
+ rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
if (!n1 && n2)
return false;
rtx label1, label2;
rtx table1, table2;
- if (tablejump_p (src1->end, &label1, &table1)
- && tablejump_p (src2->end, &label2, &table2)
+ if (tablejump_p (BB_END (src1), &label1, &table1)
+ && tablejump_p (BB_END (src2), &label2, &table2)
&& label1 != label2)
{
replace_label_data rr;
/* Do not replace the label in SRC1->END because when deleting
a block whose end is a tablejump, the tablejump referenced
from the instruction is deleted too. */
- if (insn != src1->end)
+ if (insn != BB_END (src1))
for_each_rtx (&insn, replace_label, &rr);
}
}
#endif
/* Avoid splitting if possible. */
- if (newpos2 == src2->head)
+ if (newpos2 == BB_HEAD (src2))
redirect_to = src2;
else
{
to_remove = redirect_from->succ->dest;
redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
- delete_block (to_remove);
+ delete_basic_block (to_remove);
update_forwarder_flag (redirect_from);
If there is a match, we'll do it the other way around. */
if (e == fallthru)
continue;
+ /* If nothing changed since the last attempt, there is nothing
+ we can do. */
+ if (!first_pass
+ && (!(e->src->flags & BB_DIRTY)
+ && !(fallthru->src->flags & BB_DIRTY)))
+ continue;
if (try_crossjump_to_edge (mode, e, fallthru))
{
if (e->src->index > e2->src->index)
continue;
+ /* If nothing changed since the last attempt, there is nothing
+ we can do. */
+ if (!first_pass
+ && (!(e->src->flags & BB_DIRTY)
+ && !(e2->src->flags & BB_DIRTY)))
+ continue;
+
if (try_crossjump_to_edge (mode, e, e2))
{
changed = true;
FOR_EACH_BB (bb)
update_forwarder_flag (bb);
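+ /* Crossjumping and jump threading now track changed blocks through
+    BB_DIRTY, so the flags must start out clear for those modes too.  */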
- if (mode & CLEANUP_UPDATE_LIFE)
+ if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
clear_bb_flags ();
if (! (* targetm.cannot_modify_jumps_p) ())
{
+ first_pass = true;
/* Attempt to merge blocks as made possible by edge removal. If
a block has only one successor, and the successor has only
one predecessor, they may be combined. */
fprintf (rtl_dump_file, "Deleting block %i.\n",
b->index);
- delete_block (b);
+ delete_basic_block (b);
if (!(mode & CLEANUP_CFGLAYOUT))
changed = true;
b = c;
if (b->pred->pred_next == NULL
&& (b->pred->flags & EDGE_FALLTHRU)
&& !(b->pred->flags & EDGE_COMPLEX)
- && GET_CODE (b->head) == CODE_LABEL
+ && GET_CODE (BB_HEAD (b)) == CODE_LABEL
&& (!(mode & CLEANUP_PRE_SIBCALL)
- || !tail_recursion_label_p (b->head))
+ || !tail_recursion_label_p (BB_HEAD (b)))
/* If the previous block ends with a branch to this
block, we can't delete the label. Normally this
is a condjump that is yet to be simplified, but
it may also be a tablejump with
some element going to the same place as the
default (fallthru). */
&& (b->pred->src == ENTRY_BLOCK_PTR
- || GET_CODE (b->pred->src->end) != JUMP_INSN
- || ! label_is_jump_target_p (b->head,
- b->pred->src->end)))
+ || GET_CODE (BB_END (b->pred->src)) != JUMP_INSN
+ || ! label_is_jump_target_p (BB_HEAD (b),
+ BB_END (b->pred->src))))
{
- rtx label = b->head;
+ rtx label = BB_HEAD (b);
delete_insn_chain (label, label);
/* In case the label is undeletable, move it after the
BASIC_BLOCK note. */
- if (NOTE_LINE_NUMBER (b->head) == NOTE_INSN_DELETED_LABEL)
+ if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
{
- rtx bb_note = NEXT_INSN (b->head);
+ rtx bb_note = NEXT_INSN (BB_HEAD (b));
reorder_insns_nobb (label, label, bb_note);
- b->head = bb_note;
+ BB_HEAD (b) = bb_note;
}
if (rtl_dump_file)
fprintf (rtl_dump_file, "Deleted label in block %i.\n",
if (!(mode & CLEANUP_CFGLAYOUT)
&& b->pred->pred_next == NULL
&& (b->pred->flags & EDGE_FALLTHRU)
- && GET_CODE (b->head) != CODE_LABEL
+ && GET_CODE (BB_HEAD (b)) != CODE_LABEL
&& FORWARDER_BLOCK_P (b)
/* Note that forwarder_block_p being true ensures that
there is a successor for this block. */
c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
redirect_edge_succ_nodup (b->pred, b->succ->dest);
- delete_block (b);
+ delete_basic_block (b);
changed = true;
b = c;
}
else if (!(mode & CLEANUP_CFGLAYOUT)
/* If the jump insn has side effects,
we can't kill the edge. */
- && (GET_CODE (b->end) != JUMP_INSN
- || (flow2_completed
- ? simplejump_p (b->end)
- : onlyjump_p (b->end)))
+ && (GET_CODE (BB_END (b)) != JUMP_INSN
+ || (reload_completed
+ ? simplejump_p (BB_END (b))
+ : onlyjump_p (BB_END (b))))
&& (next = merge_blocks_move (s, b, c, mode)))
{
b = next;
/* If B has a single outgoing edge, but uses a
non-trivial jump instruction without side-effects, we
can either delete the jump entirely, or replace it
- with a simple unconditional jump. Use
- redirect_edge_and_branch to do the dirty work. */
+ with a simple unconditional jump. */
if (b->succ
&& ! b->succ->succ_next
&& b->succ->dest != EXIT_BLOCK_PTR
- && onlyjump_p (b->end)
- && redirect_edge_and_branch (b->succ, b->succ->dest))
+ && onlyjump_p (BB_END (b))
+ && try_redirect_by_replacing_jump (b->succ, b->succ->dest,
+ (mode & CLEANUP_CFGLAYOUT) != 0))
{
update_forwarder_flag (b);
changed_here = true;
#endif
changed_overall |= changed;
+ first_pass = false;
}
while (changed);
}
if (!(b->flags & BB_REACHABLE))
{
- delete_block (b);
+ delete_basic_block (b);
changed = true;
}
}
PROP_DEATH_NOTES
| PROP_SCAN_DEAD_CODE
| PROP_KILL_DEAD_CODE
- | PROP_LOG_LINKS))
+ | ((mode & CLEANUP_LOG_LINKS)
+ ? PROP_LOG_LINKS : 0)))
break;
}
else if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_PRE_SIBCALL))