/* Control flow optimization code for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010
Free Software Foundation, Inc.
This file is part of GCC.
#include "expr.h"
#include "df.h"
#include "dce.h"
+#include "dbgcnt.h"
#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
-static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool old_insns_match_p (int, rtx, rtx);
static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
return NULL;
}
- cselib_init (false);
+ cselib_init (0);
/* First process all values computed in the source basic block. */
for (insn = NEXT_INSN (BB_HEAD (e->src));
and cold sections.
Basic block partitioning may result in some jumps that appear to
- be optimizable (or blocks that appear to be mergeable), but which really m
- ust be left untouched (they are required to make it safely across
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
partition boundaries). See the comments at the top of
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
{
basic_block target, first;
- int counter;
+ int counter, goto_locus;
bool threaded = false;
int nthreaded_edges = 0;
bool may_thread = first_pass | df_get_bb_dirty (b);
target = first = e->dest;
counter = NUM_FIXED_BLOCKS;
+ goto_locus = e->goto_locus;
/* If we are partitioning hot/cold basic_blocks, we don't want to mess
up jumps that cross between hot/cold sections.
new_target = single_succ (target);
if (target == new_target)
counter = n_basic_blocks;
+ else if (!optimize)
+ {
+ /* When not optimizing, ensure that edges or forwarder
+ blocks with different locus are not optimized out. */
+ int locus = single_succ_edge (target)->goto_locus;
+
+ if (locus && goto_locus && !locator_eq (locus, goto_locus))
+ counter = n_basic_blocks;
+ else if (locus)
+ goto_locus = locus;
+
+ if (INSN_P (BB_END (target)))
+ {
+ locus = INSN_LOCATOR (BB_END (target));
+
+ if (locus && goto_locus
+ && !locator_eq (locus, goto_locus))
+ counter = n_basic_blocks;
+ else if (locus)
+ goto_locus = locus;
+ }
+ }
}
/* Allow threading over only one edge at a time to simplify updating
int edge_frequency;
int n = 0;
+ e->goto_locus = goto_locus;
+
/* Don't force if target is exit block. */
if (threaded && target != EXIT_BLOCK_PTR)
{
if (GET_CODE (i1) != GET_CODE (i2))
return false;
+ /* __builtin_unreachable() may lead to empty blocks (ending with
+ NOTE_INSN_BASIC_BLOCK). They may be crossjumped. */
+ if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
+ return true;
+
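+  /* A sketch of the situation: with source along the lines of
+
+       if (x)
+         __builtin_unreachable ();
+
+     expanding the call can leave the block entered when X is true with
+     nothing but its NOTE_INSN_BASIC_BLOCK note, and the check above then
+     lets it match any other such empty block.  */
+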
p1 = PATTERN (i1);
p2 = PATTERN (i2);
be filled that clobbers a parameter expected by the subroutine.
??? We take the simple route for now and assume that if they're
- equal, they were constructed identically. */
+ equal, they were constructed identically.
+
+ Also check for identical exception regions. */
+
-  if (CALL_P (i1)
-      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
+  if (CALL_P (i1))
+    {
+      /* Ensure the same EH region.  */
+      rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
+      rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
+
+      if (!n1 && n2)
+        return false;
+
+      if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
+        return false;
+
+ if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
CALL_INSN_FUNCTION_USAGE (i2))
- || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
- return false;
+ || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
+ return false;
+ }
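+
+  /* A hypothetical case the test above guards against: a call issued
+     inside a C++ try block and an otherwise identical call outside one
+     carry different REG_EH_REGION notes, so they must not be merged even
+     though their patterns compare equal.  */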
#ifdef STACK_REGS
/* If cross_jump_death_matters is not 0, the insn's mode
? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
return true;
- /* Do not do EQUIV substitution after reload. First, we're undoing the
- work of reload_cse. Second, we may be undoing the work of the post-
- reload splitting pass. */
- /* ??? Possibly add a new phase switch variable that can be used by
- targets to disallow the troublesome insns after splitting. */
- if (!reload_completed)
- {
- /* The following code helps take care of G++ cleanups. */
- rtx equiv1 = find_reg_equal_equiv_note (i1);
- rtx equiv2 = find_reg_equal_equiv_note (i2);
-
- if (equiv1 && equiv2
- /* If the equivalences are not to a constant, they may
- reference pseudos that no longer exist, so we can't
- use them. */
- && (! reload_completed
- || (CONSTANT_P (XEXP (equiv1, 0))
- && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
- {
- rtx s1 = single_set (i1);
- rtx s2 = single_set (i2);
- if (s1 != 0 && s2 != 0
- && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
- {
- validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
- validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
- if (! rtx_renumbered_equal_p (p1, p2))
- cancel_changes (0);
- else if (apply_change_group ())
- return true;
- }
- }
- }
-
return false;
}
\f
+/* When comparing insns I1 and I2 in flow_find_cross_jump or
+ flow_find_head_matching_sequence, ensure the notes match. */
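+
+/* For instance (purely illustrative values): if I1 carries a note
+   (REG_EQUAL (const_int 4)) while I2 carries (REG_EQUAL (const_int 8)),
+   keeping either note on the surviving insn would assert a wrong
+   equivalence, so both notes are removed; a note present on only one of
+   the insns is likewise dropped.  */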
+
+static void
+merge_notes (rtx i1, rtx i2)
+{
+ /* If the merged insns have different REG_EQUAL notes, then
+ remove them. */
+ rtx equiv1 = find_reg_equal_equiv_note (i1);
+ rtx equiv2 = find_reg_equal_equiv_note (i2);
+
+ if (equiv1 && !equiv2)
+ remove_note (i1, equiv1);
+ else if (!equiv1 && equiv2)
+ remove_note (i2, equiv2);
+ else if (equiv1 && equiv2
+ && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
+ {
+ remove_note (i1, equiv1);
+ remove_note (i2, equiv2);
+ }
+}
+
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of insns that are equivalent.  Store the first insns for that
   sequence in *F1 and *F2 and return the sequence length.
To simplify callers of this function, if the blocks match exactly,
store the head of the blocks in *F1 and *F2. */
-static int
-flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
- basic_block bb2, rtx *f1, rtx *f2)
+int
+flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx *f1, rtx *f2)
{
rtx i1, i2, last1, last2, afterlast1, afterlast2;
int ninsns = 0;
while (true)
{
-      /* Ignore notes. */
+      /* Ignore notes and debug insns.  */
- while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
+ while (!NONDEBUG_INSN_P (i1) && i1 != BB_HEAD (bb1))
i1 = PREV_INSN (i1);
- while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
+ while (!NONDEBUG_INSN_P (i2) && i2 != BB_HEAD (bb2))
i2 = PREV_INSN (i2);
if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
break;
- if (!old_insns_match_p (mode, i1, i2))
+ if (!old_insns_match_p (0, i1, i2))
break;
merge_memattrs (i1, i2);
/* Don't begin a cross-jump with a NOTE insn. */
if (INSN_P (i1))
{
- /* If the merged insns have different REG_EQUAL notes, then
- remove them. */
- rtx equiv1 = find_reg_equal_equiv_note (i1);
- rtx equiv2 = find_reg_equal_equiv_note (i2);
-
- if (equiv1 && !equiv2)
- remove_note (i1, equiv1);
- else if (!equiv1 && equiv2)
- remove_note (i2, equiv2);
- else if (equiv1 && equiv2
- && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
- {
- remove_note (i1, equiv1);
- remove_note (i2, equiv2);
- }
+ merge_notes (i1, i2);
afterlast1 = last1, afterlast2 = last2;
last1 = i1, last2 = i2;
Two, it keeps line number notes as matched as may be. */
if (ninsns)
{
- while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
+ while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
last1 = PREV_INSN (last1);
if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
last1 = PREV_INSN (last1);
- while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
+ while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
last2 = PREV_INSN (last2);
if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
return ninsns;
}
-/* Return true iff the condbranches at the end of BB1 and BB2 match. */
-bool
-condjump_equiv_p (struct equiv_info *info, bool call_init)
-{
- basic_block bb1 = info->x_block;
- basic_block bb2 = info->y_block;
- edge b1 = BRANCH_EDGE (bb1);
- edge b2 = BRANCH_EDGE (bb2);
- edge f1 = FALLTHRU_EDGE (bb1);
- edge f2 = FALLTHRU_EDGE (bb2);
- bool reverse, match;
- rtx set1, set2, cond1, cond2;
- rtx src1, src2;
- enum rtx_code code1, code2;
-
- /* Get around possible forwarders on fallthru edges. Other cases
- should be optimized out already. */
- if (FORWARDER_BLOCK_P (f1->dest))
- f1 = single_succ_edge (f1->dest);
-
- if (FORWARDER_BLOCK_P (f2->dest))
- f2 = single_succ_edge (f2->dest);
-
- /* To simplify use of this function, return false if there are
- unneeded forwarder blocks. These will get eliminated later
- during cleanup_cfg. */
- if (FORWARDER_BLOCK_P (f1->dest)
- || FORWARDER_BLOCK_P (f2->dest)
- || FORWARDER_BLOCK_P (b1->dest)
- || FORWARDER_BLOCK_P (b2->dest))
- return false;
+/* Like flow_find_cross_jump, except that it starts looking for a matching
+   sequence from the head of the two blocks.  Do not include jumps at the
+   end.  If STOP_AFTER is nonzero, stop after finding that many matching
+   instructions.  */
- if (f1->dest == f2->dest && b1->dest == b2->dest)
- reverse = false;
- else if (f1->dest == b2->dest && b1->dest == f2->dest)
- reverse = true;
- else
- return false;
+int
+flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx *f1,
+ rtx *f2, int stop_after)
+{
+ rtx i1, i2, last1, last2, beforelast1, beforelast2;
+ int ninsns = 0;
+ edge e;
+ edge_iterator ei;
+ int nehedges1 = 0, nehedges2 = 0;
- set1 = pc_set (BB_END (bb1));
- set2 = pc_set (BB_END (bb2));
- if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
- != (XEXP (SET_SRC (set2), 1) == pc_rtx))
- reverse = !reverse;
-
- src1 = SET_SRC (set1);
- src2 = SET_SRC (set2);
- cond1 = XEXP (src1, 0);
- cond2 = XEXP (src2, 0);
- code1 = GET_CODE (cond1);
- if (reverse)
- code2 = reversed_comparison_code (cond2, BB_END (bb2));
- else
- code2 = GET_CODE (cond2);
+ FOR_EACH_EDGE (e, ei, bb1->succs)
+ if (e->flags & EDGE_EH)
+ nehedges1++;
+ FOR_EACH_EDGE (e, ei, bb2->succs)
+ if (e->flags & EDGE_EH)
+ nehedges2++;
- if (code2 == UNKNOWN)
- return false;
+ i1 = BB_HEAD (bb1);
+ i2 = BB_HEAD (bb2);
+ last1 = beforelast1 = last2 = beforelast2 = NULL_RTX;
- if (call_init && !struct_equiv_init (STRUCT_EQUIV_START | info->mode, info))
- gcc_unreachable ();
- /* Make the sources of the pc sets unreadable so that when we call
- insns_match_p it won't process them.
- The death_notes_match_p from insns_match_p won't see the local registers
- used for the pc set, but that could only cause missed optimizations when
- there are actually condjumps that use stack registers. */
- SET_SRC (set1) = pc_rtx;
- SET_SRC (set2) = pc_rtx;
- /* Verify codes and operands match. */
- if (code1 == code2)
+ while (true)
{
- match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
- && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 0), 1, info)
- && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 1), 1, info));
- }
- else if (code1 == swap_condition (code2))
- {
- match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
- && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 0), 1, info)
- && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 1), 1, info));
+      /* Ignore notes and debug insns.  */
+ while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
+ i1 = NEXT_INSN (i1);
- }
- else
- match = false;
- SET_SRC (set1) = src1;
- SET_SRC (set2) = src2;
- match &= verify_changes (0);
-
- /* If we return true, we will join the blocks. Which means that
- we will only have one branch prediction bit to work with. Thus
- we require the existing branches to have probabilities that are
- roughly similar. */
- if (match
- && !optimize_size
- && maybe_hot_bb_p (bb1)
- && maybe_hot_bb_p (bb2))
- {
- int prob2;
+ while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
+ i2 = NEXT_INSN (i2);
- if (b1->dest == b2->dest)
- prob2 = b2->probability;
- else
- /* Do not use f2 probability as f2 may be forwarded. */
- prob2 = REG_BR_PROB_BASE - b2->probability;
+ if (NOTE_P (i1) || NOTE_P (i2)
+ || JUMP_P (i1) || JUMP_P (i2))
+ break;
+
+ /* A sanity check to make sure we're not merging insns with different
+ effects on EH. If only one of them ends a basic block, it shouldn't
+ have an EH edge; if both end a basic block, there should be the same
+ number of EH edges. */
+ if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
+ && nehedges1 > 0)
+ || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
+ && nehedges2 > 0)
+ || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
+ && nehedges1 != nehedges2))
+ break;
+
+ if (!old_insns_match_p (0, i1, i2))
+ break;
+
+ merge_memattrs (i1, i2);
- /* Fail if the difference in probabilities is greater than 50%.
- This rules out two well-predicted branches with opposite
- outcomes. */
- if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
+ /* Don't begin a cross-jump with a NOTE insn. */
+ if (INSN_P (i1))
{
- if (dump_file)
- fprintf (dump_file,
- "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
- bb1->index, bb2->index, b1->probability, prob2);
+ merge_notes (i1, i2);
- match = false;
+ beforelast1 = last1, beforelast2 = last2;
+ last1 = i1, last2 = i2;
+ ninsns++;
}
+
+ if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
+ || (stop_after > 0 && ninsns == stop_after))
+ break;
+
+ i1 = NEXT_INSN (i1);
+ i2 = NEXT_INSN (i2);
}
- if (dump_file && match)
- fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
- bb1->index, bb2->index);
+#ifdef HAVE_cc0
+ /* Don't allow a compare to be shared by cross-jumping unless the insn
+ after the compare is also shared. */
+ if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
+ last1 = beforelast1, last2 = beforelast2, ninsns--;
+#endif
+
+ if (ninsns)
+ {
+ *f1 = last1;
+ *f2 = last2;
+ }
- if (!match)
- cancel_changes (0);
- return match;
+ return ninsns;
}
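+
+/* A minimal usage sketch of the function above (hypothetical caller; the
+   function is exported for use by other passes):
+
+     rtx h1, h2;
+     int nmatch = flow_find_head_matching_sequence (bb1, bb2, &h1, &h2, 0);
+
+   When NMATCH is nonzero, H1 and H2 point at the last insns of the
+   matched head sequences; STOP_AFTER == 0 means match as many insns as
+   possible.  */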
/* Return true iff outgoing edges of BB1 and BB2 match, together with
we require the existing branches to have probabilities that are
roughly similar. */
if (match
- && !optimize_size
- && maybe_hot_bb_p (bb1)
- && maybe_hot_bb_p (bb2))
+ && optimize_bb_for_speed_p (bb1)
+ && optimize_bb_for_speed_p (bb2))
{
int prob2;
return false;
/* ... and part the second. */
- nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
+ nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2);
/* Don't proceed with the crossjump unless we found a sufficient number
of matching instructions or the 'from' block was totally matched
/* Skip possible basic block header. */
if (LABEL_P (newpos2))
newpos2 = NEXT_INSN (newpos2);
+ while (DEBUG_INSN_P (newpos2))
+ newpos2 = NEXT_INSN (newpos2);
if (NOTE_P (newpos2))
newpos2 = NEXT_INSN (newpos2);
+ while (DEBUG_INSN_P (newpos2))
+ newpos2 = NEXT_INSN (newpos2);
}
if (dump_file)
if (LABEL_P (newpos1))
newpos1 = NEXT_INSN (newpos1);
- if (NOTE_P (newpos1))
+ while (DEBUG_INSN_P (newpos1))
+ newpos1 = NEXT_INSN (newpos1);
+
+ if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
+ newpos1 = NEXT_INSN (newpos1);
+
+ while (DEBUG_INSN_P (newpos1))
newpos1 = NEXT_INSN (newpos1);
redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
/* Don't crossjump if this block ends in a computed jump,
unless we are optimizing for size. */
- if (!optimize_size
+  if (optimize_bb_for_speed_p (bb)
&& bb != EXIT_BLOCK_PTR
&& computed_jump_p (BB_END (bb)))
return false;
return changed;
}
+/* Return true if BB contains just a basic block note, or a basic block
+   note followed only by DEBUG_INSNs.  */
+
+static bool
+trivially_empty_bb_p (basic_block bb)
+{
+ rtx insn = BB_END (bb);
+
+ while (1)
+ {
+ if (insn == BB_HEAD (bb))
+ return true;
+ if (!DEBUG_INSN_P (insn))
+ return false;
+ insn = PREV_INSN (insn);
+ }
+}
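+
+/* Note that BB_END of such a block may itself be a DEBUG_INSN rather
+   than the bb note, e.g. when debug binds survive in an otherwise empty
+   block, which is why the walk above starts at BB_END and steps
+   backwards over DEBUG_INSNs.  */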
+
/* Do simple CFG optimizations - basic block merging, simplification of
   jump instructions, etc.  Return nonzero if changes were made.  */
edge s;
bool changed_here = false;
- /* Delete trivially dead basic blocks. */
- if (EDGE_COUNT (b->preds) == 0)
+	  /* Delete trivially dead basic blocks.  These are either
+	     blocks with no predecessors, or empty blocks with no
+	     successors.  However, if the empty block with no
+	     successors is the successor of the ENTRY_BLOCK, it is
+	     kept.  This ensures that the ENTRY_BLOCK will have a
+	     successor, which is a precondition for many RTL
+	     passes.  Empty blocks may result from expanding
+	     __builtin_unreachable ().  */
+ if (EDGE_COUNT (b->preds) == 0
+ || (EDGE_COUNT (b->succs) == 0
+ && trivially_empty_bb_p (b)
+ && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b))
{
c = b->prev_bb;
- if (dump_file)
- fprintf (dump_file, "Deleting block %i.\n",
- b->index);
-
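+	  /* In cfglayout mode a block being deleted may still own a
+	     footer holding the barrier that followed it; hand that
+	     footer to a fallthrough predecessor so the barrier is not
+	     lost with the block.  */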
+ if ((mode & CLEANUP_CFGLAYOUT)
+ && EDGE_COUNT (b->preds) > 0
+ && b->il.rtl->footer
+ && BARRIER_P (b->il.rtl->footer))
+ {
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, b->preds)
+ if (e->flags & EDGE_FALLTHRU)
+ {
+ if (e->src->il.rtl->footer == NULL)
+ {
+ e->src->il.rtl->footer = b->il.rtl->footer;
+ b->il.rtl->footer = NULL;
+ }
+ break;
+ }
+ }
delete_basic_block (b);
if (!(mode & CLEANUP_CFGLAYOUT))
changed = true;
delete_basic_block (b);
changed = true;
b = c;
+ continue;
}
if (single_succ_p (b)
delete_unreachable_blocks (void)
{
bool changed = false;
- basic_block b, next_bb;
+ basic_block b, prev_bb;
find_unreachable_blocks ();
- /* Delete all unreachable basic blocks. */
-
- for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
+ /* When we're in GIMPLE mode and there may be debug insns, we should
+ delete blocks in reverse dominator order, so as to get a chance
+ to substitute all released DEFs into debug stmts. If we don't
+ have dominators information, walking blocks backward gets us a
+ better chance of retaining most debug information than
+ otherwise. */
+ if (MAY_HAVE_DEBUG_STMTS && current_ir_type () == IR_GIMPLE
+ && dom_info_available_p (CDI_DOMINATORS))
{
- next_bb = b->next_bb;
+ for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
+ {
+ prev_bb = b->prev_bb;
- if (!(b->flags & BB_REACHABLE))
+ if (!(b->flags & BB_REACHABLE))
+ {
+ /* Speed up the removal of blocks that don't dominate
+ others. Walking backwards, this should be the common
+ case. */
+ if (!first_dom_son (CDI_DOMINATORS, b))
+ delete_basic_block (b);
+ else
+ {
+ VEC (basic_block, heap) *h
+ = get_all_dominated_blocks (CDI_DOMINATORS, b);
+
+ while (VEC_length (basic_block, h))
+ {
+ b = VEC_pop (basic_block, h);
+
+ prev_bb = b->prev_bb;
+
+ gcc_assert (!(b->flags & BB_REACHABLE));
+
+ delete_basic_block (b);
+ }
+
+ VEC_free (basic_block, heap, h);
+ }
+
+ changed = true;
+ }
+ }
+ }
+ else
+ {
+ for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
{
- delete_basic_block (b);
- changed = true;
+ prev_bb = b->prev_bb;
+
+ if (!(b->flags & BB_REACHABLE))
+ {
+ delete_basic_block (b);
+ changed = true;
+ }
}
}
next = NEXT_INSN (insn);
if (LABEL_P (insn)
&& LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
- && JUMP_P (next)
- && (GET_CODE (PATTERN (next)) == ADDR_VEC
- || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC))
+ && JUMP_TABLE_DATA_P (next))
{
rtx label = insn, jump = next;
if (mode & CLEANUP_CROSSJUMP)
add_noreturn_fake_exit_edges ();
+ if (!dbg_cnt (cfg_cleanup))
+ return changed;
+
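+  /* The debug counter above makes this pass bisectable from the command
+     line: e.g. -fdbg-cnt=cfg_cleanup:20 lets only the first twenty
+     invocations of cleanup_cfg do any work beyond this point.  */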
while (try_optimize_cfg (mode))
{
delete_unreachable_blocks (), changed = true;
static unsigned int
rest_of_handle_jump (void)
{
- delete_unreachable_blocks ();
-
- if (cfun->tail_call_emit)
+ if (crtl->tail_call_emit)
fixup_tail_calls ();
return 0;
}
-struct tree_opt_pass pass_jump =
+struct rtl_opt_pass pass_jump =
{
+ {
+ RTL_PASS,
"sibling", /* name */
NULL, /* gate */
rest_of_handle_jump, /* execute */
0, /* properties_destroyed */
TODO_ggc_collect, /* todo_flags_start */
TODO_verify_flow, /* todo_flags_finish */
- 'i' /* letter */
+ }
};
}
-struct tree_opt_pass pass_jump2 =
+struct rtl_opt_pass pass_jump2 =
{
+ {
+ RTL_PASS,
"jump", /* name */
NULL, /* gate */
rest_of_handle_jump2, /* execute */
0, /* properties_destroyed */
TODO_ggc_collect, /* todo_flags_start */
TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */
- 'j' /* letter */
+ }
};