/* Control flow optimization code for GNU compiler.
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
Free Software Foundation, Inc.
This file is part of GCC.
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
/* Set to true if crossjumps occurred in the latest run of try_optimize_cfg. */
static bool crossjumps_occured;
-static bool try_crossjump_to_edge (int, edge, edge);
+/* Set to true if we couldn't run an optimization due to stale liveness
+ information; we should run df_analyze to enable more opportunities. */
+static bool block_was_dirty;
+
+static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
-static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
-static bool old_insns_match_p (int, rtx, rtx);
+static enum replace_direction old_insns_match_p (int, rtx, rtx);
static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
{
dest = XEXP (exp, 0);
regno = REGNO (dest);
- CLEAR_REGNO_REG_SET (nonequal, regno);
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- int n = hard_regno_nregs[regno][GET_MODE (dest)];
- while (--n > 0)
- CLEAR_REGNO_REG_SET (nonequal, regno + n);
- }
+ if (HARD_REGISTER_NUM_P (regno))
+ bitmap_clear_range (nonequal, regno,
+ hard_regno_nregs[regno][GET_MODE (dest)]);
+ else
+ bitmap_clear_bit (nonequal, regno);
}
return false;
if (!REG_P (dest))
return true;
regno = REGNO (dest);
- SET_REGNO_REG_SET (nonequal, regno);
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- int n = hard_regno_nregs[regno][GET_MODE (dest)];
- while (--n > 0)
- SET_REGNO_REG_SET (nonequal, regno + n);
- }
+ if (HARD_REGISTER_NUM_P (regno))
+ bitmap_set_range (nonequal, regno,
+ hard_regno_nregs[regno][GET_MODE (dest)]);
+ else
+ bitmap_set_bit (nonequal, regno);
return false;
default:
return NULL;
}
- cselib_init (false);
+ cselib_init (0);
/* First process all values computed in the source basic block. */
for (insn = NEXT_INSN (BB_HEAD (e->src));
int counter, goto_locus;
bool threaded = false;
int nthreaded_edges = 0;
- bool may_thread = first_pass | df_get_bb_dirty (b);
+ bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
/* Skip complex edges because we don't know how to update them.
{
basic_block new_target = NULL;
bool new_target_threaded = false;
- may_thread |= df_get_bb_dirty (target);
+ may_thread |= (target->flags & BB_MODIFIED) != 0;
if (FORWARDER_BLOCK_P (target)
&& !(single_succ_edge (target)->flags & EDGE_CROSSING)
{
/* When not optimizing, ensure that edges or forwarder
blocks with different locus are not optimized out. */
- int locus = single_succ_edge (target)->goto_locus;
+ int new_locus = single_succ_edge (target)->goto_locus;
+ int locus = goto_locus;
- if (locus && goto_locus && !locator_eq (locus, goto_locus))
- counter = n_basic_blocks;
- else if (locus)
- goto_locus = locus;
-
- if (INSN_P (BB_END (target)))
+ if (new_locus && locus && !locator_eq (new_locus, locus))
+ new_target = NULL;
+ else
{
- locus = INSN_LOCATOR (BB_END (target));
+ rtx last;
- if (locus && goto_locus
- && !locator_eq (locus, goto_locus))
- counter = n_basic_blocks;
- else if (locus)
- goto_locus = locus;
+ if (new_locus)
+ locus = new_locus;
+
+ last = BB_END (target);
+ if (DEBUG_INSN_P (last))
+ last = prev_nondebug_insn (last);
+
+ new_locus = last && INSN_P (last)
+ ? INSN_LOCATOR (last) : 0;
+
+ if (new_locus && locus && !locator_eq (new_locus, locus))
+ new_target = NULL;
+ else
+ {
+ if (new_locus)
+ locus = new_locus;
+
+ goto_locus = locus;
+ }
}
}
}
+ REG_BR_PROB_BASE / 2)
/ REG_BR_PROB_BASE);
- if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
- b->flags |= BB_FORWARDER_BLOCK;
-
do
{
edge t;
ei_next (&ei);
}
- if (threaded_edges)
- free (threaded_edges);
+ free (threaded_edges);
return changed;
}
\f
edge tmp_edge, b_fallthru_edge;
bool c_has_outgoing_fallthru;
bool b_has_incoming_fallthru;
- edge_iterator ei;
/* Avoid overactive code motion, as the forwarder blocks should be
eliminated by edge redirection instead. One exception might have
and loop notes. This is done by squeezing out all the notes
and leaving them there to lie. Not ideal, but functional. */
- FOR_EACH_EDGE (tmp_edge, ei, c->succs)
- if (tmp_edge->flags & EDGE_FALLTHRU)
- break;
-
+ tmp_edge = find_fallthru_edge (c->succs);
c_has_outgoing_fallthru = (tmp_edge != NULL);
- FOR_EACH_EDGE (tmp_edge, ei, b->preds)
- if (tmp_edge->flags & EDGE_FALLTHRU)
- break;
-
+ tmp_edge = find_fallthru_edge (b->preds);
b_has_incoming_fallthru = (tmp_edge != NULL);
b_fallthru_edge = tmp_edge;
next = b->prev_bb;
MEM_ATTRS (x) = 0;
else
{
- rtx mem_size;
+ HOST_WIDE_INT mem_size;
if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
{
{
set_mem_expr (x, 0);
set_mem_expr (y, 0);
- set_mem_offset (x, 0);
- set_mem_offset (y, 0);
+ clear_mem_offset (x);
+ clear_mem_offset (y);
}
- else if (MEM_OFFSET (x) != MEM_OFFSET (y))
+ else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
+ || (MEM_OFFSET_KNOWN_P (x)
+ && MEM_OFFSET (x) != MEM_OFFSET (y)))
{
- set_mem_offset (x, 0);
- set_mem_offset (y, 0);
+ clear_mem_offset (x);
+ clear_mem_offset (y);
}
- if (!MEM_SIZE (x))
- mem_size = NULL_RTX;
- else if (!MEM_SIZE (y))
- mem_size = NULL_RTX;
+ if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y))
+ {
+ mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y));
+ set_mem_size (x, mem_size);
+ set_mem_size (y, mem_size);
+ }
else
- mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
- INTVAL (MEM_SIZE (y))));
- set_mem_size (x, mem_size);
- set_mem_size (y, mem_size);
+ {
+ clear_mem_size (x);
+ clear_mem_size (y);
+ }
set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
set_mem_align (y, MEM_ALIGN (x));
}
-/* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
+ /* Checks if patterns P1 and P2 are equivalent, apart from the possibly
+ different single sets S1 and S2. */
static bool
+equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
+{
+ int i;
+ rtx e1, e2;
+
+ if (p1 == s1 && p2 == s2)
+ return true;
+
+ if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
+ return false;
+
+ if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
+ return false;
+
+ for (i = 0; i < XVECLEN (p1, 0); i++)
+ {
+ e1 = XVECEXP (p1, 0, i);
+ e2 = XVECEXP (p2, 0, i);
+ if (e1 == s1 && e2 == s2)
+ continue;
+ if (reload_completed
+ ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
+ continue;
+
+ return false;
+ }
+
+ return true;
+}
+
+/* Examine register notes on I1 and I2 and return:
+ - dir_forward if I1 can be replaced by I2, or
+ - dir_backward if I2 can be replaced by I1, or
+ - dir_both if both are the case. */
+
+static enum replace_direction
+can_replace_by (rtx i1, rtx i2)
+{
+ rtx s1, s2, d1, d2, src1, src2, note1, note2;
+ bool c1, c2;
+
+ /* Check for 2 sets. */
+ s1 = single_set (i1);
+ s2 = single_set (i2);
+ if (s1 == NULL_RTX || s2 == NULL_RTX)
+ return dir_none;
+
+ /* Check that the 2 sets set the same dest. */
+ d1 = SET_DEST (s1);
+ d2 = SET_DEST (s2);
+ if (!(reload_completed
+ ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
+ return dir_none;
+
+ /* Find identical reg_equiv or reg_equal note, which implies that the 2 sets
+ set dest to the same value. */
+ note1 = find_reg_equal_equiv_note (i1);
+ note2 = find_reg_equal_equiv_note (i2);
+ if (!note1 || !note2 || !rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0))
+ || !CONST_INT_P (XEXP (note1, 0)))
+ return dir_none;
+
+ if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
+ return dir_none;
+
+ /* Although the 2 sets set dest to the same value, we cannot replace
+ (set (dest) (const_int))
+ by
+ (set (dest) (reg))
+ because we don't know if the reg is live and has the same value at the
+ location of replacement. */
+ src1 = SET_SRC (s1);
+ src2 = SET_SRC (s2);
+ c1 = CONST_INT_P (src1);
+ c2 = CONST_INT_P (src2);
+ if (c1 && c2)
+ return dir_both;
+ else if (c2)
+ return dir_forward;
+ else if (c1)
+ return dir_backward;
+
+ return dir_none;
+}
+
+/* Merges directions A and B. */
+
+static enum replace_direction
+merge_dir (enum replace_direction a, enum replace_direction b)
+{
+ /* Implements the following table:
+ |bo fw bw no
+ ---+-----------
+ bo |bo fw bw no
+ fw |-- fw no no
+ bw |-- -- bw no
+ no |-- -- -- no. */
+
+ if (a == b)
+ return a;
+
+ if (a == dir_both)
+ return b;
+ if (b == dir_both)
+ return a;
+
+ return dir_none;
+}
+
+/* Examine I1 and I2 and return:
+ - dir_forward if I1 can be replaced by I2, or
+ - dir_backward if I2 can be replaced by I1, or
+ - dir_both if both are the case. */
+
+static enum replace_direction
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
rtx p1, p2;
/* Verify that I1 and I2 are equivalent. */
if (GET_CODE (i1) != GET_CODE (i2))
- return false;
+ return dir_none;
+
+ /* __builtin_unreachable() may lead to empty blocks (ending with
+ NOTE_INSN_BASIC_BLOCK). They may be crossjumped. */
+ if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
+ return dir_both;
+
+ /* ??? Do not allow cross-jumping between different stack levels. */
+ p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
+ p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
+ if (p1 && p2)
+ {
+ p1 = XEXP (p1, 0);
+ p2 = XEXP (p2, 0);
+ if (!rtx_equal_p (p1, p2))
+ return dir_none;
+
+ /* ??? Worse, this adjustment had better be constant lest we
+ have differing incoming stack levels. */
+ if (!frame_pointer_needed
+ && find_args_size_adjust (i1) == HOST_WIDE_INT_MIN)
+ return dir_none;
+ }
+ else if (p1 || p2)
+ return dir_none;
p1 = PATTERN (i1);
p2 = PATTERN (i2);
if (GET_CODE (p1) != GET_CODE (p2))
- return false;
+ return dir_none;
/* If this is a CALL_INSN, compare register usage information.
If we don't check this on stack register machines, the two
be filled that clobbers a parameter expected by the subroutine.
??? We take the simple route for now and assume that if they're
- equal, they were constructed identically. */
+ equal, they were constructed identically.
+
+ Also check for identical exception regions. */
+
+ if (CALL_P (i1))
+ {
+ /* Ensure the same EH region. */
+ rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
+ rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
+
+ if (!n1 && n2)
+ return dir_none;
+
+ if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
+ return dir_none;
- if (CALL_P (i1)
- && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
+ if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
CALL_INSN_FUNCTION_USAGE (i2))
- || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
- return false;
+ || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
+ return dir_none;
+ }
#ifdef STACK_REGS
/* If cross_jump_death_matters is not 0, the insn's mode
SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
if (!hard_reg_set_equal_p (i1_regset, i2_regset))
- return false;
+ return dir_none;
}
#endif
if (reload_completed
? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
- return true;
+ return dir_both;
- /* Do not do EQUIV substitution after reload. First, we're undoing the
- work of reload_cse. Second, we may be undoing the work of the post-
- reload splitting pass. */
- /* ??? Possibly add a new phase switch variable that can be used by
- targets to disallow the troublesome insns after splitting. */
- if (!reload_completed)
+ return can_replace_by (i1, i2);
+}
+\f
+/* When comparing insns I1 and I2 in flow_find_cross_jump or
+ flow_find_head_matching_sequence, ensure the notes match. */
+
+static void
+merge_notes (rtx i1, rtx i2)
+{
+ /* If the merged insns have different REG_EQUAL notes, then
+ remove them. */
+ rtx equiv1 = find_reg_equal_equiv_note (i1);
+ rtx equiv2 = find_reg_equal_equiv_note (i2);
+
+ if (equiv1 && !equiv2)
+ remove_note (i1, equiv1);
+ else if (!equiv1 && equiv2)
+ remove_note (i2, equiv2);
+ else if (equiv1 && equiv2
+ && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
{
- /* The following code helps take care of G++ cleanups. */
- rtx equiv1 = find_reg_equal_equiv_note (i1);
- rtx equiv2 = find_reg_equal_equiv_note (i2);
-
- if (equiv1 && equiv2
- /* If the equivalences are not to a constant, they may
- reference pseudos that no longer exist, so we can't
- use them. */
- && (! reload_completed
- || (CONSTANT_P (XEXP (equiv1, 0))
- && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
- {
- rtx s1 = single_set (i1);
- rtx s2 = single_set (i2);
- if (s1 != 0 && s2 != 0
- && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
- {
- validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
- validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
- if (! rtx_renumbered_equal_p (p1, p2))
- cancel_changes (0);
- else if (apply_change_group ())
- return true;
- }
- }
+ remove_note (i1, equiv1);
+ remove_note (i2, equiv2);
}
+}
+
+/* Walks from I1 in BB1 backward until it reaches a non-debug insn, and returns
+ the resulting insn in *I1 and the corresponding bb in *BB1. At the head of a
+ bb, if there is a predecessor bb that reaches this bb via fallthru and
+ FOLLOW_FALLTHRU is true, walks further into the predecessor bb and records
+ this in *DID_FALLTHRU. Otherwise, stops at the head of the bb. */
+
+static void
+walk_to_nondebug_insn (rtx *i1, basic_block *bb1, bool follow_fallthru,
+ bool *did_fallthru)
+{
+ edge fallthru;
+
+ *did_fallthru = false;
+
+ /* Ignore notes. */
+ while (!NONDEBUG_INSN_P (*i1))
+ {
+ if (*i1 != BB_HEAD (*bb1))
+ {
+ *i1 = PREV_INSN (*i1);
+ continue;
+ }
+
+ if (!follow_fallthru)
+ return;
- return false;
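+ /* Only follow the fallthru edge if its source is a real block with a
+ single successor; a predecessor with other successors shares its insns
+ with paths that do not reach *BB1. */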
+ fallthru = find_fallthru_edge ((*bb1)->preds);
+ if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FUNCTION (cfun)
+ || !single_succ_p (fallthru->src))
+ return;
+
+ *bb1 = fallthru->src;
+ *i1 = BB_END (*bb1);
+ *did_fallthru = true;
+ }
}
-\f
+
/* Look through the insns at the end of BB1 and BB2 and find the longest
- sequence that are equivalent. Store the first insns for that sequence
- in *F1 and *F2 and return the sequence length.
+ sequence that is either equivalent, or allows forward or backward
+ replacement. Store the first insns for that sequence in *F1 and *F2 and
+ return the sequence length.
+
+ DIR_P indicates the allowed replacement direction on function entry, and
+ the actual replacement direction on function exit. If NULL, only equivalent
+ sequences are allowed.
To simplify callers of this function, if the blocks match exactly,
store the head of the blocks in *F1 and *F2. */
-static int
-flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
- basic_block bb2, rtx *f1, rtx *f2)
+int
+flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx *f1, rtx *f2,
+ enum replace_direction *dir_p)
{
rtx i1, i2, last1, last2, afterlast1, afterlast2;
int ninsns = 0;
+ rtx p1;
+ enum replace_direction dir, last_dir, afterlast_dir;
+ bool follow_fallthru, did_fallthru;
+
+ if (dir_p)
+ dir = *dir_p;
+ else
+ dir = dir_both;
+ afterlast_dir = dir;
+ last_dir = afterlast_dir;
/* Skip simple jumps at the end of the blocks. Complex jumps still
need to be compared for equivalence, which we'll do below. */
while (true)
{
- /* Ignore notes. */
- while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
- i1 = PREV_INSN (i1);
-
- while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
- i2 = PREV_INSN (i2);
+ /* In the following example, we can replace all jumps to C by jumps to A.
+
+ This removes 4 duplicate insns.
+ [bb A] insn1          [bb C] insn1
+        insn2                 insn2
+ [bb B] insn3                 insn3
+        insn4                 insn4
+        jump_insn             jump_insn
+
+ We could also replace all jumps to A by jumps to C, but that leaves B
+ alive, and removes only 2 duplicate insns. In a subsequent crossjump
+ step, all jumps to B would be replaced with jumps to the middle of C,
+ achieving the same result with more effort.
+ So we allow only the first possibility, which means that we don't allow
+ fallthru in the block that's being replaced. */
+
+ follow_fallthru = dir_p && dir != dir_forward;
+ walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
+ if (did_fallthru)
+ dir = dir_backward;
+
+ follow_fallthru = dir_p && dir != dir_backward;
+ walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
+ if (did_fallthru)
+ dir = dir_forward;
if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
break;
- if (!old_insns_match_p (mode, i1, i2))
+ dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
+ if (dir == dir_none || (!dir_p && dir != dir_both))
break;
merge_memattrs (i1, i2);
/* Don't begin a cross-jump with a NOTE insn. */
if (INSN_P (i1))
{
- /* If the merged insns have different REG_EQUAL notes, then
- remove them. */
- rtx equiv1 = find_reg_equal_equiv_note (i1);
- rtx equiv2 = find_reg_equal_equiv_note (i2);
-
- if (equiv1 && !equiv2)
- remove_note (i1, equiv1);
- else if (!equiv1 && equiv2)
- remove_note (i2, equiv2);
- else if (equiv1 && equiv2
- && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
- {
- remove_note (i1, equiv1);
- remove_note (i2, equiv2);
- }
+ merge_notes (i1, i2);
afterlast1 = last1, afterlast2 = last2;
last1 = i1, last2 = i2;
- ninsns++;
+ afterlast_dir = last_dir;
+ last_dir = dir;
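+ /* USE and CLOBBER insns are not counted towards the matched insn total. */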
+ p1 = PATTERN (i1);
+ if (!(GET_CODE (p1) == USE || GET_CODE (p1) == CLOBBER))
+ ninsns++;
}
i1 = PREV_INSN (i1);
/* Don't allow the insn after a compare to be shared by
cross-jumping unless the compare is also shared. */
if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
- last1 = afterlast1, last2 = afterlast2, ninsns--;
+ last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
#endif
/* Include preceding notes and labels in the cross-jump. One,
Two, it keeps line number notes as matched as may be. */
if (ninsns)
{
- while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
+ bb1 = BLOCK_FOR_INSN (last1);
+ while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
last1 = PREV_INSN (last1);
if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
last1 = PREV_INSN (last1);
- while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
+ bb2 = BLOCK_FOR_INSN (last2);
+ while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
last2 = PREV_INSN (last2);
if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
*f2 = last2;
}
+ if (dir_p)
+ *dir_p = last_dir;
+ return ninsns;
+}
+
+/* Like flow_find_cross_jump, except start looking for a matching sequence from
+ the head of the two blocks. Do not include jumps at the end.
+ If STOP_AFTER is nonzero, stop after finding that many matching
+ instructions. */
+
+int
+flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx *f1,
+ rtx *f2, int stop_after)
+{
+ rtx i1, i2, last1, last2, beforelast1, beforelast2;
+ int ninsns = 0;
+ edge e;
+ edge_iterator ei;
+ int nehedges1 = 0, nehedges2 = 0;
+
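+ /* Count the EH edges of each block; the check below uses these counts to
+ make sure merged insns have the same effect on exception handling. */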
+ FOR_EACH_EDGE (e, ei, bb1->succs)
+ if (e->flags & EDGE_EH)
+ nehedges1++;
+ FOR_EACH_EDGE (e, ei, bb2->succs)
+ if (e->flags & EDGE_EH)
+ nehedges2++;
+
+ i1 = BB_HEAD (bb1);
+ i2 = BB_HEAD (bb2);
+ last1 = beforelast1 = last2 = beforelast2 = NULL_RTX;
+
+ while (true)
+ {
+ /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG. */
+ while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
+ {
+ if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
+ break;
+ i1 = NEXT_INSN (i1);
+ }
+
+ while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
+ {
+ if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
+ break;
+ i2 = NEXT_INSN (i2);
+ }
+
+ if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
+ || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
+ break;
+
+ if (NOTE_P (i1) || NOTE_P (i2)
+ || JUMP_P (i1) || JUMP_P (i2))
+ break;
+
+ /* A sanity check to make sure we're not merging insns with different
+ effects on EH. If only one of them ends a basic block, it shouldn't
+ have an EH edge; if both end a basic block, there should be the same
+ number of EH edges. */
+ if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
+ && nehedges1 > 0)
+ || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
+ && nehedges2 > 0)
+ || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
+ && nehedges1 != nehedges2))
+ break;
+
+ if (old_insns_match_p (0, i1, i2) != dir_both)
+ break;
+
+ merge_memattrs (i1, i2);
+
+ /* Don't begin a cross-jump with a NOTE insn. */
+ if (INSN_P (i1))
+ {
+ merge_notes (i1, i2);
+
+ beforelast1 = last1, beforelast2 = last2;
+ last1 = i1, last2 = i2;
+ ninsns++;
+ }
+
+ if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
+ || (stop_after > 0 && ninsns == stop_after))
+ break;
+
+ i1 = NEXT_INSN (i1);
+ i2 = NEXT_INSN (i2);
+ }
+
+#ifdef HAVE_cc0
+ /* Don't allow a compare to be shared by cross-jumping unless the insn
+ after the compare is also shared. */
+ if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
+ last1 = beforelast1, last2 = beforelast2, ninsns--;
+#endif
+
+ if (ninsns)
+ {
+ *f1 = last1;
+ *f2 = last2;
+ }
+
return ninsns;
}
rr.update_label_nuses = false;
for_each_rtx (&BB_END (bb1), replace_label, &rr);
- match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
+ match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
+ == dir_both);
if (dump_file && match)
fprintf (dump_file,
"Tablejumps in bb %i and %i match.\n",
/* First ensure that the instructions match. There may be many outgoing
edges so this test is generally cheaper. */
- if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
+ if (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)) != dir_both)
return false;
/* Search the outgoing edges, ensure that the counts do match, find possible
/* E1 and E2 are edges with the same destination block. Search their
predecessors for common code. If found, redirect control flow from
- (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
+ (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
+ or the other way around (dir_backward). DIR specifies the allowed
+ replacement direction. */
static bool
-try_crossjump_to_edge (int mode, edge e1, edge e2)
+try_crossjump_to_edge (int mode, edge e1, edge e2,
+ enum replace_direction dir)
{
int nmatch;
basic_block src1 = e1->src, src2 = e2->src;
basic_block redirect_to, redirect_from, to_remove;
+ basic_block osrc1, osrc2, redirect_edges_to, tmp;
rtx newpos1, newpos2;
edge s;
edge_iterator ei;
return false;
/* ... and part the second. */
- nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
+ nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
+
+ osrc1 = src1;
+ osrc2 = src2;
+ if (newpos1 != NULL_RTX)
+ src1 = BLOCK_FOR_INSN (newpos1);
+ if (newpos2 != NULL_RTX)
+ src2 = BLOCK_FOR_INSN (newpos2);
+
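+ /* For a backward-only replacement, swap the two sides so the code below
+ can uniformly redirect E1 into E2's block. */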
+ if (dir == dir_backward)
+ {
+#define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
+ SWAP (basic_block, osrc1, osrc2);
+ SWAP (basic_block, src1, src2);
+ SWAP (edge, e1, e2);
+ SWAP (rtx, newpos1, newpos2);
+#undef SWAP
+ }
/* Don't proceed with the crossjump unless we found a sufficient number
of matching instructions or the 'from' block was totally matched
rtx label1, label2;
rtx table1, table2;
- if (tablejump_p (BB_END (src1), &label1, &table1)
- && tablejump_p (BB_END (src2), &label2, &table2)
+ if (tablejump_p (BB_END (osrc1), &label1, &table1)
+ && tablejump_p (BB_END (osrc2), &label2, &table2)
&& label1 != label2)
{
replace_label_data rr;
/* Do not replace the label in SRC1->END because when deleting
a block whose end is a tablejump, the tablejump referenced
from the instruction is deleted too. */
- if (insn != BB_END (src1))
+ if (insn != BB_END (osrc1))
for_each_rtx (&insn, replace_label, &rr);
}
}
/* Skip possible basic block header. */
if (LABEL_P (newpos2))
newpos2 = NEXT_INSN (newpos2);
+ while (DEBUG_INSN_P (newpos2))
+ newpos2 = NEXT_INSN (newpos2);
if (NOTE_P (newpos2))
newpos2 = NEXT_INSN (newpos2);
+ while (DEBUG_INSN_P (newpos2))
+ newpos2 = NEXT_INSN (newpos2);
}
if (dump_file)
/* We may have some registers visible through the block. */
df_set_bb_dirty (redirect_to);
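+ /* If the matched sequence reaches back into a fallthru predecessor of SRC2,
+ the outgoing edges whose probabilities are recomputed below belong to
+ OSRC2 rather than to the block we redirect to. */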
+ if (osrc2 == src2)
+ redirect_edges_to = redirect_to;
+ else
+ redirect_edges_to = osrc2;
+
/* Recompute the frequencies and counts of outgoing edges. */
- FOR_EACH_EDGE (s, ei, redirect_to->succs)
+ FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
{
edge s2;
edge_iterator ei;
s2->dest->count = 0;
}
- if (!redirect_to->frequency && !src1->frequency)
+ if (!redirect_edges_to->frequency && !src1->frequency)
s->probability = (s->probability + s2->probability) / 2;
else
s->probability
- = ((s->probability * redirect_to->frequency +
+ = ((s->probability * redirect_edges_to->frequency +
s2->probability * src1->frequency)
- / (redirect_to->frequency + src1->frequency));
+ / (redirect_edges_to->frequency + src1->frequency));
}
/* Adjust count and frequency for the block. An earlier jump
threading pass may have left the profile in an inconsistent
state (see update_bb_profile_for_threading) so we must be
prepared for overflows. */
- redirect_to->count += src1->count;
- redirect_to->frequency += src1->frequency;
- if (redirect_to->frequency > BB_FREQ_MAX)
- redirect_to->frequency = BB_FREQ_MAX;
- update_br_prob_note (redirect_to);
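+ /* If the matched insns span more than one block (OSRC2 != SRC2), every
+ block on the fallthru chain from REDIRECT_TO up to REDIRECT_EDGES_TO
+ absorbs SRC1's count and frequency. */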
+ tmp = redirect_to;
+ do
+ {
+ tmp->count += src1->count;
+ tmp->frequency += src1->frequency;
+ if (tmp->frequency > BB_FREQ_MAX)
+ tmp->frequency = BB_FREQ_MAX;
+ if (tmp == redirect_edges_to)
+ break;
+ tmp = find_fallthru_edge (tmp->succs)->dest;
+ }
+ while (true);
+ update_br_prob_note (redirect_edges_to);
/* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
/* Skip possible basic block header. */
if (LABEL_P (newpos1))
newpos1 = NEXT_INSN (newpos1);
+
+ while (DEBUG_INSN_P (newpos1))
+ newpos1 = NEXT_INSN (newpos1);
+
if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
newpos1 = NEXT_INSN (newpos1);
+ while (DEBUG_INSN_P (newpos1))
+ newpos1 = NEXT_INSN (newpos1);
+
redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
to_remove = single_succ (redirect_from);
edge e, e2, fallthru;
bool changed;
unsigned max, ix, ix2;
- basic_block ev, ev2;
- edge_iterator ei;
/* Nothing to do if there are not at least two incoming edges. */
if (EDGE_COUNT (bb->preds) < 2)
if (EDGE_COUNT (bb->preds) > max)
return false;
- FOR_EACH_EDGE (e, ei, bb->preds)
- {
- if (e->flags & EDGE_FALLTHRU)
- {
- fallthru = e;
- break;
- }
- }
+ fallthru = find_fallthru_edge (bb->preds);
changed = false;
- for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
+ for (ix = 0; ix < EDGE_COUNT (bb->preds);)
{
- e = EDGE_PRED (ev, ix);
+ e = EDGE_PRED (bb, ix);
ix++;
/* As noted above, first try with the fallthru predecessor (or, a
/* If nothing changed since the last attempt, there is nothing
we can do. */
if (!first_pass
- && (!(df_get_bb_dirty (e->src))
- && !(df_get_bb_dirty (fallthru->src))))
+ && !((e->src->flags & BB_MODIFIED)
+ || (fallthru->src->flags & BB_MODIFIED)))
continue;
- if (try_crossjump_to_edge (mode, e, fallthru))
+ if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
{
changed = true;
ix = 0;
- ev = bb;
continue;
}
}
if (EDGE_SUCC (e->src, 0) != e)
continue;
- for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
+ for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
{
- e2 = EDGE_PRED (ev2, ix2);
- ix2++;
+ e2 = EDGE_PRED (bb, ix2);
if (e2 == e)
continue;
/* If nothing changed since the last attempt, there is nothing
we can do. */
if (!first_pass
- && (!(df_get_bb_dirty (e->src))
- && !(df_get_bb_dirty (e2->src))))
+ && !((e->src->flags & BB_MODIFIED)
+ || (e2->src->flags & BB_MODIFIED)))
continue;
- if (try_crossjump_to_edge (mode, e, e2))
+ /* Neither e nor e2 is a fallthru edge, so we can crossjump in either
+ direction. */
+ if (try_crossjump_to_edge (mode, e, e2, dir_both))
{
changed = true;
- ev2 = bb;
ix = 0;
break;
}
return changed;
}
+/* Search the successors of BB for common insn sequences. When found,
+ share code between them by moving it across the basic block
+ boundary. Return true if any changes were made. */
+
+static bool
+try_head_merge_bb (basic_block bb)
+{
+ basic_block final_dest_bb = NULL;
+ int max_match = INT_MAX;
+ edge e0;
+ rtx *headptr, *currptr, *nextptr;
+ bool changed, moveall;
+ unsigned ix;
+ rtx e0_last_head, cond, move_before;
+ unsigned nedges = EDGE_COUNT (bb->succs);
+ rtx jump = BB_END (bb);
+ regset live, live_union;
+
+ /* Nothing to do if there are not at least two outgoing edges. */
+ if (nedges < 2)
+ return false;
+
+ /* Don't crossjump if this block ends in a computed jump,
+ unless we are optimizing for size. */
+ if (optimize_bb_for_size_p (bb)
+ && bb != EXIT_BLOCK_PTR
+ && computed_jump_p (BB_END (bb)))
+ return false;
+
+ cond = get_condition (jump, &move_before, true, false);
+ if (cond == NULL_RTX)
+ {
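+ /* Without a recognizable condition, fall back to inserting the moved
+ insns directly before the jump, or before the preceding insn (the cc0
+ setter) so that the set stays adjacent to its use. */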
+#ifdef HAVE_cc0
+ if (reg_mentioned_p (cc0_rtx, jump))
+ move_before = prev_nonnote_nondebug_insn (jump);
+ else
+#endif
+ move_before = jump;
+ }
+
+ for (ix = 0; ix < nedges; ix++)
+ if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR)
+ return false;
+
+ for (ix = 0; ix < nedges; ix++)
+ {
+ edge e = EDGE_SUCC (bb, ix);
+ basic_block other_bb = e->dest;
+
+ if (df_get_bb_dirty (other_bb))
+ {
+ block_was_dirty = true;
+ return false;
+ }
+
+ if (e->flags & EDGE_ABNORMAL)
+ return false;
+
+ /* Normally, all destination blocks must only be reachable from this
+ block, i.e. they must have one incoming edge.
+
+ There is one special case we can handle, that of multiple consecutive
+ jumps where the first jumps to one of the targets of the second jump.
+ This happens frequently in switch statements for default labels.
+ The structure is as follows:
+ FINAL_DEST_BB
+ ....
+ if (cond) jump A;
+ fall through
+ BB
+ jump with targets A, B, C, D...
+ A
+ has two incoming edges, from FINAL_DEST_BB and BB
+
+ In this case, we can try to move the insns through BB and into
+ FINAL_DEST_BB. */
+ if (EDGE_COUNT (other_bb->preds) != 1)
+ {
+ edge incoming_edge, incoming_bb_other_edge;
+ edge_iterator ei;
+
+ if (final_dest_bb != NULL
+ || EDGE_COUNT (other_bb->preds) != 2)
+ return false;
+
+ /* We must be able to move the insns across the whole block. */
+ move_before = BB_HEAD (bb);
+ while (!NONDEBUG_INSN_P (move_before))
+ move_before = NEXT_INSN (move_before);
+
+ if (EDGE_COUNT (bb->preds) != 1)
+ return false;
+ incoming_edge = EDGE_PRED (bb, 0);
+ final_dest_bb = incoming_edge->src;
+ if (EDGE_COUNT (final_dest_bb->succs) != 2)
+ return false;
+ FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
+ if (incoming_bb_other_edge != incoming_edge)
+ break;
+ if (incoming_bb_other_edge->dest != other_bb)
+ return false;
+ }
+ }
+
+ e0 = EDGE_SUCC (bb, 0);
+ e0_last_head = NULL_RTX;
+ changed = false;
+
+ for (ix = 1; ix < nedges; ix++)
+ {
+ edge e = EDGE_SUCC (bb, ix);
+ rtx e0_last, e_last;
+ int nmatch;
+
+ nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
+ &e0_last, &e_last, 0);
+ if (nmatch == 0)
+ return false;
+
+ if (nmatch < max_match)
+ {
+ max_match = nmatch;
+ e0_last_head = e0_last;
+ }
+ }
+
+ /* If we matched an entire block, we probably have to avoid moving the
+ last insn. */
+ if (max_match > 0
+ && e0_last_head == BB_END (e0->dest)
+ && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
+ || control_flow_insn_p (e0_last_head)))
+ {
+ max_match--;
+ if (max_match == 0)
+ return false;
+ do
+ e0_last_head = prev_real_insn (e0_last_head);
+ while (DEBUG_INSN_P (e0_last_head));
+ }
+
+ if (max_match == 0)
+ return false;
+
+ /* We must find a union of the live registers at each of the end points. */
+ live = BITMAP_ALLOC (NULL);
+ live_union = BITMAP_ALLOC (NULL);
+
+ currptr = XNEWVEC (rtx, nedges);
+ headptr = XNEWVEC (rtx, nedges);
+ nextptr = XNEWVEC (rtx, nedges);
+
+ for (ix = 0; ix < nedges; ix++)
+ {
+ int j;
+ basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
+ rtx head = BB_HEAD (merge_bb);
+
+ while (!NONDEBUG_INSN_P (head))
+ head = NEXT_INSN (head);
+ headptr[ix] = head;
+ currptr[ix] = head;
+
+ /* Compute the end point and live information. */
+ for (j = 1; j < max_match; j++)
+ do
+ head = NEXT_INSN (head);
+ while (!NONDEBUG_INSN_P (head));
+ simulate_backwards_to_point (merge_bb, live, head);
+ IOR_REG_SET (live_union, live);
+ }
+
+ /* If we're moving across two blocks, verify the validity of the
+ first move, then adjust the target and let the loop below deal
+ with the final move. */
+ if (final_dest_bb != NULL)
+ {
+ rtx move_upto;
+
+ moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
+ jump, e0->dest, live_union,
+ NULL, &move_upto);
+ if (!moveall)
+ {
+ if (move_upto == NULL_RTX)
+ goto out;
+
+ while (e0_last_head != move_upto)
+ {
+ df_simulate_one_insn_backwards (e0->dest, e0_last_head,
+ live_union);
+ e0_last_head = PREV_INSN (e0_last_head);
+ }
+ }
+ if (e0_last_head == NULL_RTX)
+ goto out;
+
+ jump = BB_END (final_dest_bb);
+ cond = get_condition (jump, &move_before, true, false);
+ if (cond == NULL_RTX)
+ {
+#ifdef HAVE_cc0
+ if (reg_mentioned_p (cc0_rtx, jump))
+ move_before = prev_nonnote_nondebug_insn (jump);
+ else
+#endif
+ move_before = jump;
+ }
+ }
+
+ do
+ {
+ rtx move_upto;
+ moveall = can_move_insns_across (currptr[0], e0_last_head,
+ move_before, jump, e0->dest, live_union,
+ NULL, &move_upto);
+ if (!moveall && move_upto == NULL_RTX)
+ {
+ if (jump == move_before)
+ break;
+
+ /* Try again, using a different insertion point. */
+ move_before = jump;
+
+#ifdef HAVE_cc0
+ /* Don't try moving before a cc0 user, as that may invalidate
+ the cc0. */
+ if (reg_mentioned_p (cc0_rtx, jump))
+ break;
+#endif
+
+ continue;
+ }
+
+ if (final_dest_bb && !moveall)
+ /* We haven't checked whether a partial move would be OK for the first
+ move, so we have to fail this case. */
+ break;
+
+ changed = true;
+ for (;;)
+ {
+ if (currptr[0] == move_upto)
+ break;
+ for (ix = 0; ix < nedges; ix++)
+ {
+ rtx curr = currptr[ix];
+ do
+ curr = NEXT_INSN (curr);
+ while (!NONDEBUG_INSN_P (curr));
+ currptr[ix] = curr;
+ }
+ }
+
+ /* If we can't currently move all of the identical insns, remember
+ each insn after the range that we'll merge. */
+ if (!moveall)
+ for (ix = 0; ix < nedges; ix++)
+ {
+ rtx curr = currptr[ix];
+ do
+ curr = NEXT_INSN (curr);
+ while (!NONDEBUG_INSN_P (curr));
+ nextptr[ix] = curr;
+ }
+
+ reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
+ df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
+ if (final_dest_bb != NULL)
+ df_set_bb_dirty (final_dest_bb);
+ df_set_bb_dirty (bb);
+ for (ix = 1; ix < nedges; ix++)
+ {
+ df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
+ delete_insn_chain (headptr[ix], currptr[ix], false);
+ }
+ if (!moveall)
+ {
+ if (jump == move_before)
+ break;
+
+ /* For the unmerged insns, try a different insertion point. */
+ move_before = jump;
+
+#ifdef HAVE_cc0
+ /* Don't try moving before a cc0 user, as that may invalidate
+ the cc0. */
+ if (reg_mentioned_p (cc0_rtx, jump))
+ break;
+#endif
+
+ for (ix = 0; ix < nedges; ix++)
+ currptr[ix] = headptr[ix] = nextptr[ix];
+ }
+ }
+ while (!moveall);
+
+ out:
+ free (currptr);
+ free (headptr);
+ free (nextptr);
+
+ crossjumps_occured |= changed;
+
+ return changed;
+}
+
+/* Return true if BB contains just a bb note, or a bb note followed
+ by only DEBUG_INSNs. */
+
+static bool
+trivially_empty_bb_p (basic_block bb)
+{
+ rtx insn = BB_END (bb);
+
+ while (1)
+ {
+ if (insn == BB_HEAD (bb))
+ return true;
+ if (!DEBUG_INSN_P (insn))
+ return false;
+ insn = PREV_INSN (insn);
+ }
+}
+
/* Do simple CFG optimizations - basic block merging, simplifying of jump
instructions etc. Return nonzero if changes were made. */
one predecessor, they may be combined. */
do
{
+ block_was_dirty = false;
changed = false;
iterations++;
/* Delete trivially dead basic blocks. This is either
blocks with no predecessors, or empty blocks with no
- successors. Empty blocks may result from expanding
+ successors. However, if the empty block with no
+ successors is the successor of the ENTRY_BLOCK, it is
+ kept. This ensures that the ENTRY_BLOCK will have a
+ successor, which is a precondition for many RTL
+ passes. Empty blocks may result from expanding
__builtin_unreachable (). */
if (EDGE_COUNT (b->preds) == 0
- || (EDGE_COUNT (b->succs) == 0 && BB_HEAD (b) == BB_END (b)))
+ || (EDGE_COUNT (b->succs) == 0
+ && trivially_empty_bb_p (b)
+ && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b))
{
c = b->prev_bb;
- if (dump_file)
- fprintf (dump_file, "Deleting block %i.\n",
- b->index);
+ if (EDGE_COUNT (b->preds) > 0)
+ {
+ edge e;
+ edge_iterator ei;
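+ /* If B ends in a barrier, each fallthru predecessor must keep a barrier
+ of its own once B is gone: either take over B's barrier or have a new
+ one emitted for it. */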
+ if (current_ir_type () == IR_RTL_CFGLAYOUT)
+ {
+ if (b->il.rtl->footer
+ && BARRIER_P (b->il.rtl->footer))
+ FOR_EACH_EDGE (e, ei, b->preds)
+ if ((e->flags & EDGE_FALLTHRU)
+ && e->src->il.rtl->footer == NULL)
+ {
+ if (b->il.rtl->footer)
+ {
+ e->src->il.rtl->footer = b->il.rtl->footer;
+ b->il.rtl->footer = NULL;
+ }
+ else
+ {
+ start_sequence ();
+ e->src->il.rtl->footer = emit_barrier ();
+ end_sequence ();
+ }
+ }
+ }
+ else
+ {
+ rtx last = get_last_bb_insn (b);
+ if (last && BARRIER_P (last))
+ FOR_EACH_EDGE (e, ei, b->preds)
+ if ((e->flags & EDGE_FALLTHRU))
+ emit_barrier_after (BB_END (e->src));
+ }
+ }
delete_basic_block (b);
- if (!(mode & CLEANUP_CFGLAYOUT))
- changed = true;
+ changed = true;
/* Avoid trying to remove ENTRY_BLOCK_PTR. */
b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
continue;
delete_basic_block (b);
changed = true;
b = c;
+ continue;
}
+ /* Merge B with its single successor, if any. */
if (single_succ_p (b)
&& (s = single_succ_edge (b))
&& !(s->flags & EDGE_COMPLEX)
/* Simplify branch to branch. */
if (try_forward_edges (mode, b))
- changed_here = true;
+ {
+ update_forwarder_flag (b);
+ changed_here = true;
+ }
/* Look for shared code between blocks. */
if ((mode & CLEANUP_CROSSJUMP)
&& try_crossjump_bb (mode, b))
changed_here = true;
+ if ((mode & CLEANUP_CROSSJUMP)
+ /* This can lengthen register lifetimes. Do it only after
+ reload. */
+ && reload_completed
+ && try_head_merge_bb (b))
+ changed_here = true;
+
/* Don't get confused by the index shift caused by
deleting blocks. */
if (!changed_here)
&& try_crossjump_bb (mode, EXIT_BLOCK_PTR))
changed = true;
+ if (block_was_dirty)
+ {
+ /* This should only be set by head-merging. */
+ gcc_assert (mode & CLEANUP_CROSSJUMP);
+ df_analyze ();
+ }
+
#ifdef ENABLE_CHECKING
if (changed)
verify_flow_info ();
delete_unreachable_blocks (void)
{
bool changed = false;
- basic_block b, next_bb;
+ basic_block b, prev_bb;
find_unreachable_blocks ();
- /* Delete all unreachable basic blocks. */
-
- for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
+ /* When we're in GIMPLE mode and there may be debug insns, we should
+ delete blocks in reverse dominator order, so as to get a chance
+ to substitute all released DEFs into debug stmts. If we don't
+ have dominators information, walking blocks backward gets us a
+ better chance of retaining most debug information than
+ otherwise. */
+ if (MAY_HAVE_DEBUG_STMTS && current_ir_type () == IR_GIMPLE
+ && dom_info_available_p (CDI_DOMINATORS))
{
- next_bb = b->next_bb;
+ for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
+ {
+ prev_bb = b->prev_bb;
+
+ if (!(b->flags & BB_REACHABLE))
+ {
+ /* Speed up the removal of blocks that don't dominate
+ others. Walking backwards, this should be the common
+ case. */
+ if (!first_dom_son (CDI_DOMINATORS, b))
+ delete_basic_block (b);
+ else
+ {
+ VEC (basic_block, heap) *h
+ = get_all_dominated_blocks (CDI_DOMINATORS, b);
- if (!(b->flags & BB_REACHABLE))
+ while (VEC_length (basic_block, h))
+ {
+ b = VEC_pop (basic_block, h);
+
+ prev_bb = b->prev_bb;
+
+ gcc_assert (!(b->flags & BB_REACHABLE));
+
+ delete_basic_block (b);
+ }
+
+ VEC_free (basic_block, heap, h);
+ }
+
+ changed = true;
+ }
+ }
+ }
+ else
+ {
+ for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
{
- delete_basic_block (b);
- changed = true;
+ prev_bb = b->prev_bb;
+
+ if (!(b->flags & BB_REACHABLE))
+ {
+ delete_basic_block (b);
+ changed = true;
+ }
}
}
next = NEXT_INSN (insn);
if (LABEL_P (insn)
&& LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
- && JUMP_P (next)
- && (GET_CODE (PATTERN (next)) == ADDR_VEC
- || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC))
+ && JUMP_TABLE_DATA_P (next))
{
rtx label = insn, jump = next;
if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
&& !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
break;
- else if ((mode & CLEANUP_CROSSJUMP)
- && crossjumps_occured)
+ if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
run_fast_dce ();
}
else
0, /* properties_provided */
0, /* properties_destroyed */
TODO_ggc_collect, /* todo_flags_start */
- TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */
+ TODO_verify_rtl_sharing, /* todo_flags_finish */
}
};
-
-