cleanup_cfg. Following optimizations are performed:
- Unreachable blocks removal
- - Edge forwarding (edge to the forwarder block is forwarded to it's
+ - Edge forwarding (edge to the forwarder block is forwarded to its
successor. Simplification of the branch instruction is performed by
underlying infrastructure so branch can be converted to simplejump or
eliminated).
#include "params.h"
#include "tm_p.h"
#include "target.h"
+#include "regs.h"
+#include "cfglayout.h"
+#include "expr.h"
/* cleanup_cfg maintains following flags for each basic block. */
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
+static void merge_memattrs (rtx, rtx);
\f
/* Set flags for newly created block. */
return false;
jump_dest_block = jump_block->succ->dest;
+ /* If we are partitioning hot/cold basic blocks, we don't want to
+ mess up unconditional or indirect jumps that cross between hot
+ and cold sections. */
+
+ if (flag_reorder_blocks_and_partition
+ && (jump_block->partition != jump_dest_block->partition
+ || cbranch_jump_edge->crossing_edge))
+ return false;
+
/* The conditional branch must target the block after the
unconditional branch. */
cbranch_dest_block = cbranch_jump_edge->dest;
if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
return false;
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
+ if (dump_file)
+ fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
/* Success. Update the CFG to match. Note that after this point
}
}
/* Delete the block with the unconditional jump, and clean up the mess. */
- delete_block (jump_block);
- tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);
+ delete_basic_block (jump_block);
+ tidy_fallthru_edge (cbranch_jump_edge);
return true;
}
CLEAR_REGNO_REG_SET (nonequal, regno);
if (regno < FIRST_PSEUDO_REGISTER)
{
- int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
+ int n = hard_regno_nregs[regno][GET_MODE (dest)];
while (--n > 0)
CLEAR_REGNO_REG_SET (nonequal, regno + n);
}
SET_REGNO_REG_SET (nonequal, regno);
if (regno < FIRST_PSEUDO_REGISTER)
{
- int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
+ int n = hard_regno_nregs[regno][GET_MODE (dest)];
while (--n > 0)
SET_REGNO_REG_SET (nonequal, regno + n);
}
return 1;
if (regno < FIRST_PSEUDO_REGISTER)
{
- int n = HARD_REGNO_NREGS (regno, GET_MODE (*x));
+ int n = hard_regno_nregs[regno][GET_MODE (*x)];
while (--n > 0)
if (REGNO_REG_SET_P (nonequal, regno + n))
return 1;
return NULL;
}
- cselib_init ();
+ cselib_init (false);
/* First process all values computed in the source basic block. */
for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src));
bool changed = false;
edge e, next, *threaded_edges = NULL;
+ /* If we are partitioning hot/cold basic blocks, we don't want to
+ mess up unconditional or indirect jumps that cross between hot
+ and cold sections. */
+
+ if (flag_reorder_blocks_and_partition
+ && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
+ return false;
+
for (e = b->succ; e; e = next)
{
basic_block target, first;
if (counter >= n_basic_blocks)
{
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
+ if (dump_file)
+ fprintf (dump_file, "Infinite loop in BB %i.\n",
target->index);
}
else if (target == first)
if (threaded && target != EXIT_BLOCK_PTR)
{
notice_new_block (redirect_edge_and_branch_force (e, target));
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Conditionals threaded.\n");
+ if (dump_file)
+ fprintf (dump_file, "Conditionals threaded.\n");
}
else if (!redirect_edge_and_branch (e, target))
{
- if (rtl_dump_file)
- fprintf (rtl_dump_file,
+ if (dump_file)
+ fprintf (dump_file,
"Forwarding edge %i->%i to %i failed.\n",
b->index, e->dest->index, target->index);
continue;
{
rtx barrier;
+ /* If we are partitioning hot/cold basic blocks, we don't want to
+ mess up unconditional or indirect jumps that cross between hot
+ and cold sections. */
+
+ if (flag_reorder_blocks_and_partition
+ && (a->partition != b->partition
+ || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
+ return;
+
barrier = next_nonnote_insn (BB_END (a));
if (GET_CODE (barrier) != BARRIER)
abort ();
reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
a->flags |= BB_DIRTY;
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
+ if (dump_file)
+ fprintf (dump_file, "Moved block %d before %d and merged.\n",
a->index, b->index);
/* Swap the records for the two blocks around. */
rtx barrier, real_b_end;
rtx label, table;
+ /* If we are partitioning hot/cold basic blocks, we don't want to
+ mess up unconditional or indirect jumps that cross between hot
+ and cold sections. */
+
+ if (flag_reorder_blocks_and_partition
+ && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
+ || a->partition != b->partition))
+ return;
+
real_b_end = BB_END (b);
/* If there is a jump table following block B temporarily add the jump table
/* Restore the real end of b. */
BB_END (b) = real_b_end;
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
+ if (dump_file)
+ fprintf (dump_file, "Moved block %d after %d and merged.\n",
b->index, a->index);
/* Now blocks A and B are contiguous. Merge them. */
&& tail_recursion_label_p (BB_HEAD (c)))
return NULL;
+ /* If we are partitioning hot/cold basic blocks, we don't want to
+ mess up unconditional or indirect jumps that cross between hot
+ and cold sections. */
+
+ if (flag_reorder_blocks_and_partition
+ && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
+ || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
+ || b->partition != c->partition))
+ return NULL;
+
+
+
/* If B has a fallthru edge to C, no need to move anything. */
if (e->flags & EDGE_FALLTHRU)
{
merge_blocks (b, c);
update_forwarder_flag (b);
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
+ if (dump_file)
+ fprintf (dump_file, "Merged %d and %d without moving.\n",
b_index, c_index);
return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
}
\f
+/* Walk the rtl expressions X and Y in lockstep and, wherever a MEM in
+   X corresponds positionally to a MEM in Y, discard or weaken any
+   memory attributes on which the two disagree, so cross jumping can
+   treat the insns as equivalent.  X and Y are modified in place;
+   nothing is returned.  If the expressions differ in code or mode the
+   walk stops silently, since the caller's insn comparison will reject
+   the match anyway.  */
+
+static void
+merge_memattrs (rtx x, rtx y)
+{
+  int i;
+  int j;
+  enum rtx_code code;
+  const char *fmt;
+
+  /* Identical or absent expressions need no merging.  */
+  if (x == y)
+    return;
+  if (x == 0 || y == 0)
+    return;
+
+  code = GET_CODE (x);
+
+  if (code != GET_CODE (y))
+    return;
+
+  if (GET_MODE (x) != GET_MODE (y))
+    return;
+
+  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
+    {
+      /* If either MEM carries no attributes at all, the merged result
+         must carry none either.  */
+      if (! MEM_ATTRS (x))
+	MEM_ATTRS (y) = 0;
+      else if (! MEM_ATTRS (y))
+	MEM_ATTRS (x) = 0;
+      else
+	{
+	  /* Conflicting alias sets degrade to alias set 0, which
+	     conflicts with everything.  */
+	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
+	    {
+	      set_mem_alias_set (x, 0);
+	      set_mem_alias_set (y, 0);
+	    }
+
+	  /* Different underlying exprs: drop the expr and the offset,
+	     since the offset is only meaningful relative to the expr.  */
+	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
+	    {
+	      set_mem_expr (x, 0);
+	      set_mem_expr (y, 0);
+	      set_mem_offset (x, 0);
+	      set_mem_offset (y, 0);
+	    }
+	  else if (MEM_OFFSET (x) != MEM_OFFSET (y))
+	    {
+	      set_mem_offset (x, 0);
+	      set_mem_offset (y, 0);
+	    }
+
+	  /* NOTE(review): MEM_SIZE presumably yields an rtx here, so
+	     MAX compares pointer values rather than byte counts, and
+	     either size may be null -- verify; a null-checked INTVAL
+	     comparison is likely what is wanted.  Original behavior
+	     kept unchanged.  */
+	  set_mem_size (x, MAX (MEM_SIZE (x), MEM_SIZE (y)));
+	  set_mem_size (y, MEM_SIZE (x));
+
+	  /* Keep the weaker (smaller) alignment guarantee.  */
+	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
+	  set_mem_align (y, MEM_ALIGN (x));
+	}
+    }
+
+  /* Recurse over the operands, pairing them positionally.  */
+  fmt = GET_RTX_FORMAT (code);
+  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+    {
+      switch (fmt[i])
+	{
+	case 'E':
+	  /* Two vectors must have the same length.  */
+	  if (XVECLEN (x, i) != XVECLEN (y, i))
+	    return;
+
+	  for (j = 0; j < XVECLEN (x, i); j++)
+	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
+
+	  break;
+
+	case 'e':
+	  merge_memattrs (XEXP (x, i), XEXP (y, i));
+	}
+    }
+}
+
+
/* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
static bool
if (!insns_match_p (mode, i1, i2))
break;
+ merge_memattrs (i1, i2);
+
/* Don't begin a cross-jump with a NOTE insn. */
if (INSN_P (i1))
{
outcomes. */
if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
{
- if (rtl_dump_file)
- fprintf (rtl_dump_file,
+ if (dump_file)
+ fprintf (dump_file,
"Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
bb1->index, bb2->index, b1->probability, prob2);
}
}
- if (rtl_dump_file && match)
- fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
+ if (dump_file && match)
+ fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
bb1->index, bb2->index);
return match;
for_each_rtx (&BB_END (bb1), replace_label, &rr);
match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
- if (rtl_dump_file && match)
- fprintf (rtl_dump_file,
+ if (dump_file && match)
+ fprintf (dump_file,
"Tablejumps in bb %i and %i match.\n",
bb1->index, bb2->index);
rtx newpos1, newpos2;
edge s;
+ /* If we have partitioned hot/cold basic blocks, it is a bad idea
+ to try this optimization. */
+
+ if (flag_reorder_blocks_and_partition && no_new_pseudos)
+ return false;
+
/* Search backward through forwarder blocks. We don't need to worry
about multiple entry or chained forwarders, as they will be optimized
away. We do this to look past the unconditional jump following a
redirect_to = src2;
else
{
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
+ if (dump_file)
+ fprintf (dump_file, "Splitting bb %i before %i insns\n",
src2->index, nmatch);
redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
}
- if (rtl_dump_file)
- fprintf (rtl_dump_file,
+ if (dump_file)
+ fprintf (dump_file,
"Cross jumping from bb %i to bb %i; %i common insns\n",
src1->index, src2->index, nmatch);
to_remove = redirect_from->succ->dest;
redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
- delete_block (to_remove);
+ delete_basic_block (to_remove);
update_forwarder_flag (redirect_from);
if (!bb->pred || !bb->pred->pred_next)
return false;
+ /* If we are partitioning hot/cold basic blocks, we don't want to
+ mess up unconditional or indirect jumps that cross between hot
+ and cold sections. */
+
+ if (flag_reorder_blocks_and_partition
+ && (bb->pred->src->partition != bb->pred->pred_next->src->partition
+ || bb->pred->crossing_edge))
+ return false;
+
/* It is always cheapest to redirect a block that ends in a branch to
a block that falls through into BB, as that adds no branches to the
program. We'll try that combination first. */
if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
clear_bb_flags ();
- if (! (* targetm.cannot_modify_jumps_p) ())
+ if (! targetm.cannot_modify_jumps_p ())
{
first_pass = true;
/* Attempt to merge blocks as made possible by edge removal. If
changed = false;
iterations++;
- if (rtl_dump_file)
- fprintf (rtl_dump_file,
+ if (dump_file)
+ fprintf (dump_file,
"\n\ntry_optimize_cfg iteration %i\n\n",
iterations);
while (b->pred == NULL)
{
c = b->prev_bb;
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Deleting block %i.\n",
+ if (dump_file)
+ fprintf (dump_file, "Deleting block %i.\n",
b->index);
- delete_block (b);
+ delete_basic_block (b);
if (!(mode & CLEANUP_CFGLAYOUT))
changed = true;
b = c;
reorder_insns_nobb (label, label, bb_note);
BB_HEAD (b) = bb_note;
}
- if (rtl_dump_file)
- fprintf (rtl_dump_file, "Deleted label in block %i.\n",
+ if (dump_file)
+ fprintf (dump_file, "Deleted label in block %i.\n",
b->index);
}
&& (b->succ->flags & EDGE_FALLTHRU)
&& n_basic_blocks > 1)
{
- if (rtl_dump_file)
- fprintf (rtl_dump_file,
+ if (dump_file)
+ fprintf (dump_file,
"Deleting fallthru block %i.\n",
b->index);
c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
redirect_edge_succ_nodup (b->pred, b->succ->dest);
- delete_block (b);
+ delete_basic_block (b);
changed = true;
b = c;
}
&& ! b->succ->succ_next
&& b->succ->dest != EXIT_BLOCK_PTR
&& onlyjump_p (BB_END (b))
+ && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
&& try_redirect_by_replacing_jump (b->succ, b->succ->dest,
- (mode & CLEANUP_CFGLAYOUT)))
+ (mode & CLEANUP_CFGLAYOUT) != 0))
{
update_forwarder_flag (b);
changed_here = true;
if (!(b->flags & BB_REACHABLE))
{
- delete_block (b);
+ delete_basic_block (b);
changed = true;
}
}
PROP_DEATH_NOTES
| PROP_SCAN_DEAD_CODE
| PROP_KILL_DEAD_CODE
- | PROP_LOG_LINKS))
+ | ((mode & CLEANUP_LOG_LINKS)
+ ? PROP_LOG_LINKS : 0)))
break;
}
else if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_PRE_SIBCALL))