+Tue Oct 23 15:30:23 CEST 2001  Jan Hubicka  <jh@suse.cz>
+
+ * i386.c (ix86_expand_int_movcc): Cleanup; use expand_simple_*op.
+
+ * toplev.c (rest_of_compilation): Use CLEANUP_UPDATE_LIFE
+ to avoid update_life_info call.
+ * basic-block.h (CLEANUP_UPDATE_LIFE): Define.
+ * cfgcleanup.c (bb_flags): New enum.
+ (BB_FLAGS, BB_SET_FLAG, BB_CLEAR_FLAG, FORWARDER_BLOCK_P): New macros.
+ (notice_new_block, update_forwarder_flag): New functions.
+ (try_simplify_condjump): Use FORWARDER_BLOCK_P.
+ (try_forward_edges): Likewise; update flags.
+ (merge_blocks): Likewise.
+ (outgoing_edges_match): Likewise.
+ (try_crossjump_to_edge): Likewise.
+ (try_optimize_cfg): Likewise; initialize and clear the flags;
+ recompute life info if needed.
+ (cleanup_cfg): No need to clear aux pointers.
+
2001-10-23 Alexandre Oliva <aoliva@redhat.com>
* config/i386/i386.c (override_options): Default to minimum
#define CLEANUP_PRE_SIBCALL 8 /* Do not get confused by code hidden
 inside call_placeholders.. */
#define CLEANUP_PRE_LOOP 16 /* Take care to preserve syntactic loop
notes. */
+#define CLEANUP_UPDATE_LIFE 32 /* Keep life information up to date. */
/* Flags for loop discovery. */
#define LOOP_TREE 1 /* Build loop hierarchy tree. */
#include "obstack.h"
+/* cleanup_cfg maintains the following flags for each basic block. */
+enum bb_flags {
+ /* Set if life info needs to be recomputed for the given BB. */
+ BB_UPDATE_LIFE = 1,
+ /* Set if BB is a forwarder block, cached to avoid repeated
+ forwarder_block_p calls. */
+ BB_FORWARDER_BLOCK = 2
+ };
+
+#define BB_FLAGS(bb) (enum bb_flags)(bb)->aux
+#define BB_SET_FLAG(bb,flag) \
+ (bb)->aux = (void *)((enum bb_flags)(bb)->aux | (flag))
+#define BB_CLEAR_FLAG(bb,flag) \
+ (bb)->aux = (void *)((enum bb_flags)(bb)->aux & ~(flag))
+
+#define FORWARDER_BLOCK_P(bb) (BB_FLAGS(bb) & BB_FORWARDER_BLOCK)
+
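+/* Both flags live in the otherwise unused bb->aux field, so no extra
+   per-block storage is needed while cleanup runs.  A minimal usage
+   sketch (illustrative only; try_optimize_cfg below does the same
+   thing via update_forwarder_flag):
+
+	int i;
+	for (i = 0; i < n_basic_blocks; i++)
+	  if (forwarder_block_p (BASIC_BLOCK (i)))
+	    BB_SET_FLAG (BASIC_BLOCK (i), BB_FORWARDER_BLOCK);
+
+   Afterwards FORWARDER_BLOCK_P (bb) answers the query without
+   re-examining the block's insns.  */
+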
static bool try_crossjump_to_edge PARAMS ((int, edge, edge));
static bool try_crossjump_bb PARAMS ((int, basic_block));
static bool outgoing_edges_match PARAMS ((basic_block, basic_block));
static bool try_optimize_cfg PARAMS ((int));
static bool try_simplify_condjump PARAMS ((basic_block));
static bool try_forward_edges PARAMS ((int, basic_block));
+static void notice_new_block PARAMS ((basic_block));
+static void update_forwarder_flag PARAMS ((basic_block));
+\f
+/* Set flags for a newly created block. */
+
+static void
+notice_new_block (bb)
+ basic_block bb;
+{
+ if (!bb)
+ return;
+ BB_SET_FLAG (bb, BB_UPDATE_LIFE);
+ if (forwarder_block_p (bb))
+ BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
+}
+
+/* Recompute the forwarder flag after a block has been modified. */
+
+static void
+update_forwarder_flag (bb)
+ basic_block bb;
+{
+ if (forwarder_block_p (bb))
+ BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
+ else
+ BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
+}
\f
/* Simplify a conditional jump around an unconditional jump.
Return true if something changed. */
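/* For example (schematic; condition and labels illustrative):

	jcc	L1	; conditional branch around ...
	jmp	L2	; ... an unconditional jump
   L1:

   becomes a single conditional jump to L2 with the reversed condition,
   after which the code at L1 simply falls through. */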
jump_block = cbranch_fallthru_edge->dest;
if (jump_block->pred->pred_next
|| jump_block->index == n_basic_blocks - 1
- || !forwarder_block_p (jump_block))
+ || !FORWARDER_BLOCK_P (jump_block))
return false;
jump_dest_block = jump_block->succ->dest;
/* Look for the real destination of the jump.
 Avoid an infinite loop in the degenerate case of an empty infinite
 loop by counting up to n_basic_blocks. */
- while (forwarder_block_p (target)
+ while (FORWARDER_BLOCK_P (target)
&& target->succ->dest != EXIT_BLOCK_PTR
&& counter < n_basic_blocks)
{
+ REG_BR_PROB_BASE / 2)
/ REG_BR_PROB_BASE);
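+ /* Edge forwarding may have turned B into a forwarder block, and the
+ set of registers live through it may have changed. */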
+ if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
+ BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
+ BB_SET_FLAG (b, BB_UPDATE_LIFE);
+
do
{
first->count -= edge_count;
if (e->flags & EDGE_FALLTHRU)
{
merge_blocks_nomove (b, c);
+ update_forwarder_flag (b);
if (rtl_dump_file)
{
eliminated by edge redirection instead. One exception might have
been if B is a forwarder block and C has no fallthru edge, but
that should be cleaned up by bb-reorder instead. */
- if (forwarder_block_p (b) || forwarder_block_p (c))
+ if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
return false;
/* We must make sure to not munge nesting of lexical blocks,
{
if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
return false;
- force_nonfallthru (b_fallthru_edge);
+ BB_SET_FLAG (b_fallthru_edge->src, BB_UPDATE_LIFE);
+ notice_new_block (force_nonfallthru (b_fallthru_edge));
}
merge_blocks_move_predecessor_nojumps (b, c);
return true;
/* Get around possible forwarders on fallthru edges. Other cases
should be optimized out already. */
- if (forwarder_block_p (f1->dest))
+ if (FORWARDER_BLOCK_P (f1->dest))
f1 = f1->dest->succ;
- if (forwarder_block_p (f2->dest))
+ if (FORWARDER_BLOCK_P (f2->dest))
f2 = f2->dest->succ;
/* To simplify use of this function, return false if there are
unneeded forwarder blocks. These will get eliminated later
during cleanup_cfg. */
- if (forwarder_block_p (f1->dest)
- || forwarder_block_p (f2->dest)
- || forwarder_block_p (b1->dest)
- || forwarder_block_p (b2->dest))
+ if (FORWARDER_BLOCK_P (f1->dest)
+ || FORWARDER_BLOCK_P (f2->dest)
+ || FORWARDER_BLOCK_P (b1->dest)
+ || FORWARDER_BLOCK_P (b2->dest))
return false;
if (f1->dest == f2->dest && b1->dest == b2->dest)
conditional jump that is required due to the current CFG shape. */
if (src1->pred
&& !src1->pred->pred_next
- && forwarder_block_p (src1))
+ && FORWARDER_BLOCK_P (src1))
{
e1 = src1->pred;
src1 = e1->src;
}
if (src2->pred
&& !src2->pred->pred_next
- && forwarder_block_p (src2))
+ && FORWARDER_BLOCK_P (src2))
{
e2 = src2->pred;
src2 = e2->src;
return false;
 /* Seeing more than one forwarder block would confuse us later... */
- if (forwarder_block_p (e1->dest)
- && forwarder_block_p (e1->dest->succ->dest))
+ if (FORWARDER_BLOCK_P (e1->dest)
+ && FORWARDER_BLOCK_P (e1->dest->succ->dest))
return false;
- if (forwarder_block_p (e2->dest)
- && forwarder_block_p (e2->dest->succ->dest))
+ if (FORWARDER_BLOCK_P (e2->dest)
+ && FORWARDER_BLOCK_P (e2->dest->succ->dest))
return false;
/* Likewise with dead code (possibly newly created by the other optimizations
edge s2;
basic_block d = s->dest;
- if (forwarder_block_p (d))
+ if (FORWARDER_BLOCK_P (d))
d = d->succ->dest;
for (s2 = src1->succ; ; s2 = s2->succ_next)
{
basic_block d2 = s2->dest;
- if (forwarder_block_p (d2))
+ if (FORWARDER_BLOCK_P (d2))
d2 = d2->succ->dest;
if (d == d2)
break;
/* Take care to update possible forwarder blocks. We verified
that there is no more than one in the chain, so we can't run
into infinite loop. */
- if (forwarder_block_p (s->dest))
+ if (FORWARDER_BLOCK_P (s->dest))
{
s->dest->succ->count += s2->count;
s->dest->count += s2->count;
s->dest->frequency += EDGE_FREQUENCY (s);
}
- if (forwarder_block_p (s2->dest))
+ if (FORWARDER_BLOCK_P (s2->dest))
{
s2->dest->succ->count -= s2->count;
s2->dest->count -= s2->count;
remove_edge (src1->succ);
make_single_succ_edge (src1, redirect_to, 0);
+ BB_SET_FLAG (src1, BB_UPDATE_LIFE);
+ update_forwarder_flag (src1);
+
return true;
}
bool changed_overall = false;
bool changed;
int iterations = 0;
+ sbitmap blocks;
if (mode & CLEANUP_CROSSJUMP)
add_noreturn_fake_exit_edges ();
+ for (i = 0; i < n_basic_blocks; i++)
+ update_forwarder_flag (BASIC_BLOCK (i));
+
/* Attempt to merge blocks as made possible by edge removal. If a block
has only one successor, and the successor has only one predecessor,
they may be combined. */
if (b->pred->pred_next == NULL
&& (b->pred->flags & EDGE_FALLTHRU)
&& GET_CODE (b->head) != CODE_LABEL
- && forwarder_block_p (b)
+ && FORWARDER_BLOCK_P (b)
/* Note that forwarder_block_p true ensures that there
is a successor for this block. */
&& (b->succ->flags & EDGE_FALLTHRU)
&& b->succ->dest != EXIT_BLOCK_PTR
&& onlyjump_p (b->end)
&& redirect_edge_and_branch (b->succ, b->succ->dest))
- changed_here = true;
+ {
+ BB_SET_FLAG (b, BB_UPDATE_LIFE);
+ update_forwarder_flag (b);
+ changed_here = true;
+ }
/* Simplify branch to branch. */
if (try_forward_edges (mode, b))
if (mode & CLEANUP_CROSSJUMP)
remove_fake_edges ();
+ if ((mode & CLEANUP_UPDATE_LIFE) && changed_overall)
+ {
+ bool found = 0;
+ blocks = sbitmap_alloc (n_basic_blocks);
+ sbitmap_zero (blocks);
+ for (i = 0; i < n_basic_blocks; i++)
+ if (BB_FLAGS (BASIC_BLOCK (i)) & BB_UPDATE_LIFE)
+ {
+ found = 1;
+ SET_BIT (blocks, i);
+ }
+ if (found)
+ update_life_info (blocks, UPDATE_LIFE_GLOBAL,
+ PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
+ | PROP_KILL_DEAD_CODE);
+ sbitmap_free (blocks);
+ }
+ for (i = 0; i < n_basic_blocks; i++)
+ BASIC_BLOCK (i)->aux = NULL;
+
return changed_overall;
}
\f
free_EXPR_LIST_list (&tail_recursion_label_list);
timevar_pop (TV_CLEANUP_CFG);
- /* Clear bb->aux on all basic blocks. */
- for (i = 0; i < n_basic_blocks; ++i)
- BASIC_BLOCK (i)->aux = NULL;
return changed;
}
enum rtx_code code = GET_CODE (operands[1]), compare_code;
rtx compare_seq, compare_op;
rtx second_test, bypass_test;
+ enum machine_mode mode = GET_MODE (operands[0]);
/* When the compare code is not LTU or GEU, we cannot use the sbbl case.
 In case the comparison is done with an immediate, we can convert it to LTU or
if ((code == LEU || code == GTU)
&& GET_CODE (ix86_compare_op1) == CONST_INT
- && GET_MODE (operands[0]) != HImode
+ && mode != HImode
&& (unsigned int)INTVAL (ix86_compare_op1) != 0xffffffff
&& GET_CODE (operands[2]) == CONST_INT
&& GET_CODE (operands[3]) == CONST_INT)
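 /* Schematically, the elided body performs the standard unsigned range
    shift (x <=u C is x <u C+1, and x >u C is x >=u C+1), which is safe
    because the guard above rejected C == 0xffffffff.  A sketch of the
    idea, not necessarily the exact code:

	code = (code == LEU ? LTU : GEU);
	ix86_compare_op1 = GEN_INT (INTVAL (ix86_compare_op1) + 1);
 */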
/* Don't attempt mode expansion here -- if we had to expand 5 or 6
HImode insns, we'd be swallowed in word prefix ops. */
- if (GET_MODE (operands[0]) != HImode
- && (GET_MODE (operands[0]) != DImode || TARGET_64BIT)
+ if (mode != HImode
+ && (mode != DImode || TARGET_64BIT)
&& GET_CODE (operands[2]) == CONST_INT
&& GET_CODE (operands[3]) == CONST_INT)
{
if (reg_overlap_mentioned_p (out, ix86_compare_op0)
|| reg_overlap_mentioned_p (out, ix86_compare_op1))
- tmp = gen_reg_rtx (GET_MODE (operands[0]));
+ tmp = gen_reg_rtx (mode);
emit_insn (compare_seq);
- if (GET_MODE (tmp) == DImode)
+ if (mode == DImode)
emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp));
else
emit_insn (gen_x86_movsicc_0_m1 (tmp));
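 /* The *movcc_0_m1 patterns materialize the carry flag left by the
    comparison as an all-zeros or all-ones word; schematically
    (registers illustrative):

	cmpl	%esi, %edi
	sbbl	%eax, %eax	; eax = -1 if carry set, else 0

    The arithmetic emitted below then folds this mask into the two
    constants being selected. */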
* Size 5 - 8.
*/
if (ct)
- {
- if (GET_MODE (tmp) == DImode)
- emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (ct)));
- else
- emit_insn (gen_addsi3 (tmp, tmp, GEN_INT (ct)));
- }
+ tmp = expand_simple_binop (mode, PLUS,
+ tmp, GEN_INT (ct),
+ tmp, 1, OPTAB_DIRECT);
}
else if (cf == -1)
{
*
* Size 8.
*/
- if (GET_MODE (tmp) == DImode)
- emit_insn (gen_iordi3 (tmp, tmp, GEN_INT (ct)));
- else
- emit_insn (gen_iorsi3 (tmp, tmp, GEN_INT (ct)));
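+ /* The 0 / -1 mask ORed with CT yields CT or -1 (which equals CF
+ here); expand_simple_binop picks the iordi3 or iorsi3 pattern from
+ MODE, as the removed lines did explicitly. */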
+ tmp = expand_simple_binop (mode, IOR,
+ tmp, GEN_INT (ct),
+ tmp, 1, OPTAB_DIRECT);
}
else if (diff == -1 && ct)
{
*
* Size 8 - 11.
*/
- if (GET_MODE (tmp) == DImode)
- {
- emit_insn (gen_one_cmpldi2 (tmp, tmp));
- if (cf)
- emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (cf)));
- }
- else
- {
- emit_insn (gen_one_cmplsi2 (tmp, tmp));
- if (cf)
- emit_insn (gen_addsi3 (tmp, tmp, GEN_INT (cf)));
- }
+ tmp = expand_simple_unop (mode, NOT, tmp, tmp, 1);
+ if (cf)
+ tmp = expand_simple_binop (mode, PLUS,
+ tmp, GEN_INT (cf),
+ tmp, 1, OPTAB_DIRECT);
}
else
{
*
* Size 8 - 11.
*/
- if (GET_MODE (tmp) == DImode)
- {
- emit_insn (gen_anddi3 (tmp, tmp, GEN_INT (trunc_int_for_mode
- (cf - ct, DImode))));
- if (ct)
- emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (ct)));
- }
- else
- {
- emit_insn (gen_andsi3 (tmp, tmp, GEN_INT (trunc_int_for_mode
- (cf - ct, SImode))));
- if (ct)
- emit_insn (gen_addsi3 (tmp, tmp, GEN_INT (ct)));
- }
+ tmp = expand_simple_binop (mode, AND,
+ tmp,
+ GEN_INT (trunc_int_for_mode
+ (cf - ct, mode)),
+ tmp, 1, OPTAB_DIRECT);
+ if (ct)
+ tmp = expand_simple_binop (mode, PLUS,
+ tmp, GEN_INT (ct),
+ tmp, 1, OPTAB_DIRECT);
}
if (tmp != out)
code = reverse_condition (code);
}
}
- if (diff == 1 || diff == 2 || diff == 4 || diff == 8
- || diff == 3 || diff == 5 || diff == 9)
+ if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
+ || diff == 3 || diff == 5 || diff == 9)
+ && (mode != DImode || x86_64_sign_extended_value (GEN_INT (cf))))
{
/*
* xorl dest,dest
{
rtx out1;
out1 = out;
- tmp = gen_rtx_MULT (GET_MODE (out), out1, GEN_INT (diff & ~1));
+ tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
nops++;
if (diff & 1)
{
- tmp = gen_rtx_PLUS (GET_MODE (out), tmp, out1);
+ tmp = gen_rtx_PLUS (mode, tmp, out1);
nops++;
}
}
if (cf != 0)
{
- tmp = gen_rtx_PLUS (GET_MODE (out), tmp, GEN_INT (cf));
+ tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
nops++;
}
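 /* For instance, diff == 5 with nonzero cf folds into a single lea;
    schematically:

	xorl	%eax, %eax
	cmpl	%edx, %ecx
	setcc	%al			; sete/setne/... as appropriate
	leal	cf(%eax,%eax,4), %eax	; eax * 5 + cf
 */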
if (tmp != out
out = emit_store_flag (out, code, ix86_compare_op0,
ix86_compare_op1, VOIDmode, 0, 1);
- emit_insn (gen_addsi3 (out, out, constm1_rtx));
- emit_insn (gen_andsi3 (out, out, GEN_INT (trunc_int_for_mode
- (cf - ct, SImode))));
- if (ct != 0)
- emit_insn (gen_addsi3 (out, out, GEN_INT (ct)));
+ out = expand_simple_binop (mode, PLUS,
+ out, constm1_rtx,
+ out, 1, OPTAB_DIRECT);
+ out = expand_simple_binop (mode, AND,
+ out,
+ GEN_INT (trunc_int_for_mode
+ (cf - ct, mode)),
+ out, 1, OPTAB_DIRECT);
+ out = expand_simple_binop (mode, PLUS,
+ out, GEN_INT (ct),
+ out, 1, OPTAB_DIRECT);
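+ /* Worked through: emit_store_flag leaves 0 or 1 in OUT, so the three
+ operations above compute ((out - 1) & (cf - ct)) + ct, i.e. CT when
+ the flag was 1 and CF when it was 0. The removed ct != 0 guard is no
+ longer needed; adding zero is harmless. */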
if (out != operands[0])
emit_move_insn (operands[0], out);
return 0; /* FAIL */
orig_out = operands[0];
- tmp = gen_reg_rtx (GET_MODE (orig_out));
+ tmp = gen_reg_rtx (mode);
operands[0] = tmp;
/* Recurse to get the constant loaded. */
return 0; /* FAIL */
/* Mask in the interesting variable. */
- out = expand_binop (GET_MODE (orig_out), op, var, tmp, orig_out, 0,
+ out = expand_binop (mode, op, var, tmp, orig_out, 0,
OPTAB_WIDEN);
if (out != orig_out)
emit_move_insn (orig_out, out);
* Size 15.
*/
- if (! nonimmediate_operand (operands[2], GET_MODE (operands[0])))
- operands[2] = force_reg (GET_MODE (operands[0]), operands[2]);
- if (! nonimmediate_operand (operands[3], GET_MODE (operands[0])))
- operands[3] = force_reg (GET_MODE (operands[0]), operands[3]);
+ if (! nonimmediate_operand (operands[2], mode))
+ operands[2] = force_reg (mode, operands[2]);
+ if (! nonimmediate_operand (operands[3], mode))
+ operands[3] = force_reg (mode, operands[3]);
if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
{
- rtx tmp = gen_reg_rtx (GET_MODE (operands[0]));
+ rtx tmp = gen_reg_rtx (mode);
emit_move_insn (tmp, operands[3]);
operands[3] = tmp;
}
if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
{
- rtx tmp = gen_reg_rtx (GET_MODE (operands[0]));
+ rtx tmp = gen_reg_rtx (mode);
emit_move_insn (tmp, operands[2]);
operands[2] = tmp;
}
if (! register_operand (operands[2], VOIDmode)
&& ! register_operand (operands[3], VOIDmode))
- operands[2] = force_reg (GET_MODE (operands[0]), operands[2]);
+ operands[2] = force_reg (mode, operands[2]);
emit_insn (compare_seq);
emit_insn (gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
+ gen_rtx_IF_THEN_ELSE (mode,
compare_op, operands[2],
operands[3])));
if (bypass_test)
emit_insn (gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
+ gen_rtx_IF_THEN_ELSE (mode,
bypass_test,
operands[3],
operands[0])));
if (second_test)
emit_insn (gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
+ gen_rtx_IF_THEN_ELSE (mode,
second_test,
operands[2],
operands[0])));
rebuild_jump_labels (insns);
timevar_pop (TV_JUMP);
- timevar_push (TV_FLOW);
- cleanup_cfg (CLEANUP_EXPENSIVE);
-
- /* Blimey. We've got to have the CFG up to date for the call to
- if_convert below. However, the random deletion of blocks
- without updating life info can wind up with Wierd Stuff in
- global_live_at_end. We then run sched1, which updates things
- properly, discovers the wierdness and aborts. */
- allocate_bb_life_data ();
- update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES,
- PROP_DEATH_NOTES | PROP_KILL_DEAD_CODE
- | PROP_SCAN_DEAD_CODE);
-
- timevar_pop (TV_FLOW);
+ cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_UPDATE_LIFE);
}
close_dump_file (DFI_combine, print_rtl_with_bb, insns);