/* If-conversion support.
- Copyright (C) 2000, 2001, 2002 Free Software Foundation, Inc.
+ Copyright (C) 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "expr.h"
#include "real.h"
#include "output.h"
+#include "optabs.h"
#include "toplev.h"
#include "tm_p.h"
+#include "cfgloop.h"
+#include "target.h"
#ifndef HAVE_conditional_execution
static dominance_info post_dominators;
/* Forward references. */
-static int count_bb_insns PARAMS ((basic_block));
-static rtx first_active_insn PARAMS ((basic_block));
-static rtx last_active_insn PARAMS ((basic_block, int));
-static int seq_contains_jump PARAMS ((rtx));
-static basic_block block_fallthru PARAMS ((basic_block));
-static int cond_exec_process_insns PARAMS ((ce_if_block_t *,
- rtx, rtx, rtx, rtx, int));
-static rtx cond_exec_get_condition PARAMS ((rtx));
-static int cond_exec_process_if_block PARAMS ((ce_if_block_t *, int));
-static rtx noce_get_condition PARAMS ((rtx, rtx *));
-static int noce_operand_ok PARAMS ((rtx));
-static int noce_process_if_block PARAMS ((ce_if_block_t *));
-static int process_if_block PARAMS ((ce_if_block_t *));
-static void merge_if_block PARAMS ((ce_if_block_t *));
-static int find_cond_trap PARAMS ((basic_block, edge, edge));
-static basic_block find_if_header PARAMS ((basic_block, int));
-static int block_jumps_and_fallthru_p PARAMS ((basic_block, basic_block));
-static int find_if_block PARAMS ((ce_if_block_t *));
-static int find_if_case_1 PARAMS ((basic_block, edge, edge));
-static int find_if_case_2 PARAMS ((basic_block, edge, edge));
-static int find_memory PARAMS ((rtx *, void *));
-static int dead_or_predicable PARAMS ((basic_block, basic_block,
- basic_block, basic_block, int));
-static void noce_emit_move_insn PARAMS ((rtx, rtx));
-static rtx block_has_only_trap PARAMS ((basic_block));
+static int count_bb_insns (basic_block);
+static rtx first_active_insn (basic_block);
+static rtx last_active_insn (basic_block, int);
+static int seq_contains_jump (rtx);
+static basic_block block_fallthru (basic_block);
+static int cond_exec_process_insns (ce_if_block_t *, rtx, rtx, rtx, rtx, int);
+static rtx cond_exec_get_condition (rtx);
+static int cond_exec_process_if_block (ce_if_block_t *, int);
+static rtx noce_get_condition (rtx, rtx *);
+static int noce_operand_ok (rtx);
+static int noce_process_if_block (ce_if_block_t *);
+static int process_if_block (ce_if_block_t *);
+static void merge_if_block (ce_if_block_t *);
+static int find_cond_trap (basic_block, edge, edge);
+static basic_block find_if_header (basic_block, int);
+static int block_jumps_and_fallthru_p (basic_block, basic_block);
+static int find_if_block (ce_if_block_t *);
+static int find_if_case_1 (basic_block, edge, edge);
+static int find_if_case_2 (basic_block, edge, edge);
+static int find_memory (rtx *, void *);
+static int dead_or_predicable (basic_block, basic_block, basic_block,
+ basic_block, int);
+static void noce_emit_move_insn (rtx, rtx);
+static rtx block_has_only_trap (basic_block);
+static void mark_loop_exit_edges (void);
\f
+/* Set the EDGE_LOOP_EXIT flag on every edge that leaves its source
+   block's loop, and clear it on every other edge.  Builds a throwaway
+   loop tree; when the function has no real loops (only the dummy root,
+   i.e. loops.num <= 1) no edge flags are touched.  */
+static void
+mark_loop_exit_edges (void)
+{
+ struct loops loops;
+ basic_block bb;
+ edge e;
+
+ flow_loops_find (&loops, LOOP_TREE);
+
+ if (loops.num > 1)
+ {
+ FOR_EACH_BB (bb)
+ {
+ for (e = bb->succ; e; e = e->succ_next)
+ {
+ /* E exits BB's loop iff DEST's loop is not nested inside it,
+ i.e. their common ancestor in the loop tree is not
+ BB's own loop.  */
+ if (find_common_loop (bb->loop_father, e->dest->loop_father)
+ != bb->loop_father)
+ e->flags |= EDGE_LOOP_EXIT;
+ else
+ e->flags &= ~EDGE_LOOP_EXIT;
+ }
+ }
+ }
+
+ flow_loops_free (&loops);
+}
+
/* Count the number of non-jump active insns in BB. */
static int
-count_bb_insns (bb)
- basic_block bb;
+count_bb_insns (basic_block bb)
{
int count = 0;
rtx insn = bb->head;
/* Return the first non-jump active insn in the basic block. */
static rtx
-first_active_insn (bb)
- basic_block bb;
+first_active_insn (basic_block bb)
{
rtx insn = bb->head;
/* Return the last non-jump active (non-jump) insn in the basic block. */
static rtx
-last_active_insn (bb, skip_use_p)
- basic_block bb;
- int skip_use_p;
+last_active_insn (basic_block bb, int skip_use_p)
{
rtx insn = bb->end;
rtx head = bb->head;
return insn;
}
-/* It is possible, especially when having dealt with multi-word
+/* It is possible, especially when having dealt with multi-word
arithmetic, for the expanders to have emitted jumps. Search
through the sequence and return TRUE if a jump exists so that
we can abort the conversion. */
static int
-seq_contains_jump (insn)
- rtx insn;
+seq_contains_jump (rtx insn)
{
while (insn)
{
}
static basic_block
-block_fallthru (bb)
- basic_block bb;
+block_fallthru (basic_block bb)
{
edge e;
insns were processed. */
static int
-cond_exec_process_insns (ce_info, start, end, test, prob_val, mod_ok)
- ce_if_block_t *ce_info ATTRIBUTE_UNUSED; /* if block information */
- rtx start; /* first insn to look at */
- rtx end; /* last insn to look at */
- rtx test; /* conditional execution test */
- rtx prob_val; /* probability of branch taken. */
- int mod_ok; /* true if modifications ok last insn. */
+cond_exec_process_insns (ce_if_block_t *ce_info ATTRIBUTE_UNUSED,
+ /* if block information */rtx start,
+ /* first insn to look at */rtx end,
+ /* last insn to look at */rtx test,
+ /* conditional execution test */rtx prob_val,
+ /* probability of branch taken. */int mod_ok)
{
int must_be_last = FALSE;
rtx insn;
/* Remove USE insns that get in the way. */
if (reload_completed && GET_CODE (PATTERN (insn)) == USE)
{
- /* ??? Ug. Actually unlinking the thing is problematic,
+ /* ??? Ug. Actually unlinking the thing is problematic,
given what we'd have to coordinate with our callers. */
PUT_CODE (insn, NOTE);
NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
/* Return the condition for a jump. Do not do any special processing. */
static rtx
-cond_exec_get_condition (jump)
- rtx jump;
+cond_exec_get_condition (rtx jump)
{
rtx test_if, cond;
converting the block. */
static int
-cond_exec_process_if_block (ce_info, do_multiple_p)
- ce_if_block_t * ce_info; /* if block information */
- int do_multiple_p; /* != 0 if we should handle && and || blocks */
+cond_exec_process_if_block (ce_if_block_t * ce_info,
+ /* if block information */int do_multiple_p)
{
basic_block test_bb = ce_info->test_bb; /* last test block */
basic_block then_bb = ce_info->then_bb; /* THEN */
/* Map test_expr/test_jump into the appropriate MD tests to use on
the conditionally executed code. */
-
+
true_expr = test_expr;
false_code = reversed_comparison_code (true_expr, test_bb->end);
return FALSE;
}
\f
-/* Used by noce_process_if_block to communicate with its subroutines.
+/* Used by noce_process_if_block to communicate with its subroutines.
The subroutines know that A and B may be evaluated freely. They
- know that X is a register. They should insert new instructions
+ know that X is a register. They should insert new instructions
before cond_earliest. */
struct noce_if_info
rtx jump, cond, cond_earliest;
};
-static rtx noce_emit_store_flag PARAMS ((struct noce_if_info *,
- rtx, int, int));
-static int noce_try_store_flag PARAMS ((struct noce_if_info *));
-static int noce_try_store_flag_inc PARAMS ((struct noce_if_info *));
-static int noce_try_store_flag_constants PARAMS ((struct noce_if_info *));
-static int noce_try_store_flag_mask PARAMS ((struct noce_if_info *));
-static rtx noce_emit_cmove PARAMS ((struct noce_if_info *,
- rtx, enum rtx_code, rtx,
- rtx, rtx, rtx));
-static int noce_try_cmove PARAMS ((struct noce_if_info *));
-static int noce_try_cmove_arith PARAMS ((struct noce_if_info *));
-static rtx noce_get_alt_condition PARAMS ((struct noce_if_info *,
- rtx, rtx *));
-static int noce_try_minmax PARAMS ((struct noce_if_info *));
-static int noce_try_abs PARAMS ((struct noce_if_info *));
+static rtx noce_emit_store_flag (struct noce_if_info *, rtx, int, int);
+static int noce_try_store_flag (struct noce_if_info *);
+static int noce_try_addcc (struct noce_if_info *);
+static int noce_try_store_flag_constants (struct noce_if_info *);
+static int noce_try_store_flag_mask (struct noce_if_info *);
+static rtx noce_emit_cmove (struct noce_if_info *, rtx, enum rtx_code, rtx,
+ rtx, rtx, rtx);
+static int noce_try_cmove (struct noce_if_info *);
+static int noce_try_cmove_arith (struct noce_if_info *);
+static rtx noce_get_alt_condition (struct noce_if_info *, rtx, rtx *);
+static int noce_try_minmax (struct noce_if_info *);
+static int noce_try_abs (struct noce_if_info *);
/* Helper function for noce_try_store_flag*. */
static rtx
-noce_emit_store_flag (if_info, x, reversep, normalize)
- struct noce_if_info *if_info;
- rtx x;
- int reversep, normalize;
+noce_emit_store_flag (struct noce_if_info *if_info, rtx x, int reversep,
+ int normalize)
{
rtx cond = if_info->cond;
int cond_complex;
end_sequence ();
}
- /* Don't even try if the comparison operands are weird. */
- if (cond_complex)
+ /* Don't even try if the comparison operands or the mode of X are weird. */
+ if (cond_complex || !SCALAR_INT_MODE_P (GET_MODE (x)))
return NULL_RTX;
return emit_store_flag (x, code, XEXP (cond, 0),
/* Emit instruction to move an rtx into STRICT_LOW_PART. */
static void
-noce_emit_move_insn (x, y)
- rtx x, y;
+noce_emit_move_insn (rtx x, rtx y)
{
enum machine_mode outmode, inmode;
rtx outer, inner;
a go at the conversion. */
static int
-noce_try_store_flag (if_info)
- struct noce_if_info *if_info;
+noce_try_store_flag (struct noce_if_info *if_info)
{
int reversep;
rtx target, seq;
seq = get_insns ();
end_sequence ();
- emit_insn_before_scope (seq, if_info->jump, INSN_SCOPE (if_info->insn_a));
+ emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a));
return TRUE;
}
/* Convert "if (test) x = a; else x = b", for A and B constant. */
static int
-noce_try_store_flag_constants (if_info)
- struct noce_if_info *if_info;
+noce_try_store_flag_constants (struct noce_if_info *if_info)
{
rtx target, seq;
int reversep;
return FALSE;
if (reversep)
- {
+ {
tmp = itrue; itrue = ifalse; ifalse = tmp;
diff = trunc_int_for_mode (-diff, mode);
}
if (seq_contains_jump (seq))
return FALSE;
- emit_insn_before_scope (seq, if_info->jump, INSN_SCOPE (if_info->insn_a));
+ emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a));
return TRUE;
}
return FALSE;
}
-/* Convert "if (test) foo++" into "foo += (test != 0)", and
+/* Convert "if (test) foo++" into "foo += (test != 0)", and
similarly for "foo--". */
static int
-noce_try_store_flag_inc (if_info)
- struct noce_if_info *if_info;
+noce_try_addcc (struct noce_if_info *if_info)
{
rtx target, seq;
int subtract, normalize;
if (! no_new_pseudos
- && (BRANCH_COST >= 2
- || HAVE_incscc
- || HAVE_decscc)
/* Should be no `else' case to worry about. */
&& if_info->b == if_info->x
&& GET_CODE (if_info->a) == PLUS
- && (XEXP (if_info->a, 1) == const1_rtx
- || XEXP (if_info->a, 1) == constm1_rtx)
&& rtx_equal_p (XEXP (if_info->a, 0), if_info->x)
&& (reversed_comparison_code (if_info->cond, if_info->jump)
!= UNKNOWN))
{
- if (STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
- subtract = 0, normalize = 0;
- else if (-STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
- subtract = 1, normalize = 0;
- else
- subtract = 0, normalize = INTVAL (XEXP (if_info->a, 1));
-
- start_sequence ();
-
- target = noce_emit_store_flag (if_info,
- gen_reg_rtx (GET_MODE (if_info->x)),
- 1, normalize);
+ rtx cond = if_info->cond;
+ enum rtx_code code = reversed_comparison_code (cond, if_info->jump);
- if (target)
- target = expand_simple_binop (GET_MODE (if_info->x),
- subtract ? MINUS : PLUS,
- if_info->x, target, if_info->x,
- 0, OPTAB_WIDEN);
- if (target)
+ /* First try to use addcc pattern. */
+ if (general_operand (XEXP (cond, 0), VOIDmode)
+ && general_operand (XEXP (cond, 1), VOIDmode))
{
- if (target != if_info->x)
- noce_emit_move_insn (if_info->x, target);
-
- seq = get_insns ();
+ start_sequence ();
+ target = emit_conditional_add (if_info->x, code,
+ XEXP (cond, 0), XEXP (cond, 1),
+ VOIDmode,
+ if_info->b, XEXP (if_info->a, 1),
+ GET_MODE (if_info->x),
+ (code == LTU || code == GEU
+ || code == LEU || code == GTU));
+ if (target)
+ {
+ if (target != if_info->x)
+ noce_emit_move_insn (if_info->x, target);
+
+ seq = get_insns ();
+ end_sequence ();
+ emit_insn_before_setloc (seq, if_info->jump,
+ INSN_LOCATOR (if_info->insn_a));
+ return TRUE;
+ }
end_sequence ();
+ }
- if (seq_contains_jump (seq))
- return FALSE;
+ /* If that fails, construct conditional increment or decrement using
+ setcc. */
+ if (BRANCH_COST >= 2
+ && (XEXP (if_info->a, 1) == const1_rtx
+ || XEXP (if_info->a, 1) == constm1_rtx))
+ {
+ start_sequence ();
+ if (STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
+ subtract = 0, normalize = 0;
+ else if (-STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
+ subtract = 1, normalize = 0;
+ else
+ subtract = 0, normalize = INTVAL (XEXP (if_info->a, 1));
- emit_insn_before_scope (seq, if_info->jump,
- INSN_SCOPE (if_info->insn_a));
- return TRUE;
- }
+ target = noce_emit_store_flag (if_info,
+ gen_reg_rtx (GET_MODE (if_info->x)),
+ 1, normalize);
- end_sequence ();
+ if (target)
+ target = expand_simple_binop (GET_MODE (if_info->x),
+ subtract ? MINUS : PLUS,
+ if_info->x, target, if_info->x,
+ 0, OPTAB_WIDEN);
+ if (target)
+ {
+ if (target != if_info->x)
+ noce_emit_move_insn (if_info->x, target);
+
+ seq = get_insns ();
+ end_sequence ();
+
+ if (seq_contains_jump (seq))
+ return FALSE;
+
+ emit_insn_before_setloc (seq, if_info->jump,
+ INSN_LOCATOR (if_info->insn_a));
+
+ return TRUE;
+ }
+ end_sequence ();
+ }
}
return FALSE;
/* Convert "if (test) x = 0;" to "x &= -(test == 0);" */
static int
-noce_try_store_flag_mask (if_info)
- struct noce_if_info *if_info;
+noce_try_store_flag_mask (struct noce_if_info *if_info)
{
rtx target, seq;
int reversep;
if (seq_contains_jump (seq))
return FALSE;
- emit_insn_before_scope (seq, if_info->jump,
- INSN_SCOPE (if_info->insn_a));
+ emit_insn_before_setloc (seq, if_info->jump,
+ INSN_LOCATOR (if_info->insn_a));
return TRUE;
}
/* Helper function for noce_try_cmove and noce_try_cmove_arith. */
static rtx
-noce_emit_cmove (if_info, x, code, cmp_a, cmp_b, vfalse, vtrue)
- struct noce_if_info *if_info;
- rtx x, cmp_a, cmp_b, vfalse, vtrue;
- enum rtx_code code;
+noce_emit_cmove (struct noce_if_info *if_info, rtx x, enum rtx_code code,
+ rtx cmp_a, rtx cmp_b, rtx vfalse, rtx vtrue)
{
/* If earliest == jump, try to build the cmove insn directly.
This is helpful when combine has created some complex condition
#else
/* We'll never get here, as noce_process_if_block doesn't call the
functions involved. Ifdef code, however, should be discouraged
- because it leads to typos in the code not selected. However,
+ because it leads to typos in the code not selected. However,
emit_conditional_move won't exist either. */
return NULL_RTX;
#endif
has had a go at it. */
static int
-noce_try_cmove (if_info)
- struct noce_if_info *if_info;
+noce_try_cmove (struct noce_if_info *if_info)
{
enum rtx_code code;
rtx target, seq;
seq = get_insns ();
end_sequence ();
- emit_insn_before_scope (seq, if_info->jump,
- INSN_SCOPE (if_info->insn_a));
+ emit_insn_before_setloc (seq, if_info->jump,
+ INSN_LOCATOR (if_info->insn_a));
return TRUE;
}
else
/* Try more complex cases involving conditional_move. */
static int
-noce_try_cmove_arith (if_info)
- struct noce_if_info *if_info;
+noce_try_cmove_arith (struct noce_if_info *if_info)
{
rtx a = if_info->a;
rtx b = if_info->b;
if (test)
x = y;
*/
-
+
code = GET_CODE (if_info->cond);
insn_a = if_info->insn_a;
insn_b = if_info->insn_b;
/* If either operand is complex, load it into a register first.
The best way to do this is to copy the original insn. In this
- way we preserve any clobbers etc that the insn may have had.
+ way we preserve any clobbers etc that the insn may have had.
This is of course not possible in the IS_MEM case. */
if (! general_operand (a, GET_MODE (a)))
{
tmp = get_insns ();
end_sequence ();
- emit_insn_before_scope (tmp, if_info->jump, INSN_SCOPE (if_info->insn_a));
+ emit_insn_before_setloc (tmp, if_info->jump, INSN_LOCATOR (if_info->insn_a));
return TRUE;
end_seq_and_fail:
For these we wish to know that it is A or B in the condition. */
static rtx
-noce_get_alt_condition (if_info, target, earliest)
- struct noce_if_info *if_info;
- rtx target;
- rtx *earliest;
+noce_get_alt_condition (struct noce_if_info *if_info, rtx target,
+ rtx *earliest)
{
rtx cond, set, insn;
int reverse;
/* Convert "if (a < b) x = a; else x = b;" to "x = min(a, b);", etc. */
static int
-noce_try_minmax (if_info)
- struct noce_if_info *if_info;
-{
+noce_try_minmax (struct noce_if_info *if_info)
+{
rtx cond, earliest, target, seq;
enum rtx_code code, op;
int unsignedp;
noce_emit_move_insn (if_info->x, target);
seq = get_insns ();
- end_sequence ();
+ end_sequence ();
if (seq_contains_jump (seq))
return FALSE;
- emit_insn_before_scope (seq, if_info->jump, INSN_SCOPE (if_info->insn_a));
+ emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a));
if_info->cond = cond;
if_info->cond_earliest = earliest;
/* Convert "if (a < 0) x = -a; else x = a;" to "x = abs(a);", etc. */
static int
-noce_try_abs (if_info)
- struct noce_if_info *if_info;
-{
+noce_try_abs (struct noce_if_info *if_info)
+{
rtx cond, earliest, target, seq, a, b, c;
int negate;
}
else
return FALSE;
-
+
cond = noce_get_alt_condition (if_info, b, &earliest);
if (!cond)
return FALSE;
for (insn = earliest;
insn != if_info->test_bb->head;
insn = PREV_INSN (insn))
- if (INSN_P (insn)
+ if (INSN_P (insn)
&& ((note = find_reg_note (insn, REG_EQUAL, c))
|| (note = find_reg_note (insn, REG_EQUIV, c))))
break;
c = get_pool_constant (XEXP (c, 0));
/* Work around funny ideas get_condition has wrt canonicalization.
- Note that these rtx constants are known to be CONST_INT, and
+ Note that these rtx constants are known to be CONST_INT, and
therefore imply integer comparisons. */
if (c == constm1_rtx && GET_CODE (cond) == GT)
;
start_sequence ();
- target = expand_simple_unop (GET_MODE (if_info->x), ABS, b, if_info->x, 0);
+ target = expand_abs_nojump (GET_MODE (if_info->x), b, if_info->x, 1);
/* ??? It's a quandry whether cmove would be better here, especially
for integers. Perhaps combine will clean things up. */
noce_emit_move_insn (if_info->x, target);
seq = get_insns ();
- end_sequence ();
+ end_sequence ();
if (seq_contains_jump (seq))
return FALSE;
- emit_insn_before_scope (seq, if_info->jump, INSN_SCOPE (if_info->insn_a));
+ emit_insn_before_setloc (seq, if_info->jump, INSN_LOCATOR (if_info->insn_a));
if_info->cond = cond;
if_info->cond_earliest = earliest;
valid at JUMP, instead of at EARLIEST. */
static rtx
-noce_get_condition (jump, earliest)
- rtx jump;
- rtx *earliest;
+noce_get_condition (rtx jump, rtx *earliest)
{
rtx cond, set, tmp, insn;
bool reverse;
/* Return true if OP is ok for if-then-else processing. */
static int
-noce_operand_ok (op)
- rtx op;
+noce_operand_ok (rtx op)
{
/* We special-case memories, so handle any of them with
no address side effects. */
successful at converting the block. */
static int
-noce_process_if_block (ce_info)
- struct ce_if_block * ce_info;
+noce_process_if_block (struct ce_if_block * ce_info)
{
basic_block test_bb = ce_info->test_bb; /* test block */
basic_block then_bb = ce_info->then_bb; /* THEN */
rtx insn_a, insn_b;
rtx set_a, set_b;
rtx orig_x, x, a, b;
- rtx jump, cond, insn;
+ rtx jump, cond;
/* We're looking for patterns of the form
else
{
insn_b = prev_nonnote_insn (if_info.cond_earliest);
+ /* We're going to be moving the evaluation of B down from above
+ COND_EARLIEST to JUMP. Make sure the relevant data is still
+ intact. */
if (! insn_b
|| GET_CODE (insn_b) != INSN
|| (set_b = single_set (insn_b)) == NULL_RTX
|| ! rtx_equal_p (x, SET_DEST (set_b))
+ || reg_overlap_mentioned_p (x, SET_SRC (set_b))
+ || modified_between_p (SET_SRC (set_b),
+ PREV_INSN (if_info.cond_earliest), jump)
+ /* Likewise with X. In particular this can happen when
+ noce_get_condition looks farther back in the instruction
+ stream than one might expect. */
|| reg_overlap_mentioned_p (x, cond)
|| reg_overlap_mentioned_p (x, a)
- || reg_overlap_mentioned_p (x, SET_SRC (set_b)))
+ || modified_between_p (x, PREV_INSN (if_info.cond_earliest), jump))
insn_b = set_b = NULL_RTX;
}
- b = (set_b ? SET_SRC (set_b) : x);
- /* X may not be mentioned in the range (cond_earliest, jump].
- Note the use of reg_overlap_mentioned_p, which handles memories
- properly, as opposed to reg_mentioned_p, which doesn't. */
- for (insn = jump; insn != if_info.cond_earliest; insn = PREV_INSN (insn))
- if (INSN_P (insn) && reg_overlap_mentioned_p (x, PATTERN (insn)))
- return FALSE;
+ /* If x has side effects then only the if-then-else form is safe to
+ convert. But even in that case we would need to restore any notes
+ (such as REG_INC) at then end. That can be tricky if
+ noce_emit_move_insn expands to more than one insn, so disable the
+ optimization entirely for now if there are side effects. */
+ if (side_effects_p (x))
+ return FALSE;
- /* A and B may not be modified in the range [cond_earliest, jump). */
- for (insn = if_info.cond_earliest; insn != jump; insn = NEXT_INSN (insn))
- if (INSN_P (insn)
- && (modified_in_p (a, insn) || modified_in_p (b, insn)))
- return FALSE;
+ b = (set_b ? SET_SRC (set_b) : x);
/* Only operate on register destinations, and even then avoid extending
the lifetime of hard registers on small register class machines. */
|| (SMALL_REGISTER_CLASSES
&& REGNO (x) < FIRST_PSEUDO_REGISTER))
{
- if (no_new_pseudos)
+ if (no_new_pseudos || GET_MODE (x) == BLKmode)
return FALSE;
x = gen_reg_rtx (GET_MODE (GET_CODE (x) == STRICT_LOW_PART
? XEXP (x, 0) : x));
if (else_bb && insn_b == else_bb->end)
else_bb->end = PREV_INSN (insn_b);
- reorder_insns (insn_b, insn_b, PREV_INSN (if_info.cond_earliest));
+ reorder_insns (insn_b, insn_b, PREV_INSN (jump));
/* If there was a REG_EQUAL note, delete it since it may have been
true due to this insn being after a jump. */
x must be executed twice. */
else if (insn_b && side_effects_p (orig_x))
return FALSE;
-
+
x = orig_x;
goto success;
}
{
if (noce_try_store_flag_constants (&if_info))
goto success;
- if (noce_try_store_flag_inc (&if_info))
+ if (noce_try_addcc (&if_info))
goto success;
if (noce_try_store_flag_mask (&if_info))
goto success;
if (insn_b && else_bb)
delete_insn (insn_b);
- /* The new insns will have been inserted before cond_earliest. We should
- be able to remove the jump with impunity, but the condition itself may
- have been modified by gcse to be shared across basic blocks. */
+ /* The new insns will have been inserted immediately before the jump. We
+ should be able to remove the jump with impunity, but the condition itself
+ may have been modified by gcse to be shared across basic blocks. */
delete_insn (jump);
/* If we used a temporary, fix it up now. */
insn_b = get_insns ();
end_sequence ();
- emit_insn_after_scope (insn_b, test_bb->end, INSN_SCOPE (insn_a));
+ emit_insn_after_setloc (insn_b, test_bb->end, INSN_LOCATOR (insn_a));
}
/* Merge the blocks! */
straight line code. Return true if successful. */
static int
-process_if_block (ce_info)
- struct ce_if_block * ce_info;
+process_if_block (struct ce_if_block * ce_info)
{
if (! reload_completed
&& noce_process_if_block (ce_info))
/* Merge the blocks and mark for local life update. */
static void
-merge_if_block (ce_info)
- struct ce_if_block * ce_info;
+merge_if_block (struct ce_if_block * ce_info)
{
basic_block test_bb = ce_info->test_bb; /* last test block */
basic_block then_bb = ce_info->then_bb; /* THEN */
basic_block bb = test_bb;
basic_block last_test_bb = ce_info->last_test_bb;
basic_block fallthru = block_fallthru (bb);
-
+
do
{
bb = fallthru;
fallthru = block_fallthru (bb);
if (post_dominators)
delete_from_dominance_info (post_dominators, bb);
- merge_blocks_nomove (combo_bb, bb);
+ merge_blocks (combo_bb, bb);
num_removed_blocks++;
}
while (bb != last_test_bb);
then_bb->global_live_at_end);
if (post_dominators)
delete_from_dominance_info (post_dominators, then_bb);
- merge_blocks_nomove (combo_bb, then_bb);
+ merge_blocks (combo_bb, then_bb);
num_removed_blocks++;
}
{
if (post_dominators)
delete_from_dominance_info (post_dominators, else_bb);
- merge_blocks_nomove (combo_bb, else_bb);
+ merge_blocks (combo_bb, else_bb);
num_removed_blocks++;
}
Since we've already merged the TEST, THEN and ELSE blocks, we should
have only one remaining edge from our if-then-else diamond. If there
is more than one remaining edge, it must come from elsewhere. There
- may be zero incoming edges if the THEN block didn't actually join
+ may be zero incoming edges if the THEN block didn't actually join
back up (as with a call to abort). */
else if ((join_bb->pred == NULL
|| join_bb->pred->pred_next == NULL)
if (post_dominators)
delete_from_dominance_info (post_dominators, join_bb);
- merge_blocks_nomove (combo_bb, join_bb);
+ merge_blocks (combo_bb, join_bb);
num_removed_blocks++;
}
else
first block if some transformation was done. Return NULL otherwise. */
static basic_block
-find_if_header (test_bb, pass)
- basic_block test_bb;
- int pass;
+find_if_header (basic_block test_bb, int pass)
{
ce_if_block_t ce_info;
edge then_edge;
|| (else_edge->flags & EDGE_COMPLEX))
return NULL;
+ /* Nor exit the loop. */
+ if ((then_edge->flags & EDGE_LOOP_EXIT)
+ || (else_edge->flags & EDGE_LOOP_EXIT))
+ return NULL;
+
/* The THEN edge is canonically the one that falls through. */
if (then_edge->flags & EDGE_FALLTHRU)
;
/* Otherwise this must be a multiway branch of some sort. */
return NULL;
- memset ((PTR) &ce_info, '\0', sizeof (ce_info));
+ memset (&ce_info, '\0', sizeof (ce_info));
ce_info.test_bb = test_bb;
ce_info.then_bb = then_edge->dest;
ce_info.else_bb = else_edge->dest;
of non-note, non-jump, non-USE/CLOBBER insns in the block. */
static int
-block_jumps_and_fallthru_p (cur_bb, target_bb)
- basic_block cur_bb;
- basic_block target_bb;
+block_jumps_and_fallthru_p (basic_block cur_bb, basic_block target_bb)
{
edge cur_edge;
int fallthru_p = FALSE;
Return TRUE if we were successful at converting the block. */
static int
-find_if_block (ce_info)
- struct ce_if_block * ce_info;
+find_if_block (struct ce_if_block * ce_info)
{
basic_block test_bb = ce_info->test_bb;
basic_block then_bb = ce_info->then_bb;
int max_insns = MAX_CONDITIONAL_EXECUTE;
int n_insns;
- /* Determine if the preceeding block is an && or || block. */
+ /* Determine if the preceding block is an && or || block. */
if ((n_insns = block_jumps_and_fallthru_p (bb, else_bb)) >= 0)
{
ce_info->and_and_p = TRUE;
}
else if ((n_insns = block_jumps_and_fallthru_p (bb, then_bb)) >= 0)
{
- ce_info->and_and_p = FALSE;
+ ce_info->and_and_p = FALSE;
target_bb = then_bb;
}
else
/* The THEN block of an IF-THEN combo must have zero or one successors. */
if (then_succ != NULL_EDGE
&& (then_succ->succ_next != NULL_EDGE
- || (then_succ->flags & EDGE_COMPLEX)))
+ || (then_succ->flags & EDGE_COMPLEX)
+ || (flow2_completed && tablejump_p (then_bb->end, NULL, NULL))))
return FALSE;
/* If the THEN block has no successors, conditional execution can still
&& then_succ->dest == else_succ->dest
&& else_bb->pred->pred_next == NULL_EDGE
&& else_succ->succ_next == NULL_EDGE
- && ! (else_succ->flags & EDGE_COMPLEX))
+ && ! (else_succ->flags & EDGE_COMPLEX)
+ && ! (flow2_completed && tablejump_p (else_bb->end, NULL, NULL)))
join_bb = else_succ->dest;
/* Otherwise it is not an IF-THEN or IF-THEN-ELSE combination. */
else
- return FALSE;
+ return FALSE;
num_possible_if_blocks++;
to a trap, into a conditional trap. */
static int
-find_cond_trap (test_bb, then_edge, else_edge)
- basic_block test_bb;
- edge then_edge, else_edge;
+find_cond_trap (basic_block test_bb, edge then_edge, edge else_edge)
{
basic_block then_bb = then_edge->dest;
basic_block else_bb = else_edge->dest;
return FALSE;
/* Emit the new insns before cond_earliest. */
- emit_insn_before_scope (seq, cond_earliest, INSN_SCOPE (trap));
+ emit_insn_before_setloc (seq, cond_earliest, INSN_LOCATOR (trap));
/* Delete the trap block if possible. */
remove_edge (trap_bb == then_bb ? then_edge : else_edge);
{
if (post_dominators)
delete_from_dominance_info (post_dominators, trap_bb);
- flow_delete_block (trap_bb);
+ delete_block (trap_bb);
num_removed_blocks++;
}
{
struct ce_if_block new_ce_info;
delete_insn (jump);
- memset ((PTR) &new_ce_info, '\0', sizeof (new_ce_info));
+ memset (&new_ce_info, '\0', sizeof (new_ce_info));
new_ce_info.test_bb = test_bb;
new_ce_info.then_bb = NULL;
new_ce_info.else_bb = NULL;
return TRUE;
}
-/* Subroutine of find_cond_trap: if BB contains only a trap insn,
+/* Subroutine of find_cond_trap: if BB contains only a trap insn,
return it. */
static rtx
-block_has_only_trap (bb)
- basic_block bb;
+block_has_only_trap (basic_block bb)
{
rtx trap;
/* Tests for case 1 above. */
static int
-find_if_case_1 (test_bb, then_edge, else_edge)
- basic_block test_bb;
- edge then_edge, else_edge;
+find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge)
{
basic_block then_bb = then_edge->dest;
basic_block else_bb = else_edge->dest, new_bb;
return FALSE;
/* Registers set are dead, or are predicable. */
- if (! dead_or_predicable (test_bb, then_bb, else_bb,
+ if (! dead_or_predicable (test_bb, then_bb, else_bb,
then_bb->succ->dest, 1))
return FALSE;
bitmap_operation (test_bb->global_live_at_end,
else_bb->global_live_at_start,
then_bb->global_live_at_end, BITMAP_IOR);
-
+
new_bb = redirect_edge_and_branch_force (FALLTHRU_EDGE (test_bb), else_bb);
then_bb_index = then_bb->index;
if (post_dominators)
delete_from_dominance_info (post_dominators, then_bb);
- flow_delete_block (then_bb);
+ delete_block (then_bb);
/* Make rest of code believe that the newly created block is the THEN_BB
block we removed. */
{
new_bb->index = then_bb_index;
BASIC_BLOCK (then_bb_index) = new_bb;
+ if (post_dominators)
+ add_to_dominance_info (post_dominators, new_bb);
}
/* We've possibly created jump to next insn, cleanup_cfg will solve that
later. */
/* Test for case 2 above. */
static int
-find_if_case_2 (test_bb, then_edge, else_edge)
- basic_block test_bb;
- edge then_edge, else_edge;
+find_if_case_2 (basic_block test_bb, edge then_edge, edge else_edge)
{
basic_block then_bb = then_edge->dest;
basic_block else_bb = else_edge->dest;
if (note && INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2)
;
else if (else_succ->dest->index < 0
- || dominated_by_p (post_dominators, then_bb,
+ || dominated_by_p (post_dominators, then_bb,
else_succ->dest))
;
else
bitmap_operation (test_bb->global_live_at_end,
then_bb->global_live_at_start,
else_bb->global_live_at_end, BITMAP_IOR);
-
+
if (post_dominators)
delete_from_dominance_info (post_dominators, else_bb);
- flow_delete_block (else_bb);
+ delete_block (else_bb);
num_removed_blocks++;
num_updated_if_blocks++;
Return 1 if a memory is found. */
static int
-find_memory (px, data)
- rtx *px;
- void *data ATTRIBUTE_UNUSED;
+find_memory (rtx *px, void *data ATTRIBUTE_UNUSED)
{
return GET_CODE (*px) == MEM;
}
REVERSEP is true if the sense of the branch should be reversed. */
static int
-dead_or_predicable (test_bb, merge_bb, other_bb, new_dest, reversep)
- basic_block test_bb, merge_bb, other_bb;
- basic_block new_dest;
- int reversep;
+dead_or_predicable (basic_block test_bb, basic_block merge_bb,
+ basic_block other_bb, basic_block new_dest, int reversep)
{
rtx head, end, jump, earliest, old_dest, new_label = NULL_RTX;
if (HAVE_conditional_execution)
{
/* In the conditional execution case, we have things easy. We know
- the condition is reversable. We don't have to check life info,
+ the condition is reversible. We don't have to check life info,
becase we're going to conditionally execute the code anyway.
All that's left is making sure the insns involved can actually
be predicated. */
test_set = INITIALIZE_REG_SET (test_set_head);
/* ??? bb->local_set is only valid during calculate_global_regs_live,
- so we must recompute usage for MERGE_BB. Not so bad, I suppose,
+ so we must recompute usage for MERGE_BB. Not so bad, I suppose,
since we've already asserted that MERGE_BB is small. */
propagate_block (merge_bb, tmp, merge_set, merge_set, 0);
/* Main entry point for all if-conversion. */
void
-if_convert (x_life_data_ok)
- int x_life_data_ok;
+if_convert (int x_life_data_ok)
{
basic_block bb;
int pass;
num_removed_blocks = 0;
life_data_ok = (x_life_data_ok != 0);
+ if (! (* targetm.cannot_modify_jumps_p) ())
+ mark_loop_exit_edges ();
+
/* Free up basic_block_for_insn so that we don't have to keep it
- up to date, either here or in merge_blocks_nomove. */
+ up to date, either here or in merge_blocks. */
free_basic_block_vars (1);
/* Compute postdominators if we think we'll use them. */
FOR_EACH_BB (bb)
{
- basic_block new_bb = find_if_header (bb, pass);
- if (new_bb)
+ basic_block new_bb;
+ while ((new_bb = find_if_header (bb, pass)))
bb = new_bb;
}