/* If-conversion support.
- Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
+ the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
License for more details.
You should have received a copy of the GNU General Public License
- along with GCC; see the file COPYING. If not, write to the Free
- Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "target.h"
#include "timevar.h"
#include "tree-pass.h"
+#include "df.h"
#include "vec.h"
#include "vecprim.h"
-
#ifndef HAVE_conditional_execution
#define HAVE_conditional_execution 0
#endif
#define MAX_CONDITIONAL_EXECUTE (BRANCH_COST + 1)
#endif
+#define IFCVT_MULTIPLE_DUMPS 1
+
#define NULL_BLOCK ((basic_block) NULL)
/* # of IF-THEN or IF-THEN-ELSE blocks we looked at */
execution. */
static int num_updated_if_blocks;
-/* # of changes made which require life information to be updated. */
+/* # of changes made. */
static int num_true_changes;
/* Whether conditional execution changes were made. */
static int cond_exec_changed_p;
-/* True if life data ok at present. */
-static bool life_data_ok;
-
/* Forward references. */
static int count_bb_insns (basic_block);
static bool cheap_bb_rtx_cost_p (basic_block, int);
static basic_block block_fallthru (basic_block);
static int cond_exec_process_insns (ce_if_block_t *, rtx, rtx, rtx, rtx, int);
static rtx cond_exec_get_condition (rtx);
-static int cond_exec_process_if_block (ce_if_block_t *, int);
-static rtx noce_get_condition (rtx, rtx *);
+static rtx noce_get_condition (rtx, rtx *, bool);
static int noce_operand_ok (rtx);
-static int noce_process_if_block (ce_if_block_t *);
-static int process_if_block (ce_if_block_t *);
static void merge_if_block (ce_if_block_t *);
static int find_cond_trap (basic_block, edge, edge);
static basic_block find_if_header (basic_block, int);
static int block_jumps_and_fallthru_p (basic_block, basic_block);
-static int find_if_block (ce_if_block_t *);
+static int noce_find_if_block (basic_block, edge, edge, int);
+static int cond_exec_find_if_block (ce_if_block_t *);
static int find_if_case_1 (basic_block, edge, edge);
static int find_if_case_2 (basic_block, edge, edge);
static int find_memory (rtx *, void *);
struct noce_if_info
{
- /* A basic block that ends in a simple conditional jump. */
- basic_block test_bb;
+ /* The basic blocks that make up the IF-THEN-{ELSE-,}JOIN block. */
+ basic_block test_bb, then_bb, else_bb, join_bb;
/* The jump that ends TEST_BB. */
rtx jump;
/* The SET_DEST of INSN_A. */
rtx x;
+
+ /* True if this if block is not canonical. In the canonical form of
+ if blocks, the THEN_BB is the block reached via the fallthru edge
+ from TEST_BB. For the noce transformations, we allow the symmetric
+ form as well. */
+ bool then_else_reversed;
};
static rtx noce_emit_store_flag (struct noce_if_info *, rtx, int, int);
? emit_move_insn (x, y)
: emit_insn (gen_rtx_SET (VOIDmode, x, y));
seq = get_insns ();
- end_sequence();
+ end_sequence ();
if (recog_memoized (insn) <= 0)
{
int normalize, can_reverse;
enum machine_mode mode;
- if (! no_new_pseudos
- && GET_CODE (if_info->a) == CONST_INT
+ if (GET_CODE (if_info->a) == CONST_INT
&& GET_CODE (if_info->b) == CONST_INT)
{
mode = GET_MODE (if_info->x);
rtx target, seq;
int subtract, normalize;
- if (! no_new_pseudos
- && GET_CODE (if_info->a) == PLUS
+ if (GET_CODE (if_info->a) == PLUS
&& rtx_equal_p (XEXP (if_info->a, 0), if_info->b)
&& (reversed_comparison_code (if_info->cond, if_info->jump)
!= UNKNOWN))
int reversep;
reversep = 0;
- if (! no_new_pseudos
- && (BRANCH_COST >= 2
- || STORE_FLAG_VALUE == -1)
+ if ((BRANCH_COST >= 2
+ || STORE_FLAG_VALUE == -1)
&& ((if_info->a == const0_rtx
&& rtx_equal_p (if_info->b, if_info->x))
|| ((reversep = (reversed_comparison_code (if_info->cond,
conditional on their addresses followed by a load. Don't do this
early because it'll screw alias analysis. Note that we've
already checked for no side effects. */
- if (! no_new_pseudos && cse_not_expected
+ /* ??? FIXME: Magic number 5. */
+ if (cse_not_expected
&& MEM_P (a) && MEM_P (b)
&& BRANCH_COST >= 5)
{
{
rtx set;
- if (no_new_pseudos)
- goto end_seq_and_fail;
-
if (is_mem)
{
tmp = gen_reg_rtx (GET_MODE (a));
{
rtx set, last;
- if (no_new_pseudos)
- goto end_seq_and_fail;
-
if (is_mem)
{
tmp = gen_reg_rtx (GET_MODE (b));
reverse
= GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
&& XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (if_info->jump);
+ if (if_info->then_else_reversed)
+ reverse = !reverse;
/* If we're looking for a constant, try to make the conditional
have that constant in it. There are two reasons why it may
enum rtx_code code, op;
int unsignedp;
- /* ??? Can't guarantee that expand_binop won't create pseudos. */
- if (no_new_pseudos)
- return FALSE;
-
/* ??? Reject modes with NaNs or signed zeros since we don't know how
they will be resolved with an SMIN/SMAX. It wouldn't be too hard
to get the target to tell us... */
rtx cond, earliest, target, seq, a, b, c;
int negate;
- /* ??? Can't guarantee that expand_binop won't create pseudos. */
- if (no_new_pseudos)
- return FALSE;
-
/* Recognize A and B as constituting an ABS or NABS. The canonical
form is a branch around the negation, taken when the object is the
first operand of a comparison against 0 that evaluates to true. */
rtx cond, t, m, c, seq;
enum machine_mode mode;
enum rtx_code code;
-
- if (no_new_pseudos)
- return FALSE;
+ bool b_unconditional;
cond = if_info->cond;
code = GET_CODE (cond);
/* This is only profitable if T is cheap, or T is unconditionally
executed/evaluated in the original insn sequence. The latter
- happens if INSN_B was taken from TEST_BB. */
+ happens if INSN_B was taken from TEST_BB, or if there was no
+ INSN_B which can happen for e.g. conditional stores to memory. */
+ b_unconditional = (if_info->insn_b == NULL_RTX
+ || BLOCK_FOR_INSN (if_info->insn_b) == if_info->test_bb);
if (rtx_cost (t, SET) >= COSTS_N_INSNS (2)
- && (BLOCK_FOR_INSN (if_info->insn_b) != if_info->test_bb
+ && (!b_unconditional
|| t != if_info->b))
return FALSE;
/* Similar to get_condition, only the resulting condition must be
- valid at JUMP, instead of at EARLIEST. */
+ valid at JUMP, instead of at EARLIEST.
+
+ If THEN_ELSE_REVERSED is true, the fallthrough does not go to the
+ THEN block of the caller, and we have to reverse the condition. */
static rtx
-noce_get_condition (rtx jump, rtx *earliest)
+noce_get_condition (rtx jump, rtx *earliest, bool then_else_reversed)
{
rtx cond, set, tmp;
bool reverse;
reverse = (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
&& XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump));
+ /* We may have to reverse because the caller's if block is not canonical,
+ i.e. the THEN block isn't the fallthrough block for the TEST block
+ (see find_if_header). */
+ if (then_else_reversed)
+ reverse = !reverse;
+
/* If the condition variable is a register and is MODE_INT, accept it. */
cond = XEXP (SET_SRC (set), 0);
NULL_RTX, false, true);
}
-/* Initialize for a simple IF-THEN or IF-THEN-ELSE block. We will not
- be using conditional execution. Set some fields of IF_INFO based
- on CE_INFO: test_bb, cond, jump, cond_earliest. Return TRUE if
- things look OK. */
-
-static int
-noce_init_if_info (struct ce_if_block *ce_info, struct noce_if_info *if_info)
-{
- basic_block test_bb = ce_info->test_bb;
- rtx cond, jump;
-
- /* If test is comprised of && or || elements, don't handle it unless
- it is the special case of && elements without an ELSE block. */
- if (ce_info->num_multiple_test_blocks)
- {
- if (ce_info->else_bb || !ce_info->and_and_p)
- return FALSE;
-
- ce_info->test_bb = test_bb = ce_info->last_test_bb;
- ce_info->num_multiple_test_blocks = 0;
- ce_info->num_and_and_blocks = 0;
- ce_info->num_or_or_blocks = 0;
- }
-
- /* If this is not a standard conditional jump, we can't parse it. */
- jump = BB_END (test_bb);
- cond = noce_get_condition (jump, &if_info->cond_earliest);
- if (!cond)
- return FALSE;
-
- /* If the conditional jump is more than just a conditional
- jump, then we can not do if-conversion on this block. */
- if (! onlyjump_p (jump))
- return FALSE;
-
- /* We must be comparing objects whose modes imply the size. */
- if (GET_MODE (XEXP (cond, 0)) == BLKmode)
- return FALSE;
-
- if_info->test_bb = test_bb;
- if_info->cond = cond;
- if_info->jump = jump;
-
- return TRUE;
-}
-
/* Return true if OP is ok for if-then-else processing. */
static int
return false;
}
-/* Given a simple IF-THEN or IF-THEN-ELSE block, attempt to convert it
- without using conditional execution. Return TRUE if we were
- successful at converting the block. */
+/* Given a simple IF-THEN-JOIN or IF-THEN-ELSE-JOIN block, attempt to convert
+ it without using conditional execution. Return TRUE if we were successful
+ at converting the block. */
static int
-noce_process_if_block (struct ce_if_block * ce_info)
+noce_process_if_block (struct noce_if_info *if_info)
{
- basic_block test_bb = ce_info->test_bb; /* test block */
- basic_block then_bb = ce_info->then_bb; /* THEN */
- basic_block else_bb = ce_info->else_bb; /* ELSE or NULL */
- basic_block join_bb;
- struct noce_if_info if_info;
+ basic_block test_bb = if_info->test_bb; /* test block */
+ basic_block then_bb = if_info->then_bb; /* THEN */
+ basic_block else_bb = if_info->else_bb; /* ELSE or NULL */
+ basic_block join_bb = if_info->join_bb; /* JOIN */
+ rtx jump = if_info->jump;
+ rtx cond = if_info->cond;
rtx insn_a, insn_b;
rtx set_a, set_b;
rtx orig_x, x, a, b;
- rtx jump, cond;
/* We're looking for patterns of the form
??? For future expansion, look for multiple X in such patterns. */
- if (!noce_init_if_info (ce_info, &if_info))
- return FALSE;
-
- cond = if_info.cond;
- jump = if_info.jump;
-
/* Look for one of the potential sets. */
insn_a = first_active_insn (then_bb);
if (! insn_a
}
else
{
- insn_b = prev_nonnote_insn (if_info.cond_earliest);
+ insn_b = prev_nonnote_insn (if_info->cond_earliest);
/* We're going to be moving the evaluation of B down from above
COND_EARLIEST to JUMP. Make sure the relevant data is still
intact. */
|| ! rtx_equal_p (x, SET_DEST (set_b))
|| reg_overlap_mentioned_p (x, SET_SRC (set_b))
|| modified_between_p (SET_SRC (set_b),
- PREV_INSN (if_info.cond_earliest), jump)
+ PREV_INSN (if_info->cond_earliest), jump)
/* Likewise with X. In particular this can happen when
noce_get_condition looks farther back in the instruction
stream than one might expect. */
|| reg_overlap_mentioned_p (x, cond)
|| reg_overlap_mentioned_p (x, a)
- || modified_between_p (x, PREV_INSN (if_info.cond_earliest), jump))
+ || modified_between_p (x, PREV_INSN (if_info->cond_earliest), jump))
insn_b = set_b = NULL_RTX;
}
|| (SMALL_REGISTER_CLASSES
&& REGNO (x) < FIRST_PSEUDO_REGISTER))
{
- if (no_new_pseudos || GET_MODE (x) == BLKmode)
+ if (GET_MODE (x) == BLKmode)
return FALSE;
if (GET_MODE (x) == ZERO_EXTRACT
return FALSE;
/* Set up the info block for our subroutines. */
- if_info.insn_a = insn_a;
- if_info.insn_b = insn_b;
- if_info.x = x;
- if_info.a = a;
- if_info.b = b;
+ if_info->insn_a = insn_a;
+ if_info->insn_b = insn_b;
+ if_info->x = x;
+ if_info->a = a;
+ if_info->b = b;
/* Try optimizations in some approximation of a useful order. */
/* ??? Should first look to see if X is live incoming at all. If it
if (!set_b && MEM_P (orig_x) && noce_mem_write_may_trap_or_fault_p (orig_x))
return FALSE;
- if (noce_try_move (&if_info))
+ if (noce_try_move (if_info))
goto success;
- if (noce_try_store_flag (&if_info))
+ if (noce_try_store_flag (if_info))
goto success;
- if (noce_try_bitop (&if_info))
+ if (noce_try_bitop (if_info))
goto success;
- if (noce_try_minmax (&if_info))
+ if (noce_try_minmax (if_info))
goto success;
- if (noce_try_abs (&if_info))
+ if (noce_try_abs (if_info))
goto success;
if (HAVE_conditional_move
- && noce_try_cmove (&if_info))
+ && noce_try_cmove (if_info))
goto success;
if (! HAVE_conditional_execution)
{
- if (noce_try_store_flag_constants (&if_info))
+ if (noce_try_store_flag_constants (if_info))
goto success;
- if (noce_try_addcc (&if_info))
+ if (noce_try_addcc (if_info))
goto success;
- if (noce_try_store_flag_mask (&if_info))
+ if (noce_try_store_flag_mask (if_info))
goto success;
if (HAVE_conditional_move
- && noce_try_cmove_arith (&if_info))
+ && noce_try_cmove_arith (if_info))
goto success;
- if (noce_try_sign_mask (&if_info))
+ if (noce_try_sign_mask (if_info))
goto success;
}
/* The original THEN and ELSE blocks may now be removed. The test block
must now jump to the join block. If the test block and the join block
can be merged, do so. */
-
- join_bb = single_succ (then_bb);
if (else_bb)
{
delete_basic_block (else_bb);
return true;
}
-/* Given a simple IF-THEN or IF-THEN-ELSE block, attempt to convert it
- using only conditional moves. Return TRUE if we were successful at
+/* Given a simple IF-THEN-JOIN or IF-THEN-ELSE-JOIN block, attempt to convert
+ it using only conditional moves. Return TRUE if we were successful at
converting the block. */
static int
-cond_move_process_if_block (struct ce_if_block *ce_info)
+cond_move_process_if_block (struct noce_if_info *if_info)
{
- basic_block test_bb = ce_info->test_bb;
- basic_block then_bb = ce_info->then_bb;
- basic_block else_bb = ce_info->else_bb;
- basic_block join_bb;
- struct noce_if_info if_info;
- rtx jump, cond, seq, loc_insn;
+ basic_block test_bb = if_info->test_bb;
+ basic_block then_bb = if_info->then_bb;
+ basic_block else_bb = if_info->else_bb;
+ basic_block join_bb = if_info->join_bb;
+ rtx jump = if_info->jump;
+ rtx cond = if_info->cond;
+ rtx seq, loc_insn;
int max_reg, size, c, reg;
rtx *then_vals;
rtx *else_vals;
VEC (int, heap) *else_regs = NULL;
unsigned int i;
- if (!HAVE_conditional_move || no_new_pseudos)
- return FALSE;
-
- memset (&if_info, 0, sizeof if_info);
-
- if (!noce_init_if_info (ce_info, &if_info))
- return FALSE;
-
- cond = if_info.cond;
- jump = if_info.jump;
-
/* Build a mapping for each block to the value used for each
register. */
max_reg = max_reg_num ();
/* Try to emit the conditional moves. First do the then block,
then do anything left in the else blocks. */
start_sequence ();
- if (!cond_move_convert_if_block (&if_info, then_bb, cond,
+ if (!cond_move_convert_if_block (if_info, then_bb, cond,
then_vals, else_vals, false)
|| (else_bb
- && !cond_move_convert_if_block (&if_info, else_bb, cond,
+ && !cond_move_convert_if_block (if_info, else_bb, cond,
then_vals, else_vals, true)))
{
end_sequence ();
return FALSE;
}
- seq = end_ifcvt_sequence (&if_info);
+ seq = end_ifcvt_sequence (if_info);
if (!seq)
return FALSE;
}
emit_insn_before_setloc (seq, jump, INSN_LOCATOR (loc_insn));
- join_bb = single_succ (then_bb);
if (else_bb)
{
delete_basic_block (else_bb);
}
\f
-/* Attempt to convert an IF-THEN or IF-THEN-ELSE block into
- straight line code. Return true if successful. */
+/* Determine if a given basic block heads a simple IF-THEN-JOIN or an
+ IF-THEN-ELSE-JOIN block.
+
+ If so, we'll try to convert the insns to not require the branch,
+ using only transformations that do not require conditional execution.
+
+ Return TRUE if we were successful at converting the block. */
static int
-process_if_block (struct ce_if_block * ce_info)
+noce_find_if_block (basic_block test_bb,
+ edge then_edge, edge else_edge,
+ int pass)
{
- if (! reload_completed
- && noce_process_if_block (ce_info))
- return TRUE;
+ basic_block then_bb, else_bb, join_bb;
+ bool then_else_reversed = false;
+ rtx jump, cond;
+ struct noce_if_info if_info;
- if (HAVE_conditional_move
- && cond_move_process_if_block (ce_info))
- return TRUE;
+ /* We only ever should get here before reload. */
+ gcc_assert (!reload_completed);
- if (HAVE_conditional_execution && reload_completed)
+ /* Recognize an IF-THEN-ELSE-JOIN block. */
+ if (single_pred_p (then_edge->dest)
+ && single_succ_p (then_edge->dest)
+ && single_pred_p (else_edge->dest)
+ && single_succ_p (else_edge->dest)
+ && single_succ (then_edge->dest) == single_succ (else_edge->dest))
{
- /* If we have && and || tests, try to first handle combining the && and
- || tests into the conditional code, and if that fails, go back and
- handle it without the && and ||, which at present handles the && case
- if there was no ELSE block. */
- if (cond_exec_process_if_block (ce_info, TRUE))
- return TRUE;
+ then_bb = then_edge->dest;
+ else_bb = else_edge->dest;
+ join_bb = single_succ (then_bb);
+ }
+ /* Recognize an IF-THEN-JOIN block. */
+ else if (single_pred_p (then_edge->dest)
+ && single_succ_p (then_edge->dest)
+ && single_succ (then_edge->dest) == else_edge->dest)
+ {
+ then_bb = then_edge->dest;
+ else_bb = NULL_BLOCK;
+ join_bb = else_edge->dest;
+ }
+ /* Recognize an IF-ELSE-JOIN block. We can have those because the order
+ of basic blocks in cfglayout mode does not matter, so the fallthrough
+ edge can go to any basic block (and not just to bb->next_bb, like in
+ cfgrtl mode). */
+ else if (single_pred_p (else_edge->dest)
+ && single_succ_p (else_edge->dest)
+ && single_succ (else_edge->dest) == then_edge->dest)
+ {
+ /* The noce transformations do not apply to IF-ELSE-JOIN blocks.
+ To make this work, we have to invert the THEN and ELSE blocks
+ and reverse the jump condition. */
+ then_bb = else_edge->dest;
+ else_bb = NULL_BLOCK;
+ join_bb = single_succ (then_bb);
+ then_else_reversed = true;
+ }
+ else
+ /* Not a form we can handle. */
+ return FALSE;
- if (ce_info->num_multiple_test_blocks)
- {
- cancel_changes (0);
+ /* The edges of the THEN and ELSE blocks cannot have complex edges. */
+ if (single_succ_edge (then_bb)->flags & EDGE_COMPLEX)
+ return FALSE;
+ if (else_bb
+ && single_succ_edge (else_bb)->flags & EDGE_COMPLEX)
+ return FALSE;
- if (cond_exec_process_if_block (ce_info, FALSE))
- return TRUE;
- }
+ num_possible_if_blocks++;
+
+ if (dump_file)
+ {
+ fprintf (dump_file,
+ "\nIF-THEN%s-JOIN block found, pass %d, test %d, then %d",
+ (else_bb) ? "-ELSE" : "",
+ pass, test_bb->index, then_bb->index);
+
+ if (else_bb)
+ fprintf (dump_file, ", else %d", else_bb->index);
+
+ fprintf (dump_file, ", join %d\n", join_bb->index);
}
+ /* If the conditional jump is more than just a conditional
+ jump, then we can not do if-conversion on this block. */
+ jump = BB_END (test_bb);
+ if (! onlyjump_p (jump))
+ return FALSE;
+
+ /* If this is not a standard conditional jump, we can't parse it. */
+ cond = noce_get_condition (jump,
+ &if_info.cond_earliest,
+ then_else_reversed);
+ if (!cond)
+ return FALSE;
+
+ /* We must be comparing objects whose modes imply the size. */
+ if (GET_MODE (XEXP (cond, 0)) == BLKmode)
+ return FALSE;
+
+ /* Initialize an IF_INFO struct to pass around. */
+ memset (&if_info, 0, sizeof if_info);
+ if_info.test_bb = test_bb;
+ if_info.then_bb = then_bb;
+ if_info.else_bb = else_bb;
+ if_info.join_bb = join_bb;
+ if_info.cond = cond;
+ if_info.jump = jump;
+ if_info.then_else_reversed = then_else_reversed;
+
+ /* Do the real work. */
+
+ if (noce_process_if_block (&if_info))
+ return TRUE;
+
+ if (HAVE_conditional_move
+ && cond_move_process_if_block (&if_info))
+ return TRUE;
+
return FALSE;
}
+\f
/* Merge the blocks and mark for local life update. */
/* All block merging is done into the lower block numbers. */
combo_bb = test_bb;
+ df_set_bb_dirty (test_bb);
/* Merge any basic blocks to handle && and || subtests. Each of
the blocks are on the fallthru path from the predecessor block. */
else if (EDGE_COUNT (join_bb->preds) < 2
&& join_bb != EXIT_BLOCK_PTR)
{
- /* We can merge the JOIN. */
+ /* We can merge the JOIN cleanly and update the dataflow, and
+ try again on this pass.  */
merge_blocks (combo_bb, join_bb);
num_true_changes++;
}
then_edge = EDGE_SUCC (test_bb, 0);
else_edge = EDGE_SUCC (test_bb, 1);
+ if (df_get_bb_dirty (then_edge->dest))
+ return NULL;
+ if (df_get_bb_dirty (else_edge->dest))
+ return NULL;
+
/* Neither edge should be abnormal. */
if ((then_edge->flags & EDGE_COMPLEX)
|| (else_edge->flags & EDGE_COMPLEX))
IFCVT_INIT_EXTRA_FIELDS (&ce_info);
#endif
- if (find_if_block (&ce_info))
+ if (! reload_completed
+ && noce_find_if_block (test_bb, then_edge, else_edge, pass))
+ goto success;
+
+ if (HAVE_conditional_execution && reload_completed
+ && cond_exec_find_if_block (&ce_info))
goto success;
if (HAVE_trap && HAVE_conditional_trap
&& find_cond_trap (test_bb, then_edge, else_edge))
goto success;
- if (dom_computed[CDI_POST_DOMINATORS] >= DOM_NO_FAST_QUERY
+ if (dom_info_state (CDI_POST_DOMINATORS) >= DOM_NO_FAST_QUERY
&& (! HAVE_conditional_execution || reload_completed))
{
if (find_if_case_1 (test_bb, then_edge, else_edge))
success:
if (dump_file)
fprintf (dump_file, "Conversion succeeded on pass %d.\n", pass);
+ /* Set this so we continue looking. */
+ cond_exec_changed_p = TRUE;
return ce_info.test_bb;
}
Return TRUE if we were successful at converting the block. */
static int
-find_if_block (struct ce_if_block * ce_info)
+cond_exec_find_if_block (struct ce_if_block * ce_info)
{
basic_block test_bb = ce_info->test_bb;
basic_block then_bb = ce_info->then_bb;
ce_info->last_test_bb = test_bb;
+ /* We only ever should get here after reload,
+ and only if we have conditional execution. */
+ gcc_assert (HAVE_conditional_execution && reload_completed);
+
/* Discover if any fall through predecessors of the current test basic block
were && tests (which jump to the else block) or || tests (which jump to
the then block). */
- if (HAVE_conditional_execution && reload_completed
- && single_pred_p (test_bb)
+ if (single_pred_p (test_bb)
&& single_pred_edge (test_bb)->flags == EDGE_FALLTHRU)
{
basic_block bb = single_pred (test_bb);
if (EDGE_COUNT (then_bb->succs) > 0
&& (!single_succ_p (then_bb)
|| (single_succ_edge (then_bb)->flags & EDGE_COMPLEX)
- || (flow2_completed && tablejump_p (BB_END (then_bb), NULL, NULL))))
+ || (epilogue_completed && tablejump_p (BB_END (then_bb), NULL, NULL))))
return FALSE;
/* If the THEN block has no successors, conditional execution can still
&& single_succ (then_bb) == single_succ (else_bb)
&& single_pred_p (else_bb)
&& ! (single_succ_edge (else_bb)->flags & EDGE_COMPLEX)
- && ! (flow2_completed && tablejump_p (BB_END (else_bb), NULL, NULL)))
+ && ! (epilogue_completed && tablejump_p (BB_END (else_bb), NULL, NULL)))
join_bb = single_succ (else_bb);
/* Otherwise it is not an IF-THEN or IF-THEN-ELSE combination. */
}
/* Do the real work. */
+
ce_info->else_bb = else_bb;
ce_info->join_bb = join_bb;
- return process_if_block (ce_info);
+ /* If we have && and || tests, try to first handle combining the && and ||
+ tests into the conditional code, and if that fails, go back and handle
+ it without the && and ||, which at present handles the && case if there
+ was no ELSE block. */
+ if (cond_exec_process_if_block (ce_info, TRUE))
+ return TRUE;
+
+ if (ce_info->num_multiple_test_blocks)
+ {
+ cancel_changes (0);
+
+ if (cond_exec_process_if_block (ce_info, FALSE))
+ return TRUE;
+ }
+
+ return FALSE;
}
/* Convert a branch over a trap, or a branch
/* If this is not a standard conditional jump, we can't parse it. */
jump = BB_END (test_bb);
- cond = noce_get_condition (jump, &cond_earliest);
+ cond = noce_get_condition (jump, &cond_earliest, false);
if (! cond)
return FALSE;
}
/* Attempt to generate the conditional trap. */
- seq = gen_cond_trap (code, XEXP (cond, 0),
- XEXP (cond, 1),
+ seq = gen_cond_trap (code, copy_rtx (XEXP (cond, 0)),
+ copy_rtx (XEXP (cond, 1)),
TRAP_CODE (PATTERN (trap)));
if (seq == NULL)
return FALSE;
/* Delete the trap block if possible. */
remove_edge (trap_bb == then_bb ? then_edge : else_edge);
+ df_set_bb_dirty (test_bb);
+ df_set_bb_dirty (then_bb);
+ df_set_bb_dirty (else_bb);
+
if (EDGE_COUNT (trap_bb->preds) == 0)
{
delete_basic_block (trap_bb);
find_if_case_1 (basic_block test_bb, edge then_edge, edge else_edge)
{
basic_block then_bb = then_edge->dest;
- basic_block else_bb = else_edge->dest, new_bb;
+ basic_block else_bb = else_edge->dest;
+ basic_block new_bb;
int then_bb_index;
/* If we are partitioning hot/cold basic blocks, we don't want to
/* Conversion went ok, including moving the insns and fixing up the
jump. Adjust the CFG to match. */
- bitmap_ior (test_bb->il.rtl->global_live_at_end,
- else_bb->il.rtl->global_live_at_start,
- then_bb->il.rtl->global_live_at_end);
-
-
/* We can avoid creating a new basic block if then_bb is immediately
followed by else_bb, i.e. deleting then_bb allows test_bb to fall
thru to else_bb. */
}
else
new_bb = redirect_edge_and_branch_force (FALLTHRU_EDGE (test_bb),
- else_bb);
+ else_bb);
+
+ df_set_bb_dirty (test_bb);
+ df_set_bb_dirty (else_bb);
then_bb_index = then_bb->index;
delete_basic_block (then_bb);
block we removed. */
if (new_bb)
{
- new_bb->index = then_bb_index;
- SET_BASIC_BLOCK (then_bb_index, new_bb);
+ df_bb_replace (then_bb_index, new_bb);
/* Since the fallthru edge was redirected from test_bb to new_bb,
we need to ensure that new_bb is in the same partition as
test bb (you can not fall through across section boundaries). */
BB_COPY_PARTITION (new_bb, test_bb);
}
- /* We've possibly created jump to next insn, cleanup_cfg will solve that
- later. */
num_true_changes++;
num_updated_if_blocks++;
/* Conversion went ok, including moving the insns and fixing up the
jump. Adjust the CFG to match. */
- bitmap_ior (test_bb->il.rtl->global_live_at_end,
- then_bb->il.rtl->global_live_at_start,
- else_bb->il.rtl->global_live_at_end);
-
+ df_set_bb_dirty (test_bb);
+ df_set_bb_dirty (then_bb);
delete_basic_block (else_bb);
num_true_changes++;
that any registers modified are dead at the branch site. */
rtx insn, cond, prev;
- regset merge_set, tmp, test_live, test_set;
- struct propagate_block_info *pbi;
+ bitmap merge_set, test_live, test_set;
unsigned i, fail = 0;
bitmap_iterator bi;
return FALSE;
/* Find the extent of the conditional. */
- cond = noce_get_condition (jump, &earliest);
+ cond = noce_get_condition (jump, &earliest, false);
if (! cond)
return FALSE;
TEST_SET = set of registers set between EARLIEST and the
end of the block. */
- tmp = ALLOC_REG_SET (®_obstack);
- merge_set = ALLOC_REG_SET (®_obstack);
- test_live = ALLOC_REG_SET (®_obstack);
- test_set = ALLOC_REG_SET (®_obstack);
+ merge_set = BITMAP_ALLOC (®_obstack);
+ test_live = BITMAP_ALLOC (®_obstack);
+ test_set = BITMAP_ALLOC (®_obstack);
/* ??? bb->local_set is only valid during calculate_global_regs_live,
so we must recompute usage for MERGE_BB. Not so bad, I suppose,
expander called from noce_emit_cmove), we must resize the
array first. */
if (max_regno < max_reg_num ())
+ max_regno = max_reg_num ();
+
+ FOR_BB_INSNS (merge_bb, insn)
{
- max_regno = max_reg_num ();
- allocate_reg_info (max_regno, FALSE, FALSE);
+ if (INSN_P (insn))
+ {
+ unsigned int uid = INSN_UID (insn);
+ struct df_ref **def_rec;
+ for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
+ {
+ struct df_ref *def = *def_rec;
+ bitmap_set_bit (merge_set, DF_REF_REGNO (def));
+ }
+ }
}
- propagate_block (merge_bb, tmp, merge_set, merge_set, 0);
/* For small register class machines, don't lengthen lifetimes of
hard registers before reload. */
fail = 1;
}
}
-
+
/* For TEST, we're interested in a range of insns, not a whole block.
Moreover, we're interested in the insns live from OTHER_BB. */
-
- COPY_REG_SET (test_live, other_bb->il.rtl->global_live_at_start);
- pbi = init_propagate_block_info (test_bb, test_live, test_set, test_set,
- 0);
-
+
+ /* The loop below takes the set of live registers
+ after JUMP, and calculates the live set before EARLIEST. */
+ bitmap_copy (test_live, df_get_live_in (other_bb));
+ df_simulate_artificial_refs_at_end (test_bb, test_live);
for (insn = jump; ; insn = prev)
{
- prev = propagate_one_insn (pbi, insn);
+ if (INSN_P (insn))
+ {
+ df_simulate_find_defs (insn, test_set);
+ df_simulate_one_insn_backwards (test_bb, insn, test_live);
+ }
+ prev = PREV_INSN (insn);
if (insn == earliest)
break;
}
- free_propagate_block_info (pbi);
-
/* We can perform the transformation if
MERGE_SET & (TEST_SET | TEST_LIVE)
and
- TEST_SET & merge_bb->il.rtl->global_live_at_start
+ TEST_SET & DF_LIVE_IN (merge_bb)
are empty. */
if (bitmap_intersect_p (test_set, merge_set)
|| bitmap_intersect_p (test_live, merge_set)
- || bitmap_intersect_p (test_set,
- merge_bb->il.rtl->global_live_at_start))
+ || bitmap_intersect_p (test_set, df_get_live_in (merge_bb)))
fail = 1;
- FREE_REG_SET (tmp);
- FREE_REG_SET (merge_set);
- FREE_REG_SET (test_live);
- FREE_REG_SET (test_set);
+ BITMAP_FREE (merge_set);
+ BITMAP_FREE (test_live);
+ BITMAP_FREE (test_set);
if (fail)
return FALSE;
if (end == BB_END (merge_bb))
BB_END (merge_bb) = PREV_INSN (head);
- if (squeeze_notes (&head, &end))
- return TRUE;
-
/* PR 21767: When moving insns above a conditional branch, REG_EQUAL
notes might become invalid. */
insn = head;
/* Main entry point for all if-conversion. */
static void
-if_convert (int x_life_data_ok)
+if_convert (bool recompute_dominance)
{
basic_block bb;
int pass;
+ if (optimize == 1)
+ {
+ df_live_add_problem ();
+ df_live_set_all_dirty ();
+ }
+
num_possible_if_blocks = 0;
num_updated_if_blocks = 0;
num_true_changes = 0;
- life_data_ok = (x_life_data_ok != 0);
- if ((! targetm.cannot_modify_jumps_p ())
- && (!flag_reorder_blocks_and_partition || !no_new_pseudos
- || !targetm.have_named_sections))
- {
- loop_optimizer_init (0);
- if (current_loops)
- {
- mark_loop_exit_edges ();
- loop_optimizer_finalize ();
- }
- free_dominance_info (CDI_DOMINATORS);
- }
+ loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
+ mark_loop_exit_edges ();
+ loop_optimizer_finalize ();
+ free_dominance_info (CDI_DOMINATORS);
/* Compute postdominators if we think we'll use them. */
- if (HAVE_conditional_execution || life_data_ok)
+ if (HAVE_conditional_execution || recompute_dominance)
calculate_dominance_info (CDI_POST_DOMINATORS);
- if (life_data_ok)
- clear_bb_flags ();
+ df_set_flags (DF_LR_RUN_DCE);
/* Go through each of the basic blocks looking for things to convert. If we
have conditional execution, we make multiple passes to allow us to handle
pass = 0;
do
{
+ df_analyze ();
+ /* Only need to do dce on the first pass. */
+ df_clear_flags (DF_LR_RUN_DCE);
cond_exec_changed_p = FALSE;
pass++;
FOR_EACH_BB (bb)
{
- basic_block new_bb;
- while ((new_bb = find_if_header (bb, pass)))
- bb = new_bb;
+ basic_block new_bb;
+ while (!df_get_bb_dirty (bb)
+ && (new_bb = find_if_header (bb, pass)) != NULL)
+ bb = new_bb;
}
#ifdef IFCVT_MULTIPLE_DUMPS
clear_aux_for_blocks ();
- /* Rebuild life info for basic blocks that require it. */
- if (num_true_changes && life_data_ok)
- {
- /* If we allocated new pseudos, we must resize the array for sched1. */
- if (max_regno < max_reg_num ())
- {
- max_regno = max_reg_num ();
- allocate_reg_info (max_regno, FALSE, FALSE);
- }
- update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
- PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
- | PROP_KILL_DEAD_CODE);
- }
+ /* If we allocated new pseudos, we must resize the array for sched1. */
+ if (max_regno < max_reg_num ())
+ max_regno = max_reg_num ();
/* Write the final stats. */
if (dump_file && num_possible_if_blocks > 0)
num_true_changes);
}
+ if (optimize == 1)
+ df_remove_problem (df_live);
+
#ifdef ENABLE_CHECKING
verify_flow_info ();
#endif
if (dump_file)
dump_flow_info (dump_file, dump_flags);
cleanup_cfg (CLEANUP_EXPENSIVE);
- reg_scan (get_insns (), max_reg_num ());
- if_convert (0);
+ if_convert (false);
}
- timevar_push (TV_JUMP);
- cleanup_cfg (CLEANUP_EXPENSIVE);
- reg_scan (get_insns (), max_reg_num ());
- timevar_pop (TV_JUMP);
+ cleanup_cfg (0);
return 0;
}
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
+ TODO_df_finish |
TODO_dump_func, /* todo_flags_finish */
'C' /* letter */
};
static unsigned int
rest_of_handle_if_after_combine (void)
{
- no_new_pseudos = 0;
- if_convert (1);
- no_new_pseudos = 1;
+ if_convert (true);
return 0;
}
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
+ TODO_df_finish |
TODO_dump_func |
TODO_ggc_collect, /* todo_flags_finish */
'C' /* letter */
static bool
gate_handle_if_after_reload (void)
{
- return (optimize > 0);
+ return (optimize > 0 && flag_if_conversion2);
}
static unsigned int
rest_of_handle_if_after_reload (void)
{
- /* Last attempt to optimize CFG, as scheduling, peepholing and insn
- splitting possibly introduced more crossjumping opportunities. */
- cleanup_cfg (CLEANUP_EXPENSIVE
- | CLEANUP_UPDATE_LIFE
- | (flag_crossjumping ? CLEANUP_CROSSJUMP : 0));
- if (flag_if_conversion2)
- if_convert (1);
+ if_convert (true);
return 0;
}
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
+ TODO_df_finish |
TODO_dump_func |
TODO_ggc_collect, /* todo_flags_finish */
'E' /* letter */