/* Rtl-level induction variable analysis.
- Copyright (C) 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
-
+ Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009
+ Free Software Foundation, Inc.
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
-Free Software Foundation; either version 2, or (at your option) any
+Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA. */
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
/* This is a simple analysis of induction variables of the loop. The major use
is for determining the number of iterations of a loop for loop unrolling,
doloop optimization and branch prediction. The iv information is computed
on demand.
- Induction variable is analyzed by walking the use-def chains. When a biv
- is found, it is cached in the bivs hash table. When register is proved
- to be a giv, its description is stored to DF_REF_DATA of the def reference.
+ Induction variables are analyzed by walking the use-def chains. When
+ a basic induction variable (biv) is found, it is cached in the bivs
+ hash table. When register is proved to be a biv, its description
+ is stored to DF_REF_DATA of the def reference.
The analysis works always with one loop -- you must call
iv_analysis_loop_init (loop) for it.  All the other functions then work with
the result of this function.  When you finish using the iv analysis, call
iv_analysis_done () to clean up the memory.
The available functions are:
-
+
iv_analyze (insn, reg, iv): Stores the description of the induction variable
corresponding to the use of register REG in INSN to IV. Returns true if
REG is an induction variable in INSN. false otherwise.
subreg_lowpart_offset (outer_mode, inner_mode));
}
-static void
+static void
check_iv_ref_table_size (void)
{
if (iv_ref_table_size < DF_DEFS_TABLE_SIZE())
{
unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
- iv_ref_table = xrealloc (iv_ref_table,
- sizeof (struct rtx_iv *) * new_size);
- memset (&iv_ref_table[iv_ref_table_size], 0,
+ iv_ref_table = XRESIZEVEC (struct rtx_iv *, iv_ref_table, new_size);
+ memset (&iv_ref_table[iv_ref_table_size], 0,
(new_size - iv_ref_table_size) * sizeof (struct rtx_iv *));
iv_ref_table_size = new_size;
}
static int
biv_eq (const void *b, const void *r)
{
- return ((const struct biv_entry *) b)->regno == REGNO ((rtx) r);
+ return ((const struct biv_entry *) b)->regno == REGNO ((const_rtx) r);
}
/* Prepare the data for an induction variable analysis of a LOOP. */
df_remove_problem (df_chain);
df_process_deferred_rescans ();
df_chain_add_problem (DF_UD_CHAIN);
+ df_note_add_problem ();
df_set_blocks (blocks);
df_analyze ();
if (dump_file)
- df_dump (dump_file);
+ df_dump_region (dump_file);
check_iv_ref_table_size ();
BITMAP_FREE (blocks);
is set to NULL and true is returned. */
static bool
-latch_dominating_def (rtx reg, struct df_ref **def)
+latch_dominating_def (rtx reg, df_ref *def)
{
- struct df_ref *single_rd = NULL, *adef;
+ df_ref single_rd = NULL, adef;
unsigned regno = REGNO (reg);
struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (current_loop->latch);
- for (adef = DF_REG_DEF_CHAIN (regno); adef; adef = adef->next_reg)
+ for (adef = DF_REG_DEF_CHAIN (regno); adef; adef = DF_REF_NEXT_REG (adef))
{
- if (!bitmap_bit_p (bb_info->out, DF_REF_ID (adef)))
+ if (!bitmap_bit_p (df->blocks_to_analyze, DF_REF_BBNO (adef))
+ || !bitmap_bit_p (bb_info->out, DF_REF_ID (adef)))
continue;
/* More than one reaching definition. */
/* Gets definition of REG reaching its use in INSN and stores it to DEF. */
static enum iv_grd_result
-iv_get_reaching_def (rtx insn, rtx reg, struct df_ref **def)
+iv_get_reaching_def (rtx insn, rtx reg, df_ref *def)
{
- struct df_ref *use, *adef;
+ df_ref use, adef;
basic_block def_bb, use_bb;
rtx def_insn;
bool dom_p;
-
+
*def = NULL;
if (!simple_reg_p (reg))
return GRD_INVALID;
adef = DF_REF_CHAIN (use)->ref;
/* We do not handle setting only part of the register. */
- if (adef->flags & DF_REF_READ_WRITE)
+ if (DF_REF_FLAGS (adef) & DF_REF_READ_WRITE)
return GRD_INVALID;
def_insn = DF_REF_INSN (adef);
at get_biv_step. */
static bool
-get_biv_step_1 (struct df_ref *def, rtx reg,
+get_biv_step_1 (df_ref def, rtx reg,
rtx *inner_step, enum machine_mode *inner_mode,
enum rtx_code *extend, enum machine_mode outer_mode,
rtx *outer_step)
rtx next, nextr, tmp;
enum rtx_code code;
rtx insn = DF_REF_INSN (def);
- struct df_ref *next_def;
+ df_ref next_def;
enum iv_grd_result res;
set = single_set (insn);
LAST_DEF is the definition of REG that dominates loop latch. */
static bool
-get_biv_step (struct df_ref *last_def, rtx reg, rtx *inner_step,
+get_biv_step (df_ref last_def, rtx reg, rtx *inner_step,
enum machine_mode *inner_mode, enum rtx_code *extend,
enum machine_mode *outer_mode, rtx *outer_step)
{
/* Records information that DEF is induction variable IV. */
static void
-record_iv (struct df_ref *def, struct rtx_iv *iv)
+record_iv (df_ref def, struct rtx_iv *iv)
{
struct rtx_iv *recorded_iv = XNEW (struct rtx_iv);
static bool
analyzed_for_bivness_p (rtx def, struct rtx_iv *iv)
{
- struct biv_entry *biv = htab_find_with_hash (bivs, def, REGNO (def));
+ struct biv_entry *biv =
+ (struct biv_entry *) htab_find_with_hash (bivs, def, REGNO (def));
if (!biv)
return false;
rtx inner_step, outer_step;
enum machine_mode inner_mode, outer_mode;
enum rtx_code extend;
- struct df_ref *last_def;
+ df_ref last_def;
if (dump_file)
{
print_rtl (dump_file, def);
fprintf (dump_file, " for bivness.\n");
}
-
+
if (!REG_P (def))
{
if (!CONSTANT_P (def))
return iv->base != NULL_RTX;
}
-/* Analyzes expression RHS used at INSN and stores the result to *IV.
+/* Analyzes expression RHS used at INSN and stores the result to *IV.
The mode of the induction variable is MODE. */
bool
{
if (!iv_analyze_op (insn, rhs, iv))
return false;
-
+
if (iv->mode == VOIDmode)
{
iv->mode = mode;
/* Analyzes iv DEF and stores the result to *IV. */
static bool
-iv_analyze_def (struct df_ref *def, struct rtx_iv *iv)
+iv_analyze_def (df_ref def, struct rtx_iv *iv)
{
rtx insn = DF_REF_INSN (def);
rtx reg = DF_REF_REG (def);
fprintf (dump_file, " in insn ");
print_rtl_single (dump_file, insn);
}
-
+
check_iv_ref_table_size ();
if (DF_REF_IV (def))
{
static bool
iv_analyze_op (rtx insn, rtx op, struct rtx_iv *iv)
{
- struct df_ref *def = NULL;
+ df_ref def = NULL;
enum iv_grd_result res;
if (dump_file)
print_rtl_single (dump_file, insn);
}
- if (CONSTANT_P (op))
+ if (function_invariant_p (op))
res = GRD_INVARIANT;
else if (GET_CODE (op) == SUBREG)
{
bool
iv_analyze_result (rtx insn, rtx def, struct rtx_iv *iv)
{
- struct df_ref *adef;
+ df_ref adef;
adef = df_find_def (insn, def);
if (!adef)
biv_p (rtx insn, rtx reg)
{
struct rtx_iv iv;
- struct df_ref *def, *last_def;
+ df_ref def, last_def;
if (!simple_reg_p (reg))
return false;
{
clear_iv_info ();
clean_slate = true;
- df_finish_pass ();
+ df_finish_pass (true);
htab_delete (bivs);
free (iv_ref_table);
iv_ref_table = NULL;
if (!REG_P (*reg))
return 0;
- return REGNO_REG_SET_P (alt, REGNO (*reg));
+ return REGNO_REG_SET_P ((bitmap) alt, REGNO (*reg));
}
/* Marks registers altered by EXPR in set ALT. */
static void
-mark_altered (rtx expr, rtx by ATTRIBUTE_UNUSED, void *alt)
+mark_altered (rtx expr, const_rtx by ATTRIBUTE_UNUSED, void *alt)
{
if (GET_CODE (expr) == SUBREG)
expr = SUBREG_REG (expr);
if (!REG_P (expr))
return;
- SET_REGNO_REG_SET (alt, REGNO (expr));
+ SET_REGNO_REG_SET ((bitmap) alt, REGNO (expr));
}
/* Checks whether RHS is simple enough to process. */
{
rtx op0, op1;
- if (CONSTANT_P (rhs)
+ if (function_invariant_p (rhs)
|| (REG_P (rhs) && !HARD_REGISTER_P (rhs)))
return true;
{
case PLUS:
case MINUS:
+ case AND:
op0 = XEXP (rhs, 0);
op1 = XEXP (rhs, 1);
- /* Allow reg + const sets only. */
- if (REG_P (op0) && !HARD_REGISTER_P (op0) && CONSTANT_P (op1))
- return true;
- if (REG_P (op1) && !HARD_REGISTER_P (op1) && CONSTANT_P (op0))
- return true;
+ /* Allow reg OP const and reg OP reg. */
+ if (!(REG_P (op0) && !HARD_REGISTER_P (op0))
+ && !function_invariant_p (op0))
+ return false;
+ if (!(REG_P (op1) && !HARD_REGISTER_P (op1))
+ && !function_invariant_p (op1))
+ return false;
- return false;
+ return true;
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case MULT:
+ op0 = XEXP (rhs, 0);
+ op1 = XEXP (rhs, 1);
+ /* Allow reg OP const. */
+ if (!(REG_P (op0) && !HARD_REGISTER_P (op0)))
+ return false;
+ if (!function_invariant_p (op1))
+ return false;
+
+ return true;
default:
return false;
}
}
-/* Simplifies *EXPR using assignment in INSN. ALTERED is the set of registers
- altered so far. */
+/* If REG has a single definition, replace it with its known value in EXPR.
+ Callback for for_each_rtx. */
-static void
-simplify_using_assignment (rtx insn, rtx *expr, regset altered)
+static int
+replace_single_def_regs (rtx *reg, void *expr1)
{
- rtx set = single_set (insn);
- rtx lhs = NULL_RTX, rhs;
- bool ret = false;
+ unsigned regno;
+ df_ref adef;
+ rtx set, src;
+ rtx *expr = (rtx *)expr1;
- if (set)
- {
- lhs = SET_DEST (set);
- if (!REG_P (lhs)
- || altered_reg_used (&lhs, altered))
- ret = true;
- }
- else
- ret = true;
+ if (!REG_P (*reg))
+ return 0;
- note_stores (PATTERN (insn), mark_altered, altered);
- if (CALL_P (insn))
+ regno = REGNO (*reg);
+ for (;;)
{
- int i;
+ rtx note;
+ adef = DF_REG_DEF_CHAIN (regno);
+ if (adef == NULL || DF_REF_NEXT_REG (adef) != NULL
+ || DF_REF_IS_ARTIFICIAL (adef))
+ return -1;
+
+ set = single_set (DF_REF_INSN (adef));
+ if (set == NULL || !REG_P (SET_DEST (set))
+ || REGNO (SET_DEST (set)) != regno)
+ return -1;
+
+ note = find_reg_equal_equiv_note (DF_REF_INSN (adef));
- /* Kill all call clobbered registers. */
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
- SET_REGNO_REG_SET (altered, i);
+ if (note && function_invariant_p (XEXP (note, 0)))
+ {
+ src = XEXP (note, 0);
+ break;
+ }
+ src = SET_SRC (set);
+
+ if (REG_P (src))
+ {
+ regno = REGNO (src);
+ continue;
+ }
+ break;
}
+ if (!function_invariant_p (src))
+ return -1;
- if (ret)
- return;
+ *expr = simplify_replace_rtx (*expr, *reg, src);
+ return 1;
+}
+
+/* A subroutine of simplify_using_initial_values, this function examines INSN
+ to see if it contains a suitable set that we can use to make a replacement.
+ If it is suitable, return true and set DEST and SRC to the lhs and rhs of
+ the set; return false otherwise. */
+
+static bool
+suitable_set_for_replacement (rtx insn, rtx *dest, rtx *src)
+{
+ rtx set = single_set (insn);
+ rtx lhs = NULL_RTX, rhs;
+
+ if (!set)
+ return false;
+
+ lhs = SET_DEST (set);
+ if (!REG_P (lhs))
+ return false;
rhs = find_reg_equal_equiv_note (insn);
if (rhs)
rhs = SET_SRC (set);
if (!simple_rhs_p (rhs))
- return;
+ return false;
- if (for_each_rtx (&rhs, altered_reg_used, altered))
- return;
+ *dest = lhs;
+ *src = rhs;
+ return true;
+}
- *expr = simplify_replace_rtx (*expr, lhs, rhs);
+/* Using the data returned by suitable_set_for_replacement, replace DEST
+ with SRC in *EXPR and return the new expression. Also call
+ replace_single_def_regs if the replacement changed something. */
+static void
+replace_in_expr (rtx *expr, rtx dest, rtx src)
+{
+ rtx old = *expr;
+ *expr = simplify_replace_rtx (*expr, dest, src);
+ if (old == *expr)
+ return;
+ while (for_each_rtx (expr, replace_single_def_regs, expr) != 0)
+ continue;
}
/* Checks whether A implies B. */
/* A != N is equivalent to A - (N + 1) <u -1. */
if (GET_CODE (a) == NE
- && GET_CODE (op1) == CONST_INT
+ && CONST_INT_P (op1)
&& GET_CODE (b) == LTU
&& opb1 == constm1_rtx
&& GET_CODE (opb0) == PLUS
- && GET_CODE (XEXP (opb0, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (opb0, 1))
/* Avoid overflows. */
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
!= ((unsigned HOST_WIDE_INT)1
/* Likewise, A != N implies A - N > 0. */
if (GET_CODE (a) == NE
- && GET_CODE (op1) == CONST_INT)
+ && CONST_INT_P (op1))
{
if (GET_CODE (b) == GTU
&& GET_CODE (opb0) == PLUS
&& opb1 == const0_rtx
- && GET_CODE (XEXP (opb0, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (opb0, 1))
/* Avoid overflows. */
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
!= ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
if (GET_CODE (b) == GEU
&& GET_CODE (opb0) == PLUS
&& opb1 == const1_rtx
- && GET_CODE (XEXP (opb0, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (opb0, 1))
/* Avoid overflows. */
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
!= ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
/* A >s X, where X is positive, implies A <u Y, if Y is negative. */
if ((GET_CODE (a) == GT || GET_CODE (a) == GE)
- && GET_CODE (op1) == CONST_INT
+ && CONST_INT_P (op1)
&& ((GET_CODE (a) == GT && op1 == constm1_rtx)
|| INTVAL (op1) >= 0)
&& GET_CODE (b) == LTU
- && GET_CODE (opb1) == CONST_INT)
+ && CONST_INT_P (opb1)
+ && rtx_equal_p (op0, opb0))
return INTVAL (opb1) < 0;
return false;
mode = GET_MODE (op1);
gcc_assert (mode != VOIDmode);
- if (GET_CODE (op1) == CONST_INT
+ if (CONST_INT_P (op1)
&& GET_MODE_CLASS (mode) != MODE_CC
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
{
{
rtx rev, reve, exp = *expr;
- if (!COMPARISON_P (exp))
- return;
-
/* If some register gets altered later, we do not really speak about its
value at the time of comparison. */
if (altered
&& for_each_rtx (&cond, altered_reg_used, altered))
return;
+ if (GET_CODE (cond) == EQ
+ && REG_P (XEXP (cond, 0)) && CONSTANT_P (XEXP (cond, 1)))
+ {
+ *expr = simplify_replace_rtx (*expr, XEXP (cond, 0), XEXP (cond, 1));
+ return;
+ }
+
+ if (!COMPARISON_P (exp))
+ return;
+
rev = reversed_condition (cond);
reve = reversed_condition (exp);
return;
}
-
if (rev && rtx_equal_p (exp, rev))
{
*expr = const0_rtx;
*expr = const_true_rtx;
return;
}
-
+
if (reve && implies_p (cond, reve))
{
*expr = const0_rtx;
static void
simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
{
- rtx head, tail, insn;
+ bool expression_valid;
+ rtx head, tail, insn, cond_list, last_valid_expr;
rtx neutral, aggr;
- regset altered;
+ regset altered, this_altered;
edge e;
if (!*expr)
default:
gcc_unreachable ();
}
-
+
simplify_using_initial_values (loop, UNKNOWN, &head);
if (head == aggr)
{
*expr = tail;
return;
}
-
+
XEXP (*expr, 0) = head;
XEXP (*expr, 1) = tail;
return;
gcc_assert (op == UNKNOWN);
+ for (;;)
+ if (for_each_rtx (expr, replace_single_def_regs, expr) == 0)
+ break;
+ if (CONSTANT_P (*expr))
+ return;
+
e = loop_preheader_edge (loop);
if (e->src == ENTRY_BLOCK_PTR)
return;
altered = ALLOC_REG_SET (®_obstack);
+ this_altered = ALLOC_REG_SET (®_obstack);
+ expression_valid = true;
+ last_valid_expr = *expr;
+ cond_list = NULL_RTX;
while (1)
{
insn = BB_END (e->src);
if (any_condjump_p (insn))
{
rtx cond = get_condition (BB_END (e->src), NULL, false, true);
-
+
if (cond && (e->flags & EDGE_FALLTHRU))
cond = reversed_condition (cond);
if (cond)
{
+ rtx old = *expr;
simplify_using_condition (cond, expr, altered);
- if (CONSTANT_P (*expr))
+ if (old != *expr)
{
- FREE_REG_SET (altered);
- return;
+ rtx note;
+ if (CONSTANT_P (*expr))
+ goto out;
+ for (note = cond_list; note; note = XEXP (note, 1))
+ {
+ simplify_using_condition (XEXP (note, 0), expr, altered);
+ if (CONSTANT_P (*expr))
+ goto out;
+ }
}
+ cond_list = alloc_EXPR_LIST (0, cond, cond_list);
}
}
FOR_BB_INSNS_REVERSE (e->src, insn)
{
+ rtx src, dest;
+ rtx old = *expr;
+
if (!INSN_P (insn))
continue;
-
- simplify_using_assignment (insn, expr, altered);
- if (CONSTANT_P (*expr))
+
+ CLEAR_REG_SET (this_altered);
+ note_stores (PATTERN (insn), mark_altered, this_altered);
+ if (CALL_P (insn))
{
- FREE_REG_SET (altered);
- return;
+ int i;
+
+ /* Kill all call clobbered registers. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
+ SET_REGNO_REG_SET (this_altered, i);
}
- if (for_each_rtx (expr, altered_reg_used, altered))
+
+ if (suitable_set_for_replacement (insn, &dest, &src))
{
- FREE_REG_SET (altered);
- return;
+ rtx *pnote, *pnote_next;
+
+ replace_in_expr (expr, dest, src);
+ if (CONSTANT_P (*expr))
+ goto out;
+
+ for (pnote = &cond_list; *pnote; pnote = pnote_next)
+ {
+ rtx note = *pnote;
+ rtx old_cond = XEXP (note, 0);
+
+ pnote_next = &XEXP (note, 1);
+ replace_in_expr (&XEXP (note, 0), dest, src);
+
+ /* We can no longer use a condition that has been simplified
+ to a constant, and simplify_using_condition will abort if
+ we try. */
+ if (CONSTANT_P (XEXP (note, 0)))
+ {
+ *pnote = *pnote_next;
+ pnote_next = pnote;
+ free_EXPR_LIST_node (note);
+ }
+ /* Retry simplifications with this condition if either the
+ expression or the condition changed. */
+ else if (old_cond != XEXP (note, 0) || old != *expr)
+ simplify_using_condition (XEXP (note, 0), expr, altered);
+ }
}
+ else
+ /* If we did not use this insn to make a replacement, any overlap
+ between stores in this insn and our expression will cause the
+ expression to become invalid. */
+ if (for_each_rtx (expr, altered_reg_used, this_altered))
+ goto out;
+
+ if (CONSTANT_P (*expr))
+ goto out;
+
+ IOR_REG_SET (altered, this_altered);
+
+ /* If the expression now contains regs that have been altered, we
+ can't return it to the caller. However, it is still valid for
+ further simplification, so keep searching to see if we can
+ eventually turn it into a constant. */
+ if (for_each_rtx (expr, altered_reg_used, altered))
+ expression_valid = false;
+ if (expression_valid)
+ last_valid_expr = *expr;
}
if (!single_pred_p (e->src)
e = single_pred_edge (e->src);
}
+ out:
+ free_EXPR_LIST_list (&cond_list);
+ if (!CONSTANT_P (*expr))
+ *expr = last_valid_expr;
FREE_REG_SET (altered);
+ FREE_REG_SET (this_altered);
}
/* Transforms invariant IV into MODE. Adds assumptions based on the fact
and iv0 and iv1 are both ivs iterating in SI mode, but calculated
in different modes. This does not seem impossible to handle, but
it hardly ever occurs in practice.
-
+
The only exception is the case when one of operands is invariant.
For example pentium 3 generates comparisons like
(lt (subreg:HI (reg:SI)) 100). Here we assign HImode to 100, but we
return true;
}
-/* Tries to estimate the maximum number of iterations. */
+/* Tries to estimate the maximum number of iterations in LOOP, and store the
+ result in DESC. This function is called from iv_number_of_iterations with
+ a number of fields in DESC already filled in. OLD_NITER is the original
+ expression for the number of iterations, before we tried to simplify it. */
static unsigned HOST_WIDEST_INT
-determine_max_iter (struct loop *loop, struct niter_desc *desc)
+determine_max_iter (struct loop *loop, struct niter_desc *desc, rtx old_niter)
{
rtx niter = desc->niter_expr;
rtx mmin, mmax, cmp;
unsigned HOST_WIDEST_INT nmax, inc;
if (GET_CODE (niter) == AND
- && GET_CODE (XEXP (niter, 0)) == CONST_INT)
+ && CONST_INT_P (XEXP (niter, 0)))
{
nmax = INTVAL (XEXP (niter, 0));
if (!(nmax & (nmax + 1)))
if (GET_CODE (niter) == UDIV)
{
- if (GET_CODE (XEXP (niter, 1)) != CONST_INT)
+ if (!CONST_INT_P (XEXP (niter, 1)))
{
desc->niter_max = nmax;
return nmax;
/* We could use a binary search here, but for now improving the upper
bound by just one eliminates one important corner case. */
- cmp = gen_rtx_fmt_ee (desc->signed_p ? LT : LTU, VOIDmode, niter, mmax);
+ cmp = simplify_gen_relational (desc->signed_p ? LT : LTU, VOIDmode,
+ desc->mode, old_niter, mmax);
simplify_using_initial_values (loop, UNKNOWN, &cmp);
if (cmp == const_true_rtx)
{
goto fail;
if (iv0.extend_mode == VOIDmode)
iv0.mode = iv0.extend_mode = mode;
-
+
op1 = XEXP (condition, 1);
if (!iv_analyze (insn, op1, &iv1))
goto fail;
mode_mmin = lowpart_subreg (mode, mmin, comp_mode);
mode_mmax = lowpart_subreg (mode, mmax, comp_mode);
- if (GET_CODE (iv0.step) != CONST_INT || GET_CODE (iv1.step) != CONST_INT)
+ if (!CONST_INT_P (iv0.step) || !CONST_INT_P (iv1.step))
goto fail;
/* We can take care of the case of two induction variables chasing each other
may_xform = const0_rtx;
may_not_xform = const_true_rtx;
- if (GET_CODE (delta) == CONST_INT)
+ if (CONST_INT_P (delta))
{
if (was_sharp && INTVAL (delta) == INTVAL (step) - 1)
{
number of iterations in this step, so record the information
here. */
inc = INTVAL (iv0.step) - INTVAL (iv1.step);
- if (GET_CODE (iv1.base) == CONST_INT)
+ if (CONST_INT_P (iv1.base))
up = INTVAL (iv1.base);
else
up = INTVAL (mode_mmax) - inc;
- down = INTVAL (GET_CODE (iv0.base) == CONST_INT
+ down = INTVAL (CONST_INT_P (iv0.base)
? iv0.base
: mode_mmin);
desc->niter_max = (up - down) / inc + 1;
&& XEXP (desc->noloop_assumptions, 0) == const_true_rtx)
goto zero_iter;
- if (GET_CODE (desc->niter_expr) == CONST_INT)
+ if (CONST_INT_P (desc->niter_expr))
{
unsigned HOST_WIDEST_INT val = INTVAL (desc->niter_expr);
else
{
if (!desc->niter_max)
- desc->niter_max = determine_max_iter (loop, desc);
+ desc->niter_max = determine_max_iter (loop, desc, old_niter);
/* simplify_using_initial_values does a copy propagation on the registers
in the expression for the number of iterations. This prolongs life
{
if (flow_bb_inside_loop_p (loop, e->dest))
continue;
-
+
check_simple_exit (loop, e, &act);
if (!act.simple_p)
continue;
if (act.infinite && !desc->infinite)
continue;
}
-
+
*desc = act;
}
}
if (desc)
return desc;
- desc = XNEW (struct niter_desc);
+ /* At least desc->infinite is not always initialized by
+ find_simple_loop_exit. */
+ desc = XCNEW (struct niter_desc);
iv_analysis_loop_init (loop);
find_simple_exit (loop, desc);
loop->aux = desc;
if (desc->simple_p && (desc->assumptions || desc->infinite))
{
- const char *wording;
+ const char *wording;
- /* Assume that no overflow happens and that the loop is finite.
+ /* Assume that no overflow happens and that the loop is finite.
We already warned at the tree level if we ran optimizations there. */
if (!flag_tree_loop_optimize && warn_unsafe_loop_optimizations)
{
if (desc->infinite)
{
- wording =
+ wording =
flag_unsafe_loop_optimizations
? N_("assuming that the loop is not infinite")
: N_("cannot optimize possibly infinite loops");
}
if (desc->assumptions)
{
- wording =
+ wording =
flag_unsafe_loop_optimizations
? N_("assuming that the loop counter does not overflow")
: N_("cannot optimize loop, the loop counter may overflow");