/* Rtl-level induction variable analysis.
- Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009
+ Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
-
+
This file is part of GCC.
-
+
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.
-
+
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
-
+
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
iv_analysis_done () to clean up the memory.
The available functions are:
-
+
iv_analyze (insn, reg, iv): Stores the description of the induction variable
corresponding to the use of register REG in INSN to IV. Returns true if
REG is an induction variable in INSN. false otherwise.
#include "expr.h"
#include "intl.h"
#include "output.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "df.h"
#include "hashtab.h"
subreg_lowpart_offset (outer_mode, inner_mode));
}
-static void
+static void
check_iv_ref_table_size (void)
{
if (iv_ref_table_size < DF_DEFS_TABLE_SIZE())
{
unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
iv_ref_table = XRESIZEVEC (struct rtx_iv *, iv_ref_table, new_size);
- memset (&iv_ref_table[iv_ref_table_size], 0,
+ memset (&iv_ref_table[iv_ref_table_size], 0,
(new_size - iv_ref_table_size) * sizeof (struct rtx_iv *));
iv_ref_table_size = new_size;
}
df_remove_problem (df_chain);
df_process_deferred_rescans ();
df_chain_add_problem (DF_UD_CHAIN);
+ df_note_add_problem ();
df_set_blocks (blocks);
df_analyze ();
if (dump_file)
for (adef = DF_REG_DEF_CHAIN (regno); adef; adef = DF_REF_NEXT_REG (adef))
{
if (!bitmap_bit_p (df->blocks_to_analyze, DF_REF_BBNO (adef))
- || !bitmap_bit_p (bb_info->out, DF_REF_ID (adef)))
+ || !bitmap_bit_p (&bb_info->out, DF_REF_ID (adef)))
continue;
/* More than one reaching definition. */
basic_block def_bb, use_bb;
rtx def_insn;
bool dom_p;
-
+
*def = NULL;
if (!simple_reg_p (reg))
return GRD_INVALID;
print_rtl (dump_file, def);
fprintf (dump_file, " for bivness.\n");
}
-
+
if (!REG_P (def))
{
if (!CONSTANT_P (def))
return iv->base != NULL_RTX;
}
-/* Analyzes expression RHS used at INSN and stores the result to *IV.
+/* Analyzes expression RHS used at INSN and stores the result to *IV.
The mode of the induction variable is MODE. */
bool
{
if (!iv_analyze_op (insn, rhs, iv))
return false;
-
+
if (iv->mode == VOIDmode)
{
iv->mode = mode;
fprintf (dump_file, " in insn ");
print_rtl_single (dump_file, insn);
}
-
+
check_iv_ref_table_size ();
if (DF_REF_IV (def))
{
print_rtl_single (dump_file, insn);
}
- if (CONSTANT_P (op))
+ if (function_invariant_p (op))
res = GRD_INVARIANT;
else if (GET_CODE (op) == SUBREG)
{
{
rtx op0, op1;
- if (CONSTANT_P (rhs)
+ if (function_invariant_p (rhs)
|| (REG_P (rhs) && !HARD_REGISTER_P (rhs)))
return true;
{
case PLUS:
case MINUS:
+ case AND:
op0 = XEXP (rhs, 0);
op1 = XEXP (rhs, 1);
- /* Allow reg + const and reg + reg. */
+ /* Allow reg OP const and reg OP reg. */
if (!(REG_P (op0) && !HARD_REGISTER_P (op0))
- && !CONSTANT_P (op0))
+ && !function_invariant_p (op0))
return false;
if (!(REG_P (op1) && !HARD_REGISTER_P (op1))
- && !CONSTANT_P (op1))
+ && !function_invariant_p (op1))
return false;
return true;
case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case MULT:
op0 = XEXP (rhs, 0);
op1 = XEXP (rhs, 1);
- /* Allow reg << const. */
+ /* Allow reg OP const. */
if (!(REG_P (op0) && !HARD_REGISTER_P (op0)))
return false;
- if (!CONSTANT_P (op1))
+ if (!function_invariant_p (op1))
return false;
return true;
}
}
+/* If REG has a single definition, replace it with its known value in EXPR.
+ Callback for for_each_rtx.  Follows chains of single-definition register
+ copies until it reaches a function-invariant source; substitutes nothing
+ (and stops the walk of this sub-rtx) when any step has multiple or
+ artificial definitions.  */
+
+static int
+replace_single_def_regs (rtx *reg, void *expr1)
+{
+ unsigned regno;
+ df_ref adef;
+ rtx set, src;
+ rtx *expr = (rtx *)expr1;
+
+ /* Only REG sub-rtxes are of interest; 0 continues the traversal.  */
+ if (!REG_P (*reg))
+ return 0;
+
+ regno = REGNO (*reg);
+ for (;;)
+ {
+ rtx note;
+ /* Require exactly one, non-artificial definition of REGNO;
+ otherwise its value is not known here.  */
+ adef = DF_REG_DEF_CHAIN (regno);
+ if (adef == NULL || DF_REF_NEXT_REG (adef) != NULL
+ || DF_REF_IS_ARTIFICIAL (adef))
+ return -1;
+
+ /* The defining insn must be a plain single set of this register.  */
+ set = single_set (DF_REF_INSN (adef));
+ if (set == NULL || !REG_P (SET_DEST (set))
+ || REGNO (SET_DEST (set)) != regno)
+ return -1;
+
+ /* Prefer an invariant value recorded in a REG_EQUAL/REG_EQUIV note
+ over the raw SET_SRC.  */
+ note = find_reg_equal_equiv_note (DF_REF_INSN (adef));
+
+ if (note && function_invariant_p (XEXP (note, 0)))
+ {
+ src = XEXP (note, 0);
+ break;
+ }
+ src = SET_SRC (set);
+
+ /* A register-to-register copy: keep following the chain.  */
+ if (REG_P (src))
+ {
+ regno = REGNO (src);
+ continue;
+ }
+ break;
+ }
+ /* Only substitute values that do not change during the function.  */
+ if (!function_invariant_p (src))
+ return -1;
+
+ /* Replace REG with SRC throughout *EXPR; nonzero return reports to the
+ caller that a substitution was made (so it may retry for a fixpoint).  */
+ *expr = simplify_replace_rtx (*expr, *reg, src);
+ return 1;
+}
+
/* A subroutine of simplify_using_initial_values, this function examines INSN
to see if it contains a suitable set that we can use to make a replacement.
If it is suitable, return true and set DEST and SRC to the lhs and rhs of
return true;
}
+/* Using the data returned by suitable_set_for_replacement, replace DEST
+ with SRC in *EXPR and return the new expression. Also call
+ replace_single_def_regs if the replacement changed something. */
+static void
+replace_in_expr (rtx *expr, rtx dest, rtx src)
+{
+ rtx old = *expr;
+ *expr = simplify_replace_rtx (*expr, dest, src);
+ /* If the substitution left *EXPR untouched, there is nothing new to
+ propagate.  */
+ if (old == *expr)
+ return;
+ /* The replacement may have exposed registers with known invariant
+ values; keep substituting them until a fixpoint is reached
+ (for_each_rtx returns 0 once no further replacement is made).  */
+ while (for_each_rtx (expr, replace_single_def_regs, expr) != 0)
+ continue;
+}
+
/* Checks whether A implies B. */
static bool
/* A != N is equivalent to A - (N + 1) <u -1. */
if (GET_CODE (a) == NE
- && GET_CODE (op1) == CONST_INT
+ && CONST_INT_P (op1)
&& GET_CODE (b) == LTU
&& opb1 == constm1_rtx
&& GET_CODE (opb0) == PLUS
- && GET_CODE (XEXP (opb0, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (opb0, 1))
/* Avoid overflows. */
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
!= ((unsigned HOST_WIDE_INT)1
/* Likewise, A != N implies A - N > 0. */
if (GET_CODE (a) == NE
- && GET_CODE (op1) == CONST_INT)
+ && CONST_INT_P (op1))
{
if (GET_CODE (b) == GTU
&& GET_CODE (opb0) == PLUS
&& opb1 == const0_rtx
- && GET_CODE (XEXP (opb0, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (opb0, 1))
/* Avoid overflows. */
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
!= ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
if (GET_CODE (b) == GEU
&& GET_CODE (opb0) == PLUS
&& opb1 == const1_rtx
- && GET_CODE (XEXP (opb0, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (opb0, 1))
/* Avoid overflows. */
&& ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
!= ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
/* A >s X, where X is positive, implies A <u Y, if Y is negative. */
if ((GET_CODE (a) == GT || GET_CODE (a) == GE)
- && GET_CODE (op1) == CONST_INT
+ && CONST_INT_P (op1)
&& ((GET_CODE (a) == GT && op1 == constm1_rtx)
|| INTVAL (op1) >= 0)
&& GET_CODE (b) == LTU
- && GET_CODE (opb1) == CONST_INT
+ && CONST_INT_P (opb1)
&& rtx_equal_p (op0, opb0))
return INTVAL (opb1) < 0;
mode = GET_MODE (op1);
gcc_assert (mode != VOIDmode);
- if (GET_CODE (op1) == CONST_INT
+ if (CONST_INT_P (op1)
&& GET_MODE_CLASS (mode) != MODE_CC
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
{
{
rtx rev, reve, exp = *expr;
- if (!COMPARISON_P (exp))
- return;
-
/* If some register gets altered later, we do not really speak about its
value at the time of comparison. */
if (altered
&& for_each_rtx (&cond, altered_reg_used, altered))
return;
+ if (GET_CODE (cond) == EQ
+ && REG_P (XEXP (cond, 0)) && CONSTANT_P (XEXP (cond, 1)))
+ {
+ *expr = simplify_replace_rtx (*expr, XEXP (cond, 0), XEXP (cond, 1));
+ return;
+ }
+
+ if (!COMPARISON_P (exp))
+ return;
+
rev = reversed_condition (cond);
reve = reversed_condition (exp);
return;
}
-
if (rev && rtx_equal_p (exp, rev))
{
*expr = const0_rtx;
*expr = const_true_rtx;
return;
}
-
+
if (reve && implies_p (cond, reve))
{
*expr = const0_rtx;
simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
{
bool expression_valid;
- rtx head, tail, insn, last_valid_expr;
+ rtx head, tail, insn, cond_list, last_valid_expr;
rtx neutral, aggr;
regset altered, this_altered;
edge e;
default:
gcc_unreachable ();
}
-
+
simplify_using_initial_values (loop, UNKNOWN, &head);
if (head == aggr)
{
*expr = tail;
return;
}
-
+
XEXP (*expr, 0) = head;
XEXP (*expr, 1) = tail;
return;
gcc_assert (op == UNKNOWN);
+ for (;;)
+ if (for_each_rtx (expr, replace_single_def_regs, expr) == 0)
+ break;
+ if (CONSTANT_P (*expr))
+ return;
+
e = loop_preheader_edge (loop);
if (e->src == ENTRY_BLOCK_PTR)
return;
expression_valid = true;
last_valid_expr = *expr;
+ cond_list = NULL_RTX;
while (1)
{
insn = BB_END (e->src);
if (any_condjump_p (insn))
{
rtx cond = get_condition (BB_END (e->src), NULL, false, true);
-
+
if (cond && (e->flags & EDGE_FALLTHRU))
cond = reversed_condition (cond);
if (cond)
{
+ rtx old = *expr;
simplify_using_condition (cond, expr, altered);
- if (CONSTANT_P (*expr))
- goto out;
+ if (old != *expr)
+ {
+ rtx note;
+ if (CONSTANT_P (*expr))
+ goto out;
+ for (note = cond_list; note; note = XEXP (note, 1))
+ {
+ simplify_using_condition (XEXP (note, 0), expr, altered);
+ if (CONSTANT_P (*expr))
+ goto out;
+ }
+ }
+ cond_list = alloc_EXPR_LIST (0, cond, cond_list);
}
}
FOR_BB_INSNS_REVERSE (e->src, insn)
{
rtx src, dest;
+ rtx old = *expr;
if (!INSN_P (insn))
continue;
if (CALL_P (insn))
{
int i;
-
+
/* Kill all call clobbered registers. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
if (suitable_set_for_replacement (insn, &dest, &src))
{
- *expr = simplify_replace_rtx (*expr, dest, src);
+ rtx *pnote, *pnote_next;
+
+ replace_in_expr (expr, dest, src);
if (CONSTANT_P (*expr))
goto out;
+
+ for (pnote = &cond_list; *pnote; pnote = pnote_next)
+ {
+ rtx note = *pnote;
+ rtx old_cond = XEXP (note, 0);
+
+ pnote_next = &XEXP (note, 1);
+ replace_in_expr (&XEXP (note, 0), dest, src);
+
+ /* We can no longer use a condition that has been simplified
+ to a constant, and simplify_using_condition will abort if
+ we try. */
+ if (CONSTANT_P (XEXP (note, 0)))
+ {
+ *pnote = *pnote_next;
+ pnote_next = pnote;
+ free_EXPR_LIST_node (note);
+ }
+ /* Retry simplifications with this condition if either the
+ expression or the condition changed. */
+ else if (old_cond != XEXP (note, 0) || old != *expr)
+ simplify_using_condition (XEXP (note, 0), expr, altered);
+ }
}
else
/* If we did not use this insn to make a replacement, any overlap
if (for_each_rtx (expr, altered_reg_used, this_altered))
goto out;
+ if (CONSTANT_P (*expr))
+ goto out;
+
IOR_REG_SET (altered, this_altered);
/* If the expression now contains regs that have been altered, we
}
out:
+ free_EXPR_LIST_list (&cond_list);
if (!CONSTANT_P (*expr))
*expr = last_valid_expr;
FREE_REG_SET (altered);
and iv0 and iv1 are both ivs iterating in SI mode, but calculated
in different modes. This does not seem impossible to handle, but
it hardly ever occurs in practice.
-
+
The only exception is the case when one of operands is invariant.
For example pentium 3 generates comparisons like
(lt (subreg:HI (reg:SI)) 100). Here we assign HImode to 100, but we
return true;
}
-/* Tries to estimate the maximum number of iterations. */
+/* Tries to estimate the maximum number of iterations in LOOP, and store the
+ result in DESC. This function is called from iv_number_of_iterations with
+ a number of fields in DESC already filled in. OLD_NITER is the original
+ expression for the number of iterations, before we tried to simplify it. */
static unsigned HOST_WIDEST_INT
-determine_max_iter (struct loop *loop, struct niter_desc *desc)
+determine_max_iter (struct loop *loop, struct niter_desc *desc, rtx old_niter)
{
rtx niter = desc->niter_expr;
rtx mmin, mmax, cmp;
unsigned HOST_WIDEST_INT nmax, inc;
if (GET_CODE (niter) == AND
- && GET_CODE (XEXP (niter, 0)) == CONST_INT)
+ && CONST_INT_P (XEXP (niter, 0)))
{
nmax = INTVAL (XEXP (niter, 0));
if (!(nmax & (nmax + 1)))
if (GET_CODE (niter) == UDIV)
{
- if (GET_CODE (XEXP (niter, 1)) != CONST_INT)
+ if (!CONST_INT_P (XEXP (niter, 1)))
{
desc->niter_max = nmax;
return nmax;
/* We could use a binary search here, but for now improving the upper
bound by just one eliminates one important corner case. */
- cmp = gen_rtx_fmt_ee (desc->signed_p ? LT : LTU, VOIDmode, niter, mmax);
+ cmp = simplify_gen_relational (desc->signed_p ? LT : LTU, VOIDmode,
+ desc->mode, old_niter, mmax);
simplify_using_initial_values (loop, UNKNOWN, &cmp);
if (cmp == const_true_rtx)
{
goto fail;
if (iv0.extend_mode == VOIDmode)
iv0.mode = iv0.extend_mode = mode;
-
+
op1 = XEXP (condition, 1);
if (!iv_analyze (insn, op1, &iv1))
goto fail;
mode_mmin = lowpart_subreg (mode, mmin, comp_mode);
mode_mmax = lowpart_subreg (mode, mmax, comp_mode);
- if (GET_CODE (iv0.step) != CONST_INT || GET_CODE (iv1.step) != CONST_INT)
+ if (!CONST_INT_P (iv0.step) || !CONST_INT_P (iv1.step))
goto fail;
/* We can take care of the case of two induction variables chasing each other
may_xform = const0_rtx;
may_not_xform = const_true_rtx;
- if (GET_CODE (delta) == CONST_INT)
+ if (CONST_INT_P (delta))
{
if (was_sharp && INTVAL (delta) == INTVAL (step) - 1)
{
number of iterations in this step, so record the information
here. */
inc = INTVAL (iv0.step) - INTVAL (iv1.step);
- if (GET_CODE (iv1.base) == CONST_INT)
+ if (CONST_INT_P (iv1.base))
up = INTVAL (iv1.base);
else
up = INTVAL (mode_mmax) - inc;
- down = INTVAL (GET_CODE (iv0.base) == CONST_INT
+ down = INTVAL (CONST_INT_P (iv0.base)
? iv0.base
: mode_mmin);
desc->niter_max = (up - down) / inc + 1;
&& XEXP (desc->noloop_assumptions, 0) == const_true_rtx)
goto zero_iter;
- if (GET_CODE (desc->niter_expr) == CONST_INT)
+ if (CONST_INT_P (desc->niter_expr))
{
unsigned HOST_WIDEST_INT val = INTVAL (desc->niter_expr);
else
{
if (!desc->niter_max)
- desc->niter_max = determine_max_iter (loop, desc);
+ desc->niter_max = determine_max_iter (loop, desc, old_niter);
/* simplify_using_initial_values does a copy propagation on the registers
in the expression for the number of iterations. This prolongs life
{
if (flow_bb_inside_loop_p (loop, e->dest))
continue;
-
+
check_simple_exit (loop, e, &act);
if (!act.simple_p)
continue;
if (act.infinite && !desc->infinite)
continue;
}
-
+
*desc = act;
}
}
if (desc->simple_p && (desc->assumptions || desc->infinite))
{
- const char *wording;
+ const char *wording;
- /* Assume that no overflow happens and that the loop is finite.
+ /* Assume that no overflow happens and that the loop is finite.
We already warned at the tree level if we ran optimizations there. */
if (!flag_tree_loop_optimize && warn_unsafe_loop_optimizations)
{
if (desc->infinite)
{
- wording =
+ wording =
flag_unsafe_loop_optimizations
? N_("assuming that the loop is not infinite")
: N_("cannot optimize possibly infinite loops");
}
if (desc->assumptions)
{
- wording =
+ wording =
flag_unsafe_loop_optimizations
? N_("assuming that the loop counter does not overflow")
: N_("cannot optimize loop, the loop counter may overflow");