mpz_t below, up;
} bounds;
-/* Sets RESULT to VAL, taken unsigned if UNS is true and as signed
- otherwise. */
-
-static void
-mpz_set_double_int (mpz_t result, double_int val, bool uns)
-{
- bool negate = false;
- unsigned HOST_WIDE_INT vp[2];
-
- if (!uns && double_int_negative_p (val))
- {
- negate = true;
- val = double_int_neg (val);
- }
-
- vp[0] = val.low;
- vp[1] = (unsigned HOST_WIDE_INT) val.high;
- mpz_import (result, 2, -1, sizeof (HOST_WIDE_INT), 0, 0, vp);
-
- if (negate)
- mpz_neg (result, result);
-}
-
-/* Stores bounds of TYPE to MIN and MAX. */
-
-static void
-get_type_bounds (tree type, mpz_t min, mpz_t max)
-{
- if (TYPE_UNSIGNED (type))
- {
- mpz_set_ui (min, 0);
- mpz_set_double_int (max, double_int_mask (TYPE_PRECISION (type)), true);
- }
- else
- {
- double_int mx, mn;
-
- mx = double_int_mask (TYPE_PRECISION (type) - 1);
- mn = double_int_sext (double_int_add (mx, double_int_one),
- TYPE_PRECISION (type));
- mpz_set_double_int (max, mx, true);
- mpz_set_double_int (min, mn, false);
- }
-}
-
-/* Returns VAL converted to TYPE. If VAL does not fit in TYPE,
- the minimum or maximum value of the type is returned instead. */
-
-static double_int
-mpz_to_double_int (tree type, mpz_t val)
-{
- mpz_t min, max;
- unsigned HOST_WIDE_INT vp[2];
- bool negate = false;
- size_t count;
- double_int res;
-
- mpz_init (min);
- mpz_init (max);
- get_type_bounds (type, min, max);
-
- if (mpz_cmp (val, min) < 0)
- mpz_set (val, min);
- else if (mpz_cmp (val, max) > 0)
- mpz_set (val, max);
-
- if (mpz_sgn (val) < 0)
- negate = true;
-
- vp[0] = 0;
- vp[1] = 0;
- mpz_export (vp, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, val);
- gcc_assert (count <= 2);
-
- mpz_clear (min);
- mpz_clear (max);
-
- res.low = vp[0];
- res.high = (HOST_WIDE_INT) vp[1];
-
- res = double_int_ext (res, TYPE_PRECISION (type), TYPE_UNSIGNED (type));
- if (negate)
- res = double_int_neg (res);
-
- return res;
-}
/* Splits expression EXPR to a variable part VAR and constant OFFSET. */
/* If the computation may wrap, we know nothing about the value, except for
the range of the type. */
- get_type_bounds (type, min, max);
+ get_type_static_bounds (type, min, max);
if (!nowrap_type_p (type))
return;
tree c0, enum tree_code cmp, tree c1,
bounds *bnds)
{
- tree varc0, varc1, tmp;
+ tree varc0, varc1, tmp, ctype;
mpz_t offc0, offc1, loffx, loffy, bnd;
bool lbound = false;
bool no_wrap = nowrap_type_p (type);
case LE_EXPR:
case GT_EXPR:
case GE_EXPR:
+ STRIP_SIGN_NOPS (c0);
+ STRIP_SIGN_NOPS (c1);
+ ctype = TREE_TYPE (c0);
+ if (!tree_ssa_useless_type_conversion_1 (ctype, type))
+ return;
+
break;
case EQ_EXPR:
/* We could derive quite precise information from EQ_EXPR, however, such
- a guard is unlikely to appear, so we do not bother with handling it.
- TODO. */
+ a guard is unlikely to appear, so we do not bother with handling
+ it. */
return;
case NE_EXPR:
- /* NE_EXPR comparisons do not contain much of useful information (except for
- special cases like comparing with the bounds of the type, TODO). */
+ /* NE_EXPR comparisons do not contain much of useful information, except for
+ special case of comparing with the bounds of the type. */
+ if (TREE_CODE (c1) != INTEGER_CST
+ || !INTEGRAL_TYPE_P (type))
+ return;
+
+ /* Ensure that the condition speaks about an expression in the same type
+ as X and Y. */
+ ctype = TREE_TYPE (c0);
+ if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
+ return;
+ c0 = fold_convert (type, c0);
+ c1 = fold_convert (type, c1);
+
+ if (TYPE_MIN_VALUE (type)
+ && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
+ {
+ cmp = GT_EXPR;
+ break;
+ }
+ if (TYPE_MAX_VALUE (type)
+ && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
+ {
+ cmp = LT_EXPR;
+ break;
+ }
+
return;
default:
return;
int cnt = 0;
edge e;
basic_block bb;
- tree cond, c0, c1, ctype;
+ tree cond, c0, c1;
enum tree_code cmp;
+ /* Get rid of unnecessary casts, but preserve the value of
+ the expressions. */
+ STRIP_SIGN_NOPS (x);
+ STRIP_SIGN_NOPS (y);
+
mpz_init (bnds->below);
mpz_init (bnds->up);
mpz_init (offx);
c0 = TREE_OPERAND (cond, 0);
cmp = TREE_CODE (cond);
c1 = TREE_OPERAND (cond, 1);
- ctype = TREE_TYPE (c0);
-
- if (!tree_ssa_useless_type_conversion_1 (ctype, type))
- continue;
if (e->flags & EDGE_FALSE_VALUE)
cmp = invert_tree_comparison (cmp, false);
mpz_init (max);
number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds);
- niter->max = mpz_to_double_int (niter_type, max);
+ niter->max = mpz_get_double_int (niter_type, max, false);
mpz_clear (max);
/* First the trivial cases -- when the step is 1. */
niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
iv1->base, iv0->base);
niter->niter = delta;
- niter->max = mpz_to_double_int (niter_type, bnds->up);
+ niter->max = mpz_get_double_int (niter_type, bnds->up, false);
return true;
}
mpz_add (tmp, bnds->up, mstep);
mpz_sub_ui (tmp, tmp, 1);
mpz_fdiv_q (tmp, tmp, mstep);
- niter->max = mpz_to_double_int (niter_type, tmp);
+ niter->max = mpz_get_double_int (niter_type, tmp, false);
mpz_clear (mstep);
mpz_clear (tmp);
}
}
+/* Records that every statement in LOOP is executed I_BOUND times.
+ REALISTIC is true if I_BOUND is expected to be close to the real number
+ of iterations. UPPER is true if we are sure the loop iterates at most
+ I_BOUND times. */
+
+static void
+record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
+ bool upper)
+{
+ /* Update the bounds only when there is no previous estimation, or when the current
+ estimation is smaller. */
+ if (upper
+ && (!loop->any_upper_bound
+ || double_int_ucmp (i_bound, loop->nb_iterations_upper_bound) < 0))
+ {
+ loop->any_upper_bound = true;
+ loop->nb_iterations_upper_bound = i_bound;
+ }
+ if (realistic
+ && (!loop->any_estimate
+ || double_int_ucmp (i_bound, loop->nb_iterations_estimate) < 0))
+ {
+ loop->any_estimate = true;
+ loop->nb_iterations_estimate = i_bound;
+ }
+}
+
/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
is true if the loop is exited immediately after STMT, and this exit
is taken at last when the STMT is executed BOUND + 1 times.
- REALISTIC is true if the estimate comes from a reliable source
- (number of iterations analysis, or size of data accessed in the loop).
- I_BOUND is an unsigned double_int upper estimate on BOUND. */
+ REALISTIC is true if BOUND is expected to be close to the real number
+ of iterations. UPPER is true if we are sure the loop iterates at most
+ BOUND times. I_BOUND is an unsigned double_int upper estimate on BOUND. */
static void
record_estimate (struct loop *loop, tree bound, double_int i_bound,
- tree at_stmt, bool is_exit, bool realistic)
+ tree at_stmt, bool is_exit, bool realistic, bool upper)
{
- struct nb_iter_bound *elt = xmalloc (sizeof (struct nb_iter_bound));
+ double_int delta;
+ edge exit;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
print_generic_expr (dump_file, at_stmt, TDF_SLIM);
- fprintf (dump_file, " is executed at most ");
+ fprintf (dump_file, " is %sexecuted at most ",
+ upper ? "" : "probably ");
print_generic_expr (dump_file, bound, TDF_SLIM);
fprintf (dump_file, " (bounded by ");
dump_double_int (dump_file, i_bound, true);
fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
}
- elt->bound = i_bound;
- elt->stmt = at_stmt;
- elt->is_exit = is_exit;
- elt->realistic = realistic && TREE_CODE (bound) == INTEGER_CST;
- elt->next = loop->bounds;
- loop->bounds = elt;
+ /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
+ real number of iterations. */
+ if (TREE_CODE (bound) != INTEGER_CST)
+ realistic = false;
+ if (!upper && !realistic)
+ return;
+
+ /* If we have a guaranteed upper bound, record it in the appropriate
+ list. */
+ if (upper)
+ {
+ struct nb_iter_bound *elt = XNEW (struct nb_iter_bound);
+
+ elt->bound = i_bound;
+ elt->stmt = at_stmt;
+ elt->is_exit = is_exit;
+ elt->next = loop->bounds;
+ loop->bounds = elt;
+ }
+
+ /* Update the number of iteration estimates according to the bound.
+ If at_stmt is an exit, then every statement in the loop is
+ executed at most BOUND + 1 times. If it is not an exit, then
+ some of the statements before it could be executed BOUND + 2
+ times, if an exit of LOOP is before stmt. */
+ exit = single_exit (loop);
+ if (is_exit
+ || (exit != NULL
+ && dominated_by_p (CDI_DOMINATORS,
+ exit->src, bb_for_stmt (at_stmt))))
+ delta = double_int_one;
+ else
+ delta = double_int_two;
+ i_bound = double_int_add (i_bound, delta);
+
+ /* If an overflow occurred, ignore the result. */
+ if (double_int_ucmp (i_bound, delta) < 0)
+ return;
+
+ record_niter_bound (loop, i_bound, realistic, upper);
}
/* Record the estimate on number of iterations of LOOP based on the fact that
the induction variable BASE + STEP * i evaluated in STMT does not wrap and
- its values belong to the range <LOW, HIGH>. DATA_SIZE_BOUNDS_P is true if
- LOW and HIGH are derived from the size of data. */
+ its values belong to the range <LOW, HIGH>. REALISTIC is true if the
+ estimated number of iterations is expected to be close to the real one.
+ UPPER is true if we are sure the induction variable does not wrap. */
static void
record_nonwrapping_iv (struct loop *loop, tree base, tree step, tree stmt,
- tree low, tree high, bool data_size_bounds_p)
+ tree low, tree high, bool realistic, bool upper)
{
tree niter_bound, extreme, delta;
tree type = TREE_TYPE (base), unsigned_type;
would get out of the range. */
niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
max = derive_constant_upper_bound (niter_bound);
- record_estimate (loop, niter_bound, max, stmt, false, data_size_bounds_p);
-}
-
-/* Initialize LOOP->ESTIMATED_NB_ITERATIONS with the lowest safe
- approximation of the number of iterations for LOOP. */
-
-static void
-compute_estimated_nb_iterations (struct loop *loop)
-{
- struct nb_iter_bound *bound;
- double_int bnd_val, delta;
- edge exit;
-
- gcc_assert (loop->estimate_state == EST_NOT_AVAILABLE);
-
- for (bound = loop->bounds; bound; bound = bound->next)
- {
- if (!bound->realistic)
- continue;
-
- bnd_val = bound->bound;
- /* If bound->stmt is an exit, then every statement in the loop is
- executed at most BND_VAL + 1 times. If it is not an exit, then
- some of the statements before it could be executed BND_VAL + 2
- times, if an exit of LOOP is before stmt. */
- exit = single_exit (loop);
-
- if (bound->is_exit
- || (exit != NULL
- && dominated_by_p (CDI_DOMINATORS,
- exit->src, bb_for_stmt (bound->stmt))))
- delta = double_int_one;
- else
- delta = double_int_two;
- bnd_val = double_int_add (bnd_val, delta);
-
- /* If an overflow occured, ignore the result. */
- if (double_int_ucmp (bnd_val, delta) < 0)
- continue;
-
- /* Update only when there is no previous estimation, or when the current
- estimation is smaller. */
- if (loop->estimate_state == EST_NOT_AVAILABLE
- || double_int_ucmp (bnd_val, loop->estimated_nb_iterations) < 0)
- {
- loop->estimate_state = EST_AVAILABLE;
- loop->estimated_nb_iterations = bnd_val;
- }
- }
+ record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
}
/* Returns true if REF is a reference to an array at the end of a dynamically
}
/* Determine information about number of iterations a LOOP from the index
- IDX of a data reference accessed in STMT. Callback for for_each_index. */
+ IDX of a data reference accessed in STMT. RELIABLE is true if STMT is
+ guaranteed to be executed in every iteration of LOOP. Callback for
+ for_each_index. */
struct ilb_data
{
struct loop *loop;
tree stmt;
+ bool reliable;
};
static bool
struct ilb_data *data = dta;
tree ev, init, step;
tree low, high, type, next;
- bool sign;
+ bool sign, upper = data->reliable, at_end = false;
struct loop *loop = data->loop;
- if (TREE_CODE (base) != ARRAY_REF
- || array_at_struct_end_p (base))
+ if (TREE_CODE (base) != ARRAY_REF)
return true;
+ /* For arrays at the end of the structure, we are not guaranteed that they
+ do not really extend over their declared size. However, for arrays of
+ size greater than one, this is unlikely to be intended. */
+ if (array_at_struct_end_p (base))
+ {
+ at_end = true;
+ upper = false;
+ }
+
ev = instantiate_parameters (loop, analyze_scalar_evolution (loop, *idx));
init = initial_condition (ev);
step = evolution_part_in_loop_num (ev, loop->num);
return true;
sign = tree_int_cst_sign_bit (step);
type = TREE_TYPE (step);
-
+
+ /* The array of length 1 at the end of a structure most likely extends
+ beyond its bounds. */
+ if (at_end
+ && operand_equal_p (low, high, 0))
+ return true;
+
/* In case the relevant bound of the array does not fit in type, or
it does, but bound + step (in type) still belongs into the range of the
array, the index may wrap and still stay within the range of the array
&& tree_int_cst_compare (next, high) <= 0)
return true;
- record_nonwrapping_iv (loop, init, step, data->stmt, low, high, true);
+ record_nonwrapping_iv (loop, init, step, data->stmt, low, high, true, upper);
return true;
}
/* Determine information about number of iterations a LOOP from the bounds
- of arrays in the data reference REF accessed in STMT. */
+ of arrays in the data reference REF accessed in STMT. RELIABLE is true if
+ STMT is guaranteed to be executed in every iteration of LOOP. */
static void
-infer_loop_bounds_from_ref (struct loop *loop, tree stmt, tree ref)
+infer_loop_bounds_from_ref (struct loop *loop, tree stmt, tree ref,
+ bool reliable)
{
struct ilb_data data;
data.loop = loop;
data.stmt = stmt;
+ data.reliable = reliable;
for_each_index (&ref, idx_infer_loop_bounds, &data);
}
/* Determine information about number of iterations of a LOOP from the way
- arrays are used in STMT. */
+ arrays are used in STMT. RELIABLE is true if STMT is guaranteed to be
+ executed in every iteration of LOOP. */
static void
-infer_loop_bounds_from_array (struct loop *loop, tree stmt)
+infer_loop_bounds_from_array (struct loop *loop, tree stmt, bool reliable)
{
tree call;
/* For each memory access, analyze its access function
and record a bound on the loop iteration domain. */
if (REFERENCE_CLASS_P (op0))
- infer_loop_bounds_from_ref (loop, stmt, op0);
+ infer_loop_bounds_from_ref (loop, stmt, op0, reliable);
if (REFERENCE_CLASS_P (op1))
- infer_loop_bounds_from_ref (loop, stmt, op1);
+ infer_loop_bounds_from_ref (loop, stmt, op1, reliable);
}
FOR_EACH_CALL_EXPR_ARG (arg, iter, call)
if (REFERENCE_CLASS_P (arg))
- infer_loop_bounds_from_ref (loop, stmt, arg);
+ infer_loop_bounds_from_ref (loop, stmt, arg, reliable);
}
}
low = lower_bound_in_type (type, type);
high = upper_bound_in_type (type, type);
- record_nonwrapping_iv (loop, base, step, stmt, low, high, false);
+ record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}
/* The following analyzers are extracting informations on the bounds
basic_block *bbs;
block_stmt_iterator bsi;
basic_block bb;
+ bool reliable;
bbs = get_loop_body (loop);
bb = bbs[i];
/* If BB is not executed in each iteration of the loop, we cannot
- use it to infer any information about # of iterations of the loop. */
- if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
- continue;
+ use the operations in it to infer reliable upper bound on the
+ # of iterations of the loop. However, we can use it as a guess. */
+ reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
{
tree stmt = bsi_stmt (bsi);
- infer_loop_bounds_from_array (loop, stmt);
- infer_loop_bounds_from_signedness (loop, stmt);
+ infer_loop_bounds_from_array (loop, stmt, reliable);
+
+ if (reliable)
+ infer_loop_bounds_from_signedness (loop, stmt);
}
}
free (bbs);
}
+/* Converts VAL to double_int. */
+
+static double_int
+gcov_type_to_double_int (gcov_type val)
+{
+ double_int ret;
+
+ ret.low = (unsigned HOST_WIDE_INT) val;
+ /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
+ the size of type. */
+ val >>= HOST_BITS_PER_WIDE_INT - 1;
+ val >>= 1;
+ ret.high = (unsigned HOST_WIDE_INT) val;
+
+ return ret;
+}
+
/* Records estimates on numbers of iterations of LOOP. */
void
unsigned i;
struct tree_niter_desc niter_desc;
edge ex;
+ double_int bound;
/* Give up if we already have tried to compute an estimation. */
if (loop->estimate_state != EST_NOT_COMPUTED)
return;
- loop->estimate_state = EST_NOT_AVAILABLE;
+ loop->estimate_state = EST_AVAILABLE;
+ loop->any_upper_bound = false;
+ loop->any_estimate = false;
exits = get_loop_exit_edges (loop);
for (i = 0; VEC_iterate (edge, exits, i, ex); i++)
niter);
record_estimate (loop, niter, niter_desc.max,
last_stmt (ex->src),
- true, true);
+ true, true, true);
}
VEC_free (edge, heap, exits);
infer_loop_bounds_from_undefined (loop);
- compute_estimated_nb_iterations (loop);
+
+ /* If we have a measured profile, use it to estimate the number of
+ iterations. */
+ if (loop->header->count != 0)
+ {
+ gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
+ bound = gcov_type_to_double_int (nit);
+ record_niter_bound (loop, bound, true, false);
+ }
+
+ /* If an upper bound is smaller than the realistic estimate of the
+ number of iterations, use the upper bound instead. */
+ if (loop->any_upper_bound
+ && loop->any_estimate
+ && double_int_ucmp (loop->nb_iterations_upper_bound,
+ loop->nb_iterations_estimate) < 0)
+ loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
}
/* Records estimates on numbers of iterations of loops. */