+ acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
+ if (acnd && integer_zerop (acnd))
+ {
+ fold_undefer_and_ignore_overflow_warnings ();
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "Proved that loop %d iterates %d times using brute force.\n",
+ loop->num, i);
+ return build_int_cst (unsigned_type_node, i);
+ }
+
+ for (j = 0; j < 2; j++)
+ {
+ val[j] = get_val_for (next[j], val[j]);
+ if (!is_gimple_min_invariant (val[j]))
+ {
+ fold_undefer_and_ignore_overflow_warnings ();
+ return chrec_dont_know;
+ }
+ }
+ }
+
+ fold_undefer_and_ignore_overflow_warnings ();
+
+ return chrec_dont_know;
+}
+
+/* Finds the exit of LOOP through which the loop exits after a constant
+ number of iterations and stores the exit edge to *EXIT. The constant
+ giving the number of iterations of LOOP is returned. The number of
+ iterations is determined using loop_niter_by_eval (i.e. by brute force
+ evaluation). If we are unable to find an exit for which loop_niter_by_eval
+ determines the number of iterations, chrec_dont_know is returned. */
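+
+/* An illustrative (hypothetical) example: brute-force evaluation can handle
+ loops whose exit condition involves only compile-time constants but has no
+ affine closed form, e.g.
+
+ for (i = 0; i != 7; i = (i + 5) % 11)
+
+ Simulating the successive values 0, 5, 10, 4, 9, 3, 8, 2 shows that the
+ exit condition i == 7 first holds after the eighth update, so a small
+ constant number of iterations is established even though scalar evolution
+ analysis alone cannot express it. */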
+
+tree
+find_loop_niter_by_eval (struct loop *loop, edge *exit)
+{
+ unsigned i;
+ VEC (edge, heap) *exits = get_loop_exit_edges (loop);
+ edge ex;
+ tree niter = NULL_TREE, aniter;
+
+ *exit = NULL;
+
+ /* Loops with multiple exits are expensive to handle and less important. */
+ if (!flag_expensive_optimizations
+ && VEC_length (edge, exits) > 1)
+ return chrec_dont_know;
+
+ FOR_EACH_VEC_ELT (edge, exits, i, ex)
+ {
+ if (!just_once_each_iteration_p (loop, ex->src))
+ continue;
+
+ aniter = loop_niter_by_eval (loop, ex);
+ if (chrec_contains_undetermined (aniter))
+ continue;
+
+ if (niter
+ && !tree_int_cst_lt (aniter, niter))
+ continue;
+
+ niter = aniter;
+ *exit = ex;
+ }
+ VEC_free (edge, heap, exits);
+
+ return niter ? niter : chrec_dont_know;
+}
+
+/*
+
+ Analysis of upper bounds on number of iterations of a loop.
+
+*/
+
+static double_int derive_constant_upper_bound_ops (tree, tree,
+ enum tree_code, tree);
+
+/* Returns a constant upper bound on the value of the right-hand side of
+ an assignment statement STMT. */
+
+static double_int
+derive_constant_upper_bound_assign (gimple stmt)
+{
+ enum tree_code code = gimple_assign_rhs_code (stmt);
+ tree op0 = gimple_assign_rhs1 (stmt);
+ tree op1 = gimple_assign_rhs2 (stmt);
+
+ return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
+ op0, code, op1);
+}
+
+/* Returns a constant upper bound on the value of expression VAL. VAL
+ is considered to be unsigned. If its type is signed, its value must
+ be nonnegative. */
+
+static double_int
+derive_constant_upper_bound (tree val)
+{
+ enum tree_code code;
+ tree op0, op1;
+
+ extract_ops_from_tree (val, &code, &op0, &op1);
+ return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
+}
+
+/* Returns a constant upper bound on the value of expression OP0 CODE OP1,
+ whose type is TYPE. The expression is considered to be unsigned. If
+ its type is signed, its value must be nonnegative. */
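+
+/* Illustrative examples (not from the original sources): with TYPE an 8-bit
+ unsigned type (maximum 255), "op0 & 15" is bounded by 15 whatever op0 is,
+ "op0 / 4" is bounded by 255 / 4 = 63, and an expression whose code is not
+ handled below simply gets the type maximum 255 as its bound. */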
+
+static double_int
+derive_constant_upper_bound_ops (tree type, tree op0,
+ enum tree_code code, tree op1)
+{
+ tree subtype, maxt;
+ double_int bnd, max, mmax, cst;
+ gimple stmt;
+
+ if (INTEGRAL_TYPE_P (type))
+ maxt = TYPE_MAX_VALUE (type);
+ else
+ maxt = upper_bound_in_type (type, type);
+
+ max = tree_to_double_int (maxt);
+
+ switch (code)
+ {
+ case INTEGER_CST:
+ return tree_to_double_int (op0);
+
+ CASE_CONVERT:
+ subtype = TREE_TYPE (op0);
+ if (!TYPE_UNSIGNED (subtype)
+ /* If TYPE is also signed, the fact that VAL is nonnegative implies
+ that OP0 is nonnegative. */
+ && TYPE_UNSIGNED (type)
+ && !tree_expr_nonnegative_p (op0))
+ {
+ /* If we cannot prove that the cast expression is nonnegative, we
+ cannot establish a more useful upper bound than the precision of
+ the type gives us. */
+ return max;
+ }
+
+ /* We now know that op0 is a nonnegative value. Try deriving an upper
+ bound for it. */
+ bnd = derive_constant_upper_bound (op0);
+
+ /* If the bound does not fit in TYPE, the maximum value of TYPE could be
+ attained. */
+ if (double_int_ucmp (max, bnd) < 0)
+ return max;
+
+ return bnd;
+
+ case PLUS_EXPR:
+ case POINTER_PLUS_EXPR:
+ case MINUS_EXPR:
+ if (TREE_CODE (op1) != INTEGER_CST
+ || !tree_expr_nonnegative_p (op0))
+ return max;
+
+ /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
+ choose the most logical way to treat this constant regardless of
+ the signedness of the type. */
+ cst = tree_to_double_int (op1);
+ cst = double_int_sext (cst, TYPE_PRECISION (type));
+ if (code != MINUS_EXPR)
+ cst = double_int_neg (cst);
+
+ bnd = derive_constant_upper_bound (op0);
+
+ if (double_int_negative_p (cst))
+ {
+ cst = double_int_neg (cst);
+ /* Avoid CST == 0x80000... */
+ if (double_int_negative_p (cst))
+ return max;
+
+ /* OP0 + CST. We need to check that
+ BND <= MAX (type) - CST. */
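+ /* Illustrative numbers: in an 8-bit unsigned type (MAX = 255), with
+ CST = 100 and BND = 200 we have 200 > 255 - 100 = 155, so OP0 + CST
+ may wrap and only MAX is a safe bound; with BND = 40 the sum is
+ bounded by 40 + 100 = 140. */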
+
+ mmax = double_int_sub (max, cst);
+ if (double_int_ucmp (bnd, mmax) > 0)
+ return max;
+
+ return double_int_add (bnd, cst);
+ }
+ else
+ {
+ /* OP0 - CST, where CST >= 0.
+
+ If TYPE is signed, we have already verified that OP0 >= 0, and we
+ know that the result is nonnegative. This implies that
+ VAL <= BND - CST.
+
+ If TYPE is unsigned, we must additionally know that OP0 >= CST,
+ otherwise the operation underflows.
+ */
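+ /* Illustrative numbers: in an 8-bit unsigned type, BND = 10 and
+ CST = 50 means the subtraction may underflow (BND < CST), so only
+ MAX is safe; with BND = 60, CST = 50 and OP0 known to be at least
+ 50, the result is bounded by 60 - 50 = 10. */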
+
+ /* This should only happen if the type is unsigned; however, for
+ buggy programs that use overflowing signed arithmetic even with
+ -fno-wrapv, this condition may also be true for signed values. */
+ if (double_int_ucmp (bnd, cst) < 0)
+ return max;
+
+ if (TYPE_UNSIGNED (type))
+ {
+ tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
+ double_int_to_tree (type, cst));
+ if (!tem || !integer_nonzerop (tem))
+ return max;
+ }
+
+ bnd = double_int_sub (bnd, cst);
+ }
+
+ return bnd;
+
+ case FLOOR_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ if (TREE_CODE (op1) != INTEGER_CST
+ || tree_int_cst_sign_bit (op1))
+ return max;
+
+ bnd = derive_constant_upper_bound (op0);
+ return double_int_udiv (bnd, tree_to_double_int (op1), FLOOR_DIV_EXPR);
+
+ case BIT_AND_EXPR:
+ if (TREE_CODE (op1) != INTEGER_CST
+ || tree_int_cst_sign_bit (op1))
+ return max;
+ return tree_to_double_int (op1);
+
+ case SSA_NAME:
+ stmt = SSA_NAME_DEF_STMT (op0);
+ if (gimple_code (stmt) != GIMPLE_ASSIGN
+ || gimple_assign_lhs (stmt) != op0)
+ return max;
+ return derive_constant_upper_bound_assign (stmt);
+
+ default:
+ return max;
+ }
+}
+
+/* Records that every statement in LOOP is executed I_BOUND times.
+ REALISTIC is true if I_BOUND is expected to be close to the real number
+ of iterations. UPPER is true if we are sure the loop iterates at most
+ I_BOUND times. */
+
+static void
+record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
+ bool upper)
+{
+ /* Update the bounds only when there is no previous estimate, or when the
+ current estimate is smaller. */
+ if (upper
+ && (!loop->any_upper_bound
+ || double_int_ucmp (i_bound, loop->nb_iterations_upper_bound) < 0))
+ {
+ loop->any_upper_bound = true;
+ loop->nb_iterations_upper_bound = i_bound;
+ }
+ if (realistic
+ && (!loop->any_estimate
+ || double_int_ucmp (i_bound, loop->nb_iterations_estimate) < 0))
+ {
+ loop->any_estimate = true;
+ loop->nb_iterations_estimate = i_bound;
+ }
+}
+
+/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
+ is true if the loop is exited immediately after STMT, and this exit
+ is taken at the latest when STMT is executed BOUND + 1 times.
+ REALISTIC is true if BOUND is expected to be close to the real number
+ of iterations. UPPER is true if we are sure the loop iterates at most
+ BOUND times. I_BOUND is an unsigned double_int upper estimate on BOUND. */
+
+static void
+record_estimate (struct loop *loop, tree bound, double_int i_bound,
+ gimple at_stmt, bool is_exit, bool realistic, bool upper)
+{
+ double_int delta;
+ edge exit;
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
+ print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
+ fprintf (dump_file, " is %sexecuted at most ",
+ upper ? "" : "probably ");
+ print_generic_expr (dump_file, bound, TDF_SLIM);
+ fprintf (dump_file, " (bounded by ");
+ dump_double_int (dump_file, i_bound, true);
+ fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
+ }
+
+ /* If I_BOUND is just an estimate of BOUND, it is rarely close to the
+ real number of iterations. */
+ if (TREE_CODE (bound) != INTEGER_CST)
+ realistic = false;
+ if (!upper && !realistic)
+ return;
+
+ /* If we have a guaranteed upper bound, record it in the appropriate
+ list. */
+ if (upper)
+ {
+ struct nb_iter_bound *elt = ggc_alloc_nb_iter_bound ();
+
+ elt->bound = i_bound;
+ elt->stmt = at_stmt;
+ elt->is_exit = is_exit;
+ elt->next = loop->bounds;
+ loop->bounds = elt;
+ }
+
+ /* Update the number of iteration estimates according to the bound.
+ If at_stmt is an exit or dominates the single exit from the loop,
+ then the loop latch is executed at most BOUND times, otherwise
+ it can be executed BOUND + 1 times. */
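+ /* For instance (illustrative): if a statement executes at most 5 times
+ (BOUND = 4) and dominates the single exit, the exiting iteration also
+ executes it, so the latch runs at most 4 times; if it does not dominate
+ the exit, the iteration containing its last execution may still reach
+ the latch, so 5 latch executions must be allowed for. */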
+ exit = single_exit (loop);
+ if (is_exit
+ || (exit != NULL
+ && dominated_by_p (CDI_DOMINATORS,
+ exit->src, gimple_bb (at_stmt))))
+ delta = double_int_zero;
+ else
+ delta = double_int_one;
+ i_bound = double_int_add (i_bound, delta);
+
+ /* If an overflow occurred, ignore the result. */
+ if (double_int_ucmp (i_bound, delta) < 0)
+ return;
+
+ record_niter_bound (loop, i_bound, realistic, upper);
+}
+
+/* Record the estimate on number of iterations of LOOP based on the fact that
+ the induction variable BASE + STEP * i evaluated in STMT does not wrap and
+ its values belong to the range <LOW, HIGH>. REALISTIC is true if the
+ estimated number of iterations is expected to be close to the real one.
+ UPPER is true if we are sure the induction variable does not wrap. */
+
+static void
+record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
+ tree low, tree high, bool realistic, bool upper)
+{
+ tree niter_bound, extreme, delta;
+ tree type = TREE_TYPE (base), unsigned_type;
+ double_int max;
+
+ if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
+ return;
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Induction variable (");
+ print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
+ fprintf (dump_file, ") ");
+ print_generic_expr (dump_file, base, TDF_SLIM);
+ fprintf (dump_file, " + ");
+ print_generic_expr (dump_file, step, TDF_SLIM);
+ fprintf (dump_file, " * iteration does not wrap in statement ");
+ print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
+ fprintf (dump_file, " in loop %d.\n", loop->num);
+ }
+
+ unsigned_type = unsigned_type_for (type);
+ base = fold_convert (unsigned_type, base);
+ step = fold_convert (unsigned_type, step);
+
+ if (tree_int_cst_sign_bit (step))
+ {
+ extreme = fold_convert (unsigned_type, low);
+ if (TREE_CODE (base) != INTEGER_CST)
+ base = fold_convert (unsigned_type, high);
+ delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
+ step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
+ }
+ else
+ {
+ extreme = fold_convert (unsigned_type, high);
+ if (TREE_CODE (base) != INTEGER_CST)
+ base = fold_convert (unsigned_type, low);
+ delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
+ }
+
+ /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
+ would get out of the range. */
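+ /* Illustrative numbers: for a nonwrapping IV with constant base 0,
+ step 4 and range <0, 100>, DELTA = 100 and NITER_BOUND = 100 / 4 = 25;
+ the IV takes the 26 values 0, 4, ..., 100, so STMT can execute at
+ most NITER_BOUND + 1 = 26 times. */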
+ niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
+ max = derive_constant_upper_bound (niter_bound);
+ record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
+}
+
+/* Returns true if REF is a reference to an array at the end of a dynamically
+ allocated structure. If this is the case, the array may be allocated larger
+ than its upper bound implies. */
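+
+/* A typical (illustrative) case is the C idiom
+
+ struct s { int n; int a[1]; };
+ struct s *p = malloc (sizeof (struct s) + k * sizeof (int));
+
+ where accesses such as p->a[i] are intended to go past the single declared
+ element, so the declared bound of the trailing array must not be used to
+ bound the number of iterations of a loop walking it. */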
+
+bool
+array_at_struct_end_p (tree ref)
+{
+ tree base = get_base_address (ref);
+ tree parent, field;
+
+ /* Unless the reference is through a pointer, the size of the array matches
+ its declaration. */
+ if (!base || (!INDIRECT_REF_P (base) && TREE_CODE (base) != MEM_REF))
+ return false;
+
+ for (; handled_component_p (ref); ref = parent)
+ {
+ parent = TREE_OPERAND (ref, 0);
+
+ if (TREE_CODE (ref) == COMPONENT_REF)
+ {
+ /* All fields of a union are at its end. */
+ if (TREE_CODE (TREE_TYPE (parent)) == UNION_TYPE)
+ continue;
+
+ /* Unless the field is at the end of the struct, we are done. */
+ field = TREE_OPERAND (ref, 1);
+ if (DECL_CHAIN (field))
+ return false;
+ }
+
+ /* The other options are ARRAY_REF, ARRAY_RANGE_REF, VIEW_CONVERT_EXPR.
+ In all these cases, we might be accessing the last element, and
+ although in practice this will probably never happen, it is legal for
+ the indices of this last element to exceed the bounds of the array.
+ Therefore, continue checking. */
+ }
+
+ return true;
+}
+
+/* Determine information about the number of iterations of LOOP from the index
+ IDX of a data reference accessed in STMT. RELIABLE is true if STMT is
+ guaranteed to be executed in every iteration of LOOP. Callback for
+ for_each_index. */
+
+struct ilb_data
+{
+ struct loop *loop;
+ gimple stmt;
+ bool reliable;
+};
+
+static bool
+idx_infer_loop_bounds (tree base, tree *idx, void *dta)
+{
+ struct ilb_data *data = (struct ilb_data *) dta;
+ tree ev, init, step;
+ tree low, high, type, next;
+ bool sign, upper = data->reliable, at_end = false;
+ struct loop *loop = data->loop;
+
+ if (TREE_CODE (base) != ARRAY_REF)
+ return true;
+
+ /* For arrays at the end of the structure, we cannot guarantee that they
+ do not extend beyond their declared size. However, for arrays of
+ size greater than one, this is unlikely to be intended. */
+ if (array_at_struct_end_p (base))
+ {
+ at_end = true;
+ upper = false;
+ }
+
+ ev = instantiate_parameters (loop, analyze_scalar_evolution (loop, *idx));
+ init = initial_condition (ev);
+ step = evolution_part_in_loop_num (ev, loop->num);
+
+ if (!init
+ || !step
+ || TREE_CODE (step) != INTEGER_CST
+ || integer_zerop (step)
+ || tree_contains_chrecs (init, NULL)
+ || chrec_contains_symbols_defined_in_loop (init, loop->num))
+ return true;
+
+ low = array_ref_low_bound (base);
+ high = array_ref_up_bound (base);
+
+ /* The case of nonconstant bounds could be handled, but it would be
+ complicated. */
+ if (TREE_CODE (low) != INTEGER_CST
+ || !high
+ || TREE_CODE (high) != INTEGER_CST)
+ return true;
+ sign = tree_int_cst_sign_bit (step);
+ type = TREE_TYPE (step);
+
+ /* The array of length 1 at the end of a structure most likely extends
+ beyond its bounds. */
+ if (at_end
+ && operand_equal_p (low, high, 0))
+ return true;
+
+ /* In case the relevant bound of the array does not fit in type, or
+ it does, but bound + step (in type) still falls within the range of the
+ array, the index may wrap and still stay within the range of the array
+ (consider e.g. if the array is indexed by the full range of
+ unsigned char).
+
+ To make things simpler, we require both bounds to fit into type, although
+ there are cases where this would not be strictly necessary. */
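+ /* Illustrative example: an array with bounds <0, 255> indexed by an
+ "unsigned char" IV with step 1 gives next = 255 + 1 = 0 in the index
+ type, which is again within <0, 255>; the index can wrap without the
+ access ever leaving the array, so no bound may be derived from it. */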
+ if (!int_fits_type_p (high, type)
+ || !int_fits_type_p (low, type))
+ return true;
+ low = fold_convert (type, low);
+ high = fold_convert (type, high);
+
+ if (sign)
+ next = fold_binary (PLUS_EXPR, type, low, step);
+ else
+ next = fold_binary (PLUS_EXPR, type, high, step);
+
+ if (tree_int_cst_compare (low, next) <= 0
+ && tree_int_cst_compare (next, high) <= 0)
+ return true;
+
+ record_nonwrapping_iv (loop, init, step, data->stmt, low, high, true, upper);
+ return true;
+}
+
+/* Determine information about the number of iterations of LOOP from the bounds
+ of arrays in the data reference REF accessed in STMT. RELIABLE is true if
+ STMT is guaranteed to be executed in every iteration of LOOP. */
+
+static void
+infer_loop_bounds_from_ref (struct loop *loop, gimple stmt, tree ref,
+ bool reliable)
+{
+ struct ilb_data data;
+
+ data.loop = loop;
+ data.stmt = stmt;
+ data.reliable = reliable;
+ for_each_index (&ref, idx_infer_loop_bounds, &data);
+}
+
+/* Determine information about number of iterations of a LOOP from the way
+ arrays are used in STMT. RELIABLE is true if STMT is guaranteed to be
+ executed in every iteration of LOOP. */
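+
+/* For illustration (hypothetical example): with "int a[100];" and a loop
+ whose every iteration executes "a[i] = 0;" where i starts at 0 and steps
+ by 1, the access stays inside the array only for i <= 99, so a bound of
+ roughly 100 iterations is derived for the loop from this statement. */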
+
+static void
+infer_loop_bounds_from_array (struct loop *loop, gimple stmt, bool reliable)
+{
+ if (is_gimple_assign (stmt))
+ {
+ tree op0 = gimple_assign_lhs (stmt);
+ tree op1 = gimple_assign_rhs1 (stmt);
+
+ /* For each memory access, analyze its access function
+ and record a bound on the loop iteration domain. */
+ if (REFERENCE_CLASS_P (op0))
+ infer_loop_bounds_from_ref (loop, stmt, op0, reliable);
+
+ if (REFERENCE_CLASS_P (op1))
+ infer_loop_bounds_from_ref (loop, stmt, op1, reliable);
+ }
+ else if (is_gimple_call (stmt))
+ {
+ tree arg, lhs;
+ unsigned i, n = gimple_call_num_args (stmt);
+
+ lhs = gimple_call_lhs (stmt);
+ if (lhs && REFERENCE_CLASS_P (lhs))
+ infer_loop_bounds_from_ref (loop, stmt, lhs, reliable);
+
+ for (i = 0; i < n; i++)