1 /* Functions to determine/estimate number of iterations of a loop.
2 Copyright (C) 2004, 2005 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 2, or (at your option) any
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
23 #include "coretypes.h"
28 #include "hard-reg-set.h"
29 #include "basic-block.h"
31 #include "diagnostic.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
36 #include "tree-pass.h"
38 #include "tree-chrec.h"
39 #include "tree-scalar-evolution.h"
40 #include "tree-data-ref.h"
44 #include "tree-inline.h"
46 #define SWAP(X, Y) do { void *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
51 Analysis of number of iterations of an affine exit test.
55 /* Returns true if ARG is either NULL_TREE or constant zero. Unlike
56 integer_zerop, it does not care about overflow flags. */
/* NOTE(review): this dump elides source lines (internal numbering jumps
   56 -> 64); the signature and the NULL_TREE / non-INTEGER_CST early
   returns are not visible here.  Annotate only; do not restructure
   without the complete file.  */
64 if (TREE_CODE (arg) != INTEGER_CST)
/* Both halves of the double-word constant representation must be zero.  */
67 return (TREE_INT_CST_LOW (arg) == 0 && TREE_INT_CST_HIGH (arg) == 0);
70 /* Returns true if ARG a nonzero constant. Unlike integer_nonzerop, it does
71 not care about overflow flags. */
/* NOTE(review): lines elided in this dump (71 -> 79); the signature and
   the non-INTEGER_CST early return are missing from view.  */
79 if (TREE_CODE (arg) != INTEGER_CST)
/* Nonzero iff either half of the double-word constant is nonzero.  */
82 return (TREE_INT_CST_LOW (arg) != 0 || TREE_INT_CST_HIGH (arg) != 0);
85 /* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */
/* NOTE(review): the return-type line, loop headers, returns and braces
   are elided from this dump (numbering is non-contiguous).  The visible
   lines suggest two paths: a host-arithmetic fast path when the type
   fits in a HOST_WIDE_INT, and a tree-constant-folding slow path --
   TODO confirm against the full file before editing.  */
88 inverse (tree x, tree mask)
90 tree type = TREE_TYPE (x);
92 unsigned ctr = tree_floor_log2 (mask);
94 if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
96 unsigned HOST_WIDE_INT ix;
97 unsigned HOST_WIDE_INT imask;
98 unsigned HOST_WIDE_INT irslt = 1;
100 gcc_assert (cst_and_fits_in_hwi (x));
101 gcc_assert (cst_and_fits_in_hwi (mask));
103 ix = int_cst_value (x);
104 imask = int_cst_value (mask);
/* Fast path result: the inverse accumulated in host arithmetic.  */
113 rslt = build_int_cst_type (type, irslt);
/* Slow path: wide constants; start from 1 and fold in tree arithmetic.  */
117 rslt = build_int_cst_type (type, 1);
120 rslt = int_const_binop (MULT_EXPR, rslt, x, 0);
121 x = int_const_binop (MULT_EXPR, x, x, 0);
/* Reduce the accumulated product back into the 2^s residue ring.  */
123 rslt = int_const_binop (BIT_AND_EXPR, rslt, mask, 0);
129 /* Determine the number of iterations according to condition (for staying
130 inside loop) which compares two induction variables using comparison
131 operator CODE. The induction variable on left side of the comparison
132 has base BASE0 and step STEP0. the right-hand side one has base
133 BASE1 and step STEP1. Both induction variables must have type TYPE,
134 which must be an integer or pointer type. STEP0 and STEP1 must be
135 constants (or NULL_TREE, which is interpreted as constant zero).
137 The results (number of iterations and assumptions as described in
138 comments at struct tree_niter_desc in tree-flow.h) are stored to NITER.
139 In case we are unable to determine number of iterations, contents of
140 this structure is unchanged. */
/* NOTE(review): this dump elides many source lines (internal numbering
   is non-contiguous), so braces, `goto` targets, `return`s and several
   guard conditions are not visible.  The annotations below describe only
   what the visible lines establish; do not restructure this function
   without the complete file.  */
143 number_of_iterations_cond (tree type, tree base0, tree step0,
144 enum tree_code code, tree base1, tree step1,
145 struct tree_niter_desc *niter)
147 tree step, delta, mmin, mmax;
148 tree may_xform, bound, s, d, tmp;
149 bool was_sharp = false;
/* Start optimistic: no extra assumptions, loop assumed to roll.  */
151 tree assumptions = boolean_true_node;
152 tree noloop_assumptions = boolean_false_node;
153 tree niter_type, signed_niter_type;
156 /* The meaning of these assumptions is this:
158 then the rest of information does not have to be valid
159 if noloop_assumptions then the loop does not have to roll
160 (but it is only conservative approximation, i.e. it only says that
161 if !noloop_assumptions, then the loop does not end before the computed
162 number of iterations) */
164 /* Make < comparison from > ones. */
170 code = swap_tree_comparison (code);
173 /* We can handle the case when neither of the sides of the comparison is
174 invariant, provided that the test is NE_EXPR. This rarely occurs in
175 practice, but it is simple enough to manage. */
176 if (!zero_p (step0) && !zero_p (step1))
/* Fold the two steps into one: i0 - i1 becomes a single IV.  */
181 step0 = fold_binary_to_constant (MINUS_EXPR, type, step0, step1);
185 /* If the result is a constant, the loop is weird. More precise handling
186 would be possible, but the situation is not common enough to waste time
188 if (zero_p (step0) && zero_p (step1))
191 /* Ignore loops of while (i-- < 10) type. */
194 if (step0 && tree_int_cst_sign_bit (step0))
197 if (!zero_p (step1) && !tree_int_cst_sign_bit (step1))
201 if (POINTER_TYPE_P (type))
203 /* We assume pointer arithmetic never overflows. */
204 mmin = mmax = NULL_TREE;
/* Integer types: take the type's declared extrema for overflow checks.  */
208 mmin = TYPE_MIN_VALUE (type);
209 mmax = TYPE_MAX_VALUE (type);
212 /* Some more condition normalization. We must record some assumptions
217 /* We want to take care only of <=; this is easy,
218 as in cases the overflow would make the transformation unsafe the loop
219 does not roll. Seemingly it would make more sense to want to take
220 care of <, as NE is more similar to it, but the problem is that here
221 the transformation would be more difficult due to possibly infinite
/* Sharp (<) -> non-sharp (<=) rewrite: base0 < X becomes base0+1 <= X,
   guarded by base0 != mmax so the +1 cannot overflow.  */
226 assumption = fold_build2 (EQ_EXPR, boolean_type_node, base0, mmax);
228 assumption = boolean_false_node;
229 if (nonzero_p (assumption))
231 base0 = fold_build2 (PLUS_EXPR, type, base0,
232 build_int_cst_type (type, 1));
/* Mirror case on the right-hand side: X <= base1 becomes X <= base1-1,
   guarded by base1 != mmin.  */
237 assumption = fold_build2 (EQ_EXPR, boolean_type_node, base1, mmin);
239 assumption = boolean_false_node;
240 if (nonzero_p (assumption))
242 base1 = fold_build2 (MINUS_EXPR, type, base1,
243 build_int_cst_type (type, 1));
245 noloop_assumptions = assumption;
248 /* It will be useful to be able to tell the difference once more in
249 <= -> != reduction. */
253 /* Take care of trivially infinite loops. */
258 && operand_equal_p (base0, mmin, 0))
262 && operand_equal_p (base1, mmax, 0))
266 /* If we can we want to take care of NE conditions instead of size
267 comparisons, as they are much more friendly (most importantly
268 this takes care of special handling of loops with step 1). We can
269 do it if we first check that upper bound is greater or equal to
270 lower bound, their difference is constant c modulo step and that
271 there is not an overflow. */
275 step = fold_unary_to_constant (NEGATE_EXPR, type, step1);
278 delta = fold_build2 (MINUS_EXPR, type, base1, base0);
279 delta = fold_build2 (FLOOR_MOD_EXPR, type, delta, step);
280 may_xform = boolean_false_node;
282 if (TREE_CODE (delta) == INTEGER_CST)
284 tmp = fold_binary_to_constant (MINUS_EXPR, type, step,
285 build_int_cst_type (type, 1));
287 && operand_equal_p (delta, tmp, 0))
289 /* A special case. We have transformed condition of type
290 for (i = 0; i < 4; i += 4)
292 for (i = 0; i <= 3; i += 4)
293 obviously if the test for overflow during that transformation
294 passed, we cannot overflow here. Most importantly any
295 loop with sharp end condition and step 1 falls into this
296 category, so handling this case specially is definitely
297 worth the troubles. */
298 may_xform = boolean_true_node;
300 else if (zero_p (step0))
303 may_xform = boolean_true_node;
/* NOTE(review): the operands of these folds are on elided lines --
   cannot confirm which bound is being range-checked here.  */
306 bound = fold_binary_to_constant (PLUS_EXPR, type,
308 bound = fold_binary_to_constant (MINUS_EXPR, type,
310 may_xform = fold_build2 (LE_EXPR, boolean_type_node,
317 may_xform = boolean_true_node;
320 bound = fold_binary_to_constant (MINUS_EXPR, type,
322 bound = fold_binary_to_constant (PLUS_EXPR, type,
324 may_xform = fold_build2 (LE_EXPR, boolean_type_node,
330 if (!zero_p (may_xform))
332 /* We perform the transformation always provided that it is not
333 completely senseless. This is OK, as we would need this assumption
334 to determine the number of iterations anyway. */
335 if (!nonzero_p (may_xform))
336 assumptions = may_xform;
/* Align the bases so that the <= test can be replaced by != :
   shift base0 up (or base1 down) by delta and one step.  */
340 base0 = fold_build2 (PLUS_EXPR, type, base0, delta);
341 base0 = fold_build2 (MINUS_EXPR, type, base0, step);
345 base1 = fold_build2 (MINUS_EXPR, type, base1, delta);
346 base1 = fold_build2 (PLUS_EXPR, type, base1, step);
/* If the adjusted lower bound exceeds the upper one, the loop body
   never runs; fold that into noloop_assumptions.  */
349 assumption = fold_build2 (GT_EXPR, boolean_type_node, base0, base1);
350 noloop_assumptions = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
351 noloop_assumptions, assumption);
356 /* Count the number of iterations. */
357 niter_type = unsigned_type_for (type);
358 signed_niter_type = signed_type_for (type);
362 /* Everything we do here is just arithmetics modulo size of mode. This
363 makes us able to do more involved computations of number of iterations
364 than in other cases. First transform the condition into shape
365 s * i <> c, with s positive. */
366 base1 = fold_build2 (MINUS_EXPR, type, base1, base0);
369 step0 = fold_unary_to_constant (NEGATE_EXPR, type, step1);
/* Normalize to a positive step; negate both sides if needed.  */
371 if (tree_int_cst_sign_bit (fold_convert (signed_niter_type, step0)))
373 step0 = fold_unary_to_constant (NEGATE_EXPR, type, step0);
374 base1 = fold_build1 (NEGATE_EXPR, type, base1);
377 base1 = fold_convert (niter_type, base1);
378 step0 = fold_convert (niter_type, step0);
380 /* Let nsd (step, size of mode) = d. If d does not divide c, the loop
381 is infinite. Otherwise, the number of iterations is
382 (inverse(s/d) * (c/d)) mod (size of mode/d). */
383 bits = num_ending_zeros (step0);
384 d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
385 build_int_cst_type (niter_type, 1), bits);
386 s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, step0, bits);
388 bound = build_low_bits_mask (niter_type,
389 (TYPE_PRECISION (niter_type)
390 - tree_low_cst (bits, 1)));
/* Record the divisibility requirement c mod d == 0 as an assumption.  */
392 assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, base1, d);
393 assumption = fold_build2 (EQ_EXPR, boolean_type_node,
395 build_int_cst (niter_type, 0));
396 assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
397 assumptions, assumption);
/* niter = (c/d) * inverse(s) mod 2^(precision - bits), using the
   modular inverse computed by inverse() above.  */
399 tmp = fold_build2 (EXACT_DIV_EXPR, niter_type, base1, d);
400 tmp = fold_build2 (MULT_EXPR, niter_type, tmp, inverse (s, bound));
401 niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
406 /* Condition in shape a + s * i <= b
407 We must know that b + s does not overflow and a <= b + s and then we
408 can compute number of iterations as (b + s - a) / s. (It might
409 seem that we in fact could be more clever about testing the b + s
410 overflow condition using some information about b - a mod s,
411 but it was already taken into account during LE -> NE transform). */
415 bound = fold_binary_to_constant (MINUS_EXPR, type, mmax, step0);
416 assumption = fold_build2 (LE_EXPR, boolean_type_node,
418 assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
419 assumptions, assumption);
423 tmp = fold_build2 (PLUS_EXPR, type, base1, step0);
424 assumption = fold_build2 (GT_EXPR, boolean_type_node, base0, tmp);
425 delta = fold_build2 (PLUS_EXPR, type, base1, step);
426 delta = fold_build2 (MINUS_EXPR, type, delta, base0);
427 delta = fold_convert (niter_type, delta);
431 /* Condition in shape a <= b - s * i
432 We must know that a - s does not overflow and a - s <= b and then
433 we can again compute number of iterations as (b - (a - s)) / s. */
436 bound = fold_binary_to_constant (MINUS_EXPR, type, mmin, step1);
437 assumption = fold_build2 (LE_EXPR, boolean_type_node,
439 assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
440 assumptions, assumption);
442 step = fold_build1 (NEGATE_EXPR, type, step1);
443 tmp = fold_build2 (PLUS_EXPR, type, base0, step1);
444 assumption = fold_build2 (GT_EXPR, boolean_type_node, tmp, base1);
445 delta = fold_build2 (MINUS_EXPR, type, base0, step);
446 delta = fold_build2 (MINUS_EXPR, type, base1, delta);
447 delta = fold_convert (niter_type, delta);
449 noloop_assumptions = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
450 noloop_assumptions, assumption);
/* Final count for the <= cases: delta / step, in the unsigned type.  */
451 delta = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta,
452 fold_convert (niter_type, step));
453 niter->niter = delta;
/* Publish the accumulated results into the caller's descriptor.  */
456 niter->assumptions = assumptions;
457 niter->may_be_zero = noloop_assumptions;
/* NOTE(review): this tail looks like the zero-iterations exit path
   (an elided label likely precedes it) -- confirm in the full file.  */
461 niter->assumptions = boolean_true_node;
462 niter->may_be_zero = boolean_true_node;
463 niter->niter = build_int_cst_type (type, 0);
468 /* Similar to number_of_iterations_cond, but only handles the special
469 case of loops with step 1 or -1. The meaning of the arguments
470 is the same as in number_of_iterations_cond. The function
471 returns true if the special case was recognized, false otherwise. */
/* NOTE(review): many lines are elided in this dump (non-contiguous
   internal numbering); the switch/return skeleton is not visible.
   Annotate only; do not restructure without the complete file.  */
474 number_of_iterations_special (tree type, tree base0, tree step0,
475 enum tree_code code, tree base1, tree step1,
476 struct tree_niter_desc *niter)
478 tree niter_type = unsigned_type_for (type), mmax, mmin;
480 /* Make < comparison from > ones. */
486 code = swap_tree_comparison (code);
499 else if (!zero_p (step1))
/* NE_EXPR with step +1: count is simply base1 - base0 (mod type).  */
502 if (integer_onep (step0))
504 /* for (i = base0; i != base1; i++) */
505 niter->assumptions = boolean_true_node;
506 niter->may_be_zero = boolean_false_node;
507 niter->niter = fold_build2 (MINUS_EXPR, type, base1, base0);
508 niter->additional_info = boolean_true_node;
/* NE_EXPR with step -1: symmetric, base0 - base1.  */
510 else if (integer_all_onesp (step0))
512 /* for (i = base0; i != base1; i--) */
513 niter->assumptions = boolean_true_node;
514 niter->may_be_zero = boolean_false_node;
515 niter->niter = fold_build2 (MINUS_EXPR, type, base0, base1);
/* Sharp comparison (<) with unit step on exactly one side.  */
523 if ((step0 && integer_onep (step0) && zero_p (step1))
524 || (step1 && integer_all_onesp (step1) && zero_p (step0)))
526 /* for (i = base0; i < base1; i++)
530 for (i = base1; i > base0; i--).
532 In both cases # of iterations is base1 - base0. */
534 niter->assumptions = boolean_true_node;
535 niter->may_be_zero = fold_build2 (GT_EXPR, boolean_type_node,
537 niter->niter = fold_build2 (MINUS_EXPR, type, base1, base0);
544 if (POINTER_TYPE_P (type))
546 /* We assume pointer arithmetic never overflows. */
547 mmin = mmax = NULL_TREE;
551 mmin = TYPE_MIN_VALUE (type);
552 mmax = TYPE_MAX_VALUE (type);
/* Non-sharp (<=) variants: widen the bound by one, guarding against
   overflow at the type extremum via the NE_EXPR assumption.  */
555 if (step0 && integer_onep (step0) && zero_p (step1))
557 /* for (i = base0; i <= base1; i++) */
559 niter->assumptions = fold_build2 (NE_EXPR, boolean_type_node,
562 niter->assumptions = boolean_true_node;
563 base1 = fold_build2 (PLUS_EXPR, type, base1,
564 build_int_cst_type (type, 1));
566 else if (step1 && integer_all_onesp (step1) && zero_p (step0))
568 /* for (i = base1; i >= base0; i--) */
570 niter->assumptions = fold_build2 (NE_EXPR, boolean_type_node,
573 niter->assumptions = boolean_true_node;
574 base0 = fold_build2 (MINUS_EXPR, type, base0,
575 build_int_cst_type (type, 1));
580 niter->may_be_zero = fold_build2 (GT_EXPR, boolean_type_node,
582 niter->niter = fold_build2 (MINUS_EXPR, type, base1, base0);
/* Common tail: the count is always expressed in the unsigned type.  */
589 niter->niter = fold_convert (niter_type, niter->niter);
590 niter->additional_info = boolean_true_node;
594 /* Substitute NEW for OLD in EXPR and fold the result. */
/* NOTE(review): the guard that precedes line 606 (presumably a NULL or
   direct-match test on EXPR) is elided from this dump, as are the loop
   braces and the copy-on-first-change test around line 621.  */
597 simplify_replace_tree (tree expr, tree old, tree new)
600 tree ret = NULL_TREE, e, se;
606 || operand_equal_p (expr, old, 0))
/* Unshare so the caller can modify the replacement freely.  */
607 return unshare_expr (new);
/* Recurse into each operand; copy the node lazily, only when some
   operand actually changed.  */
612 n = TREE_CODE_LENGTH (TREE_CODE (expr));
613 for (i = 0; i < n; i++)
615 e = TREE_OPERAND (expr, i);
616 se = simplify_replace_tree (e, old, new);
621 ret = copy_node (expr);
623 TREE_OPERAND (ret, i) = se;
/* Fold only when something was replaced; otherwise return EXPR as-is.  */
626 return (ret ? fold (ret) : expr);
629 /* Expand definitions of ssa names in EXPR as long as they are simple
630 enough, and return the new expression. */
/* NOTE(review): the dump elides the early `return expr` lines that
   should follow the guards at 639/642/665/669, plus loop braces.  */
633 expand_simple_operations (tree expr)
636 tree ret = NULL_TREE, e, ee, stmt;
639 if (expr == NULL_TREE)
642 if (is_gimple_min_invariant (expr))
/* Structured expression: expand each operand recursively, copying the
   node lazily on first change (same pattern as simplify_replace_tree).  */
645 code = TREE_CODE (expr);
646 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
648 n = TREE_CODE_LENGTH (code);
649 for (i = 0; i < n; i++)
651 e = TREE_OPERAND (expr, i);
652 ee = expand_simple_operations (e);
657 ret = copy_node (expr);
659 TREE_OPERAND (ret, i) = ee;
662 return (ret ? fold (ret) : expr);
/* Bare SSA name: look through its defining statement if that statement
   is "simple" by the whitelist below.  */
665 if (TREE_CODE (expr) != SSA_NAME)
668 stmt = SSA_NAME_DEF_STMT (expr);
669 if (TREE_CODE (stmt) != MODIFY_EXPR)
672 e = TREE_OPERAND (stmt, 1);
673 if (/* Casts are simple. */
674 TREE_CODE (e) != NOP_EXPR
675 && TREE_CODE (e) != CONVERT_EXPR
676 /* Copies are simple. */
677 && TREE_CODE (e) != SSA_NAME
678 /* Assignments of invariants are simple. */
679 && !is_gimple_min_invariant (e)
680 /* And increments and decrements by a constant are simple. */
681 && !((TREE_CODE (e) == PLUS_EXPR
682 || TREE_CODE (e) == MINUS_EXPR)
683 && is_gimple_min_invariant (TREE_OPERAND (e, 1))))
/* Simple definition: keep expanding through its right-hand side.  */
686 return expand_simple_operations (e);
689 /* Tries to simplify EXPR using the condition COND. Returns the simplified
690 expression (or EXPR unchanged, if no simplification was possible). */
/* NOTE(review): elided lines hide the `changed` bookkeeping between the
   recursive calls and several returns; the visible structure is:
   (1) recurse into TRUTH_OR/TRUTH_AND/COND_EXPR operands, (2) try
   equality-based substitution both ways, (3) check COND ==> EXPR and
   COND ==> !EXPR via folding with the inverted condition.  */
693 tree_simplify_using_condition_1 (tree cond, tree expr)
696 tree e, te, e0, e1, e2, notcond;
697 enum tree_code code = TREE_CODE (expr);
/* Constants cannot be simplified further.  */
699 if (code == INTEGER_CST)
702 if (code == TRUTH_OR_EXPR
703 || code == TRUTH_AND_EXPR
704 || code == COND_EXPR)
708 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
709 if (TREE_OPERAND (expr, 0) != e0)
712 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
713 if (TREE_OPERAND (expr, 1) != e1)
716 if (code == COND_EXPR)
718 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
719 if (TREE_OPERAND (expr, 2) != e2)
727 if (code == COND_EXPR)
728 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
730 expr = fold_build2 (code, boolean_type_node, e0, e1);
736 /* In case COND is equality, we may be able to simplify EXPR by copy/constant
737 propagation, and vice versa. Fold does not handle this, since it is
738 considered too expensive. */
739 if (TREE_CODE (cond) == EQ_EXPR)
741 e0 = TREE_OPERAND (cond, 0);
742 e1 = TREE_OPERAND (cond, 1);
744 /* We know that e0 == e1. Check whether we cannot simplify expr
746 e = simplify_replace_tree (expr, e0, e1);
747 if (zero_p (e) || nonzero_p (e))
750 e = simplify_replace_tree (expr, e1, e0);
751 if (zero_p (e) || nonzero_p (e))
754 if (TREE_CODE (expr) == EQ_EXPR)
756 e0 = TREE_OPERAND (expr, 0);
757 e1 = TREE_OPERAND (expr, 1);
759 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
760 e = simplify_replace_tree (cond, e0, e1);
763 e = simplify_replace_tree (cond, e1, e0);
767 if (TREE_CODE (expr) == NE_EXPR)
769 e0 = TREE_OPERAND (expr, 0);
770 e1 = TREE_OPERAND (expr, 1);
772 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
773 e = simplify_replace_tree (cond, e0, e1);
775 return boolean_true_node;
776 e = simplify_replace_tree (cond, e1, e0);
778 return boolean_true_node;
/* Expand SSA chains before the implication checks so that casts and
   pre-loop increments do not hide a match.  */
781 te = expand_simple_operations (expr);
783 /* Check whether COND ==> EXPR. */
784 notcond = invert_truthvalue (cond);
785 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
789 /* Check whether COND ==> not EXPR. */
790 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
797 /* Tries to simplify EXPR using the condition COND. Returns the simplified
798 expression (or EXPR unchanged, if no simplification was possible).
799 Wrapper around tree_simplify_using_condition_1 that ensures that chains
800 of simple operations in definitions of ssa names in COND are expanded,
801 so that things like casts or incrementing the value of the bound before
802 the loop do not cause us to fail. */
/* Pre-expand COND once here instead of on every recursion level.  */
805 tree_simplify_using_condition (tree cond, tree expr)
807 cond = expand_simple_operations (cond);
809 return tree_simplify_using_condition_1 (cond, expr);
812 /* Tries to simplify EXPR using the conditions on entry to LOOP.
813 Record the conditions used for simplification to CONDS_USED.
814 Returns the simplified expression (or EXPR unchanged, if no
815 simplification was possible).*/
/* NOTE(review): the dump elides declarations, the success test after
   tree_simplify_using_condition, and the final return.  Visible logic:
   walk dominators from the loop header toward the entry block, and at
   each single-pred conditional edge try the edge's (possibly inverted)
   condition as a simplification context.  */
818 simplify_using_initial_conditions (struct loop *loop, tree expr,
/* Constants need no simplification.  */
825 if (TREE_CODE (expr) == INTEGER_CST)
828 for (bb = loop->header;
829 bb != ENTRY_BLOCK_PTR;
830 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
/* Only a single-predecessor block has one well-defined entry edge
   whose guarding condition we can use.  */
832 if (!single_pred_p (bb))
834 e = single_pred_edge (bb);
836 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
839 cond = COND_EXPR_COND (last_stmt (e->src));
/* If we entered through the false edge, the negated condition holds.  */
840 if (e->flags & EDGE_FALSE_VALUE)
841 cond = invert_truthvalue (cond);
842 exp = tree_simplify_using_condition (cond, expr);
/* Accumulate every condition that contributed, for the caller.  */
845 *conds_used = fold_build2 (TRUTH_AND_EXPR,
856 /* Tries to simplify EXPR using the evolutions of the loop invariants
857 in the superloops of LOOP. Returns the simplified expression
858 (or EXPR unchanged, if no simplification was possible). */
/* NOTE(review): declarations, the `changed` bookkeeping and final
   returns are elided; the operand-recursion skeleton mirrors
   tree_simplify_using_condition_1 above.  */
861 simplify_using_outer_evolutions (struct loop *loop, tree expr)
863 enum tree_code code = TREE_CODE (expr);
867 if (is_gimple_min_invariant (expr))
870 if (code == TRUTH_OR_EXPR
871 || code == TRUTH_AND_EXPR
872 || code == COND_EXPR)
876 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
877 if (TREE_OPERAND (expr, 0) != e0)
880 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
881 if (TREE_OPERAND (expr, 1) != e1)
884 if (code == COND_EXPR)
886 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
887 if (TREE_OPERAND (expr, 2) != e2)
895 if (code == COND_EXPR)
896 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
898 expr = fold_build2 (code, boolean_type_node, e0, e1);
/* Leaf case: instantiate scev parameters; keep the result only if it
   collapsed to an invariant.  */
904 e = instantiate_parameters (loop, expr);
905 if (is_gimple_min_invariant (e))
911 /* Stores description of number of iterations of LOOP derived from
912 EXIT (an exit edge of the LOOP) in NITER. Returns true if some
913 useful information could be derived (and fields of NITER has
914 meaning described in comments at struct tree_niter_desc
915 declaration), false otherwise. If WARN is true and
916 -Wunsafe-loop-optimizations was given, warn if the optimizer is going to use
917 potentially unsafe assumptions. */
/* NOTE(review): many lines are elided (returns, the comparison-code
   switch around line 942, several if-bodies).  Annotations below cover
   only the visible skeleton.  */
920 number_of_iterations_exit (struct loop *loop, edge exit,
921 struct tree_niter_desc *niter,
924 tree stmt, cond, type;
925 tree op0, base0, step0;
926 tree op1, base1, step1;
/* The exit must be taken every iteration, i.e. dominate the latch.  */
929 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src))
932 niter->assumptions = boolean_false_node;
933 stmt = last_stmt (exit->src);
934 if (!stmt || TREE_CODE (stmt) != COND_EXPR)
937 /* We want the condition for staying inside loop. */
938 cond = COND_EXPR_COND (stmt);
939 if (exit->flags & EDGE_TRUE_VALUE)
940 cond = invert_truthvalue (cond);
942 code = TREE_CODE (cond);
956 op0 = TREE_OPERAND (cond, 0);
957 op1 = TREE_OPERAND (cond, 1);
958 type = TREE_TYPE (op0);
/* Only integer and pointer comparisons are analyzable here.  */
960 if (TREE_CODE (type) != INTEGER_TYPE
961 && !POINTER_TYPE_P (type))
/* Both operands must be simple affine induction variables.  */
964 if (!simple_iv (loop, stmt, op0, &base0, &step0, false))
966 if (!simple_iv (loop, stmt, op1, &base1, &step1, false))
969 niter->niter = NULL_TREE;
971 /* Handle common special cases first, so that we do not need to use
972 generic (and slow) analysis very often. */
973 if (!number_of_iterations_special (type, base0, step0, code, base1, step1,
977 number_of_iterations_cond (type, base0, step0, code, base1, step1,
/* Post-process: simplify the computed expressions using outer-loop
   evolutions and the conditions on entry to the loop.  */
986 niter->assumptions = simplify_using_outer_evolutions (loop,
988 niter->may_be_zero = simplify_using_outer_evolutions (loop,
990 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
993 niter->additional_info = boolean_true_node;
995 = simplify_using_initial_conditions (loop,
997 &niter->additional_info);
999 = simplify_using_initial_conditions (loop,
1001 &niter->additional_info);
/* If no assumption remains, the result is unconditionally valid.  */
1003 if (integer_onep (niter->assumptions))
1006 /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
1007 But if we can prove that there is overflow or some other source of weird
1008 behavior, ignore the loop even with -funsafe-loop-optimizations. */
1009 if (integer_zerop (niter->assumptions))
1012 if (flag_unsafe_loop_optimizations)
1013 niter->assumptions = boolean_true_node;
1017 const char *wording;
1018 location_t loc = EXPR_LOCATION (stmt);
1020 /* We can provide a more specific warning if one of the operator is
1021 constant and the other advances by +1 or -1. */
1022 if (step1 ? !step0 && (integer_onep (step1) || integer_all_onesp (step1))
1023 : step0 && (integer_onep (step0) || integer_all_onesp (step0)))
1025 flag_unsafe_loop_optimizations
1026 ? N_("assuming that the loop is not infinite")
1027 : N_("cannot optimize possibly infinite loops")
1030 flag_unsafe_loop_optimizations
1031 ? N_("assuming that the loop counter does not overflow")
1032 : N_("cannot optimize loop, the loop counter may overflow");
/* Attach the source location to the warning when one is available.  */
1034 if (LOCATION_LINE (loc) > 0)
1035 warning (OPT_Wunsafe_loop_optimizations, "%H%s", &loc, gettext (wording));
1037 warning (OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
/* Unproven assumptions are only acceptable under the unsafe flag.  */
1040 return flag_unsafe_loop_optimizations;
1043 /* Try to determine the number of iterations of LOOP. If we succeed,
1044 expression giving number of iterations is returned and *EXIT is
1045 set to the edge from that the information is obtained. Otherwise
1046 chrec_dont_know is returned. */
/* NOTE(review): loop braces, the *EXIT assignments, `continue`s and the
   free() of the exits array are elided from this dump.  */
1049 find_loop_niter (struct loop *loop, edge *exit)
1051 unsigned n_exits, i;
1052 edge *exits = get_loop_exit_edges (loop, &n_exits);
1054 tree niter = NULL_TREE, aniter;
1055 struct tree_niter_desc desc;
1058 for (i = 0; i < n_exits; i++)
/* Only exits executed in every iteration give a valid bound.  */
1061 if (!just_once_each_iteration_p (loop, ex->src))
1064 if (!number_of_iterations_exit (loop, ex, &desc, false))
1067 if (nonzero_p (desc.may_be_zero))
1069 /* We exit in the first iteration through this exit.
1070 We won't find anything better. */
1071 niter = build_int_cst_type (unsigned_type_node, 0);
/* A conditional may_be_zero makes the count unreliable; skip.  */
1076 if (!zero_p (desc.may_be_zero))
1079 aniter = desc.niter;
1083 /* Nothing recorded yet. */
1089 /* Prefer constants, the lower the better. */
1090 if (TREE_CODE (aniter) != INTEGER_CST)
1093 if (TREE_CODE (niter) != INTEGER_CST)
1100 if (tree_int_cst_lt (aniter, niter))
1109 return niter ? niter : chrec_dont_know;
1114 Analysis of a number of iterations of a loop by a brute-force evaluation.
1118 /* Bound on the number of iterations we try to evaluate. */
1120 #define MAX_ITERATIONS_TO_TRACK \
1121 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
1123 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
1124 result by a chain of operations such that all but exactly one of their
1125 operands are constants. */
/* NOTE(review): the NULL_TREE failure returns after each guard and the
   `use` declaration are elided from this dump.  */
1128 chain_of_csts_start (struct loop *loop, tree x)
1130 tree stmt = SSA_NAME_DEF_STMT (x);
/* The defining statement must live inside LOOP.  */
1132 basic_block bb = bb_for_stmt (stmt);
1135 || !flow_bb_inside_loop_p (loop, bb))
/* A phi node qualifies only when it sits in the loop header.  */
1138 if (TREE_CODE (stmt) == PHI_NODE)
1140 if (bb == loop->header)
1146 if (TREE_CODE (stmt) != MODIFY_EXPR)
/* Reject statements with virtual operands or without exactly one
   real definition and one real use.  */
1149 if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
1151 if (SINGLE_SSA_DEF_OPERAND (stmt, SSA_OP_DEF) == NULL_DEF_OPERAND_P)
1154 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
1155 if (use == NULL_USE_OPERAND_P)
/* Walk up the single-use chain toward the header phi.  */
1158 return chain_of_csts_start (loop, use);
1161 /* Determines whether the expression X is derived from a result of a phi node
1162 in header of LOOP such that
1164 * the derivation of X consists only from operations with constants
1165 * the initial value of the phi node is constant
1166 * the value of the phi node in the next iteration can be derived from the
1167 value in the current iteration by a chain of operations with constants.
1169 If such phi node exists, it is returned. If X is a constant, X is returned
1170 unchanged. Otherwise NULL_TREE is returned. */
/* NOTE(review): the NULL_TREE failure returns and the final
   `return phi` are elided from this dump.  */
1173 get_base_for (struct loop *loop, tree x)
1175 tree phi, init, next;
/* Constants are their own base.  */
1177 if (is_gimple_min_invariant (x))
1180 phi = chain_of_csts_start (loop, x);
/* INIT comes from outside the loop; NEXT is the value fed back from
   the latch for the following iteration.  */
1184 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
1185 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
1187 if (TREE_CODE (next) != SSA_NAME)
1190 if (!is_gimple_min_invariant (init))
/* NEXT must be derived from the same phi by a constant chain,
   otherwise iterating the loop cannot be simulated.  */
1193 if (chain_of_csts_start (loop, next) != phi)
1199 /* Given an expression X, then
1201 * if BASE is NULL_TREE, X must be a constant and we return X.
1202 * otherwise X is a SSA name, whose value in the considered loop is derived
1203 by a chain of operations with constant from a result of a phi node in
1204 the header of the loop. Then we return value of X when the value of the
1205 result of this phi node is given by the constant BASE. */
/* NOTE(review): heavy elision here -- declarations, the BASE==NULL_TREE
   and phi-result base cases, and the operand-substitution step before
   the fold are all missing from this dump; the trailing lines around
   1228-1232 appear to belong to an unreachable/assert tail.  */
1208 get_val_for (tree x, tree base)
1217 stmt = SSA_NAME_DEF_STMT (x);
1218 if (TREE_CODE (stmt) == PHI_NODE)
/* Evaluate the single real use, then fold the defining RHS with the
   computed value substituted in.  */
1221 FOR_EACH_SSA_USE_OPERAND (op, stmt, iter, SSA_OP_USE)
1223 nx = USE_FROM_PTR (op);
1224 val = get_val_for (nx, base);
1226 val = fold (TREE_OPERAND (stmt, 1));
1228 /* only iterate loop once. */
1232 /* Should never reach here. */
1236 /* Tries to count the number of iterations of LOOP till it exits by EXIT
1237 by brute force -- i.e. by determining the value of the operands of the
1238 condition at EXIT in first few iterations of the loop (assuming that
1239 these values are constant) and determining the first one in that the
1240 condition is not satisfied. Returns the constant giving the number
1241 of the iterations of LOOP if successful, chrec_dont_know otherwise. */
/* NOTE(review): the comparison-code switch after line 1259 and several
   guards/returns are elided from this dump.  */
1244 loop_niter_by_eval (struct loop *loop, edge exit)
1246 tree cond, cnd, acnd;
1247 tree op[2], val[2], next[2], aval[2], phi[2];
1251 cond = last_stmt (exit->src);
1252 if (!cond || TREE_CODE (cond) != COND_EXPR)
1253 return chrec_dont_know;
1255 cnd = COND_EXPR_COND (cond);
/* Normalize to the condition for LEAVING the loop.  */
1256 if (exit->flags & EDGE_TRUE_VALUE)
1257 cnd = invert_truthvalue (cnd);
1259 cmp = TREE_CODE (cnd);
1268 for (j = 0; j < 2; j++)
1269 op[j] = TREE_OPERAND (cnd, j);
1273 return chrec_dont_know;
/* Each operand must be reducible to a header phi via constant ops.  */
1276 for (j = 0; j < 2; j++)
1278 phi[j] = get_base_for (loop, op[j]);
1280 return chrec_dont_know;
/* Seed val[] with the preheader values and next[] with the latch
   back-edge values; constants get a NULL_TREE next.  */
1283 for (j = 0; j < 2; j++)
1285 if (TREE_CODE (phi[j]) == PHI_NODE)
1287 val[j] = PHI_ARG_DEF_FROM_EDGE (phi[j], loop_preheader_edge (loop));
1288 next[j] = PHI_ARG_DEF_FROM_EDGE (phi[j], loop_latch_edge (loop));
1293 next[j] = NULL_TREE;
/* Simulate at most MAX_ITERATIONS_TO_TRACK iterations.  */
1298 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
1300 for (j = 0; j < 2; j++)
1301 aval[j] = get_val_for (op[j], val[j]);
/* Exit condition false => the loop exits in iteration i.  */
1303 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
1304 if (acnd && zero_p (acnd))
1306 if (dump_file && (dump_flags & TDF_DETAILS))
1308 "Proved that loop %d iterates %d times using brute force.\n",
1310 return build_int_cst (unsigned_type_node, i);
/* Advance the phi values one iteration.  */
1313 for (j = 0; j < 2; j++)
1314 val[j] = get_val_for (next[j], val[j]);
1317 return chrec_dont_know;
1320 /* Finds the exit of the LOOP by that the loop exits after a constant
1321 number of iterations and stores the exit edge to *EXIT. The constant
1322 giving the number of iterations of LOOP is returned. The number of
1323 iterations is determined using loop_niter_by_eval (i.e. by brute force
1324 evaluation). If we are unable to find the exit for that loop_niter_by_eval
1325 determines the number of iterations, chrec_dont_know is returned. */
/* NOTE(review): the loop braces, the *EXIT/niter updates and the
   free() of the exits array are elided from this dump.  */
1328 find_loop_niter_by_eval (struct loop *loop, edge *exit)
1330 unsigned n_exits, i;
1331 edge *exits = get_loop_exit_edges (loop, &n_exits);
1333 tree niter = NULL_TREE, aniter;
1336 for (i = 0; i < n_exits; i++)
/* Same validity requirement as find_loop_niter above.  */
1339 if (!just_once_each_iteration_p (loop, ex->src))
1342 aniter = loop_niter_by_eval (loop, ex);
1343 if (chrec_contains_undetermined (aniter))
/* Keep the smallest constant bound found so far.  */
1347 && !tree_int_cst_lt (aniter, niter))
1355 return niter ? niter : chrec_dont_know;
1360 Analysis of upper bounds on number of iterations of a loop.
1364 /* Records that AT_STMT is executed at most BOUND times in LOOP. The
1365 additional condition ADDITIONAL is recorded with the bound. */
/* NOTE(review): the `elt->bound = bound` assignment and the final
   `loop->bounds = elt` store are on elided lines -- the visible code
   only shows the list node being filled and linked.  */
1368 record_estimate (struct loop *loop, tree bound, tree additional, tree at_stmt)
1370 struct nb_iter_bound *elt = xmalloc (sizeof (struct nb_iter_bound));
1372 if (dump_file && (dump_flags & TDF_DETAILS))
1374 fprintf (dump_file, "Statements after ");
1375 print_generic_expr (dump_file, at_stmt, TDF_SLIM);
1376 fprintf (dump_file, " are executed at most ");
1377 print_generic_expr (dump_file, bound, TDF_SLIM);
1378 fprintf (dump_file, " times in loop %d.\n", loop->num);
/* Prepend the new bound onto the loop's bound list.  */
1382 elt->at_stmt = at_stmt;
1383 elt->additional = additional;
1384 elt->next = loop->bounds;
1388 /* Initialize LOOP->ESTIMATED_NB_ITERATIONS with the lowest safe
1389 approximation of the number of iterations for LOOP. */
1392 compute_estimated_nb_iterations (struct loop *loop)
1394 struct nb_iter_bound *bound;
/* Scan every recorded bound and keep the smallest constant one.  */
1396 for (bound = loop->bounds; bound; bound = bound->next)
1397 if (TREE_CODE (bound->bound) == INTEGER_CST
1398 /* Update only when there is no previous estimation. */
1399 && (chrec_contains_undetermined (loop->estimated_nb_iterations)
1400 /* Or when the current estimation is smaller. */
1401 || tree_int_cst_lt (bound->bound, loop->estimated_nb_iterations)))
1402 loop->estimated_nb_iterations = bound->bound;
1405 /* The following analyzers are extracting information on the bounds
1406 of LOOP from the following undefined behaviors:
1408 - data references should not access elements over the statically
1411 - signed variables should not overflow when flag_wrapv is not set.
/* Derives iteration bounds for LOOP from undefined-behavior rules:
   array accesses must stay in bounds, and (without -fwrapv) signed
   induction variables must not overflow.  Updates the loop's
   estimated iteration count when no estimate exists yet.  */
1415 infer_loop_bounds_from_undefined (struct loop *loop)
1418 basic_block bb, *bbs;
1419 block_stmt_iterator bsi;
1421 bbs = get_loop_body (loop);
/* Walk every statement of every block of the loop body.  */
1423 for (i = 0; i < loop->num_nodes; i++)
1427 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
1429 tree stmt = bsi_stmt (bsi);
1431 switch (TREE_CODE (stmt))
/* Assignment (MODIFY_EXPR) case: op0 = lhs, op1 = rhs.  */
1435 tree op0 = TREE_OPERAND (stmt, 0);
1436 tree op1 = TREE_OPERAND (stmt, 1);
1438 /* For each array access, analyze its access function
1439 and record a bound on the loop iteration domain. */
1440 if (TREE_CODE (op1) == ARRAY_REF)
1441 estimate_iters_using_array (stmt, op1);
1443 if (TREE_CODE (op0) == ARRAY_REF)
1444 estimate_iters_using_array (stmt, op0);
1446 /* For each signed type variable in LOOP, analyze its
1447 scalar evolution and record a bound of the loop
1448 based on the type's ranges. */
1449 else if (!flag_wrapv && TREE_CODE (op0) == SSA_NAME)
1451 tree init, step, diff, estimation;
1452 tree scev = instantiate_parameters
1453 (loop, analyze_scalar_evolution (loop, op0));
1454 tree type = chrec_type (scev);
/* Only determinate, signed evolutions are usable here.  */
1457 if (chrec_contains_undetermined (scev)
1458 || TYPE_UNSIGNED (type))
1461 init = initial_condition_in_loop_num (scev, loop->num);
1462 step = evolution_part_in_loop_num (scev, loop->num);
/* Bail out unless both the initial value and the step are known
   constants and the type has explicit range bounds.  */
1464 if (init == NULL_TREE
1465 || step == NULL_TREE
1466 || TREE_CODE (init) != INTEGER_CST
1467 || TREE_CODE (step) != INTEGER_CST
1468 || TYPE_MIN_VALUE (type) == NULL_TREE
1469 || TYPE_MAX_VALUE (type) == NULL_TREE)
/* Distance from INIT to the type extreme in the direction of
   travel; the division below turns it into an iteration bound.  */
1472 utype = unsigned_type_for (type);
1473 if (tree_int_cst_lt (step, integer_zero_node))
1474 diff = fold_build2 (MINUS_EXPR, utype, init,
1475 TYPE_MIN_VALUE (type));
1477 diff = fold_build2 (MINUS_EXPR, utype,
1478 TYPE_MAX_VALUE (type), init);
1480 estimation = fold_build2 (CEIL_DIV_EXPR, utype, diff,
1482 record_estimate (loop, estimation, boolean_true_node, stmt);
/* Presumably the CALL_EXPR case: scan the argument list for array
   references — TODO confirm against the full source.  */
1492 for (args = TREE_OPERAND (stmt, 1); args;
1493 args = TREE_CHAIN (args))
1494 if (TREE_CODE (TREE_VALUE (args)) == ARRAY_REF)
1495 estimate_iters_using_array (stmt, TREE_VALUE (args));
/* Recompute the loop-level estimate if none was known before.  */
1505 if (chrec_contains_undetermined (loop->estimated_nb_iterations))
1506 compute_estimated_nb_iterations (loop);
1512 /* Records estimates on numbers of iterations of LOOP. */
/* Records iteration-count estimates for LOOP: one per analyzable exit
   via number_of_iterations_exit, falling back to undefined-behavior
   inference when no exit yields an estimate.  */
1515 estimate_numbers_of_iterations_loop (struct loop *loop)
1519 unsigned i, n_exits;
1520 struct tree_niter_desc niter_desc;
1522 /* Give up if we already have tried to compute an estimation. */
1523 if (loop->estimated_nb_iterations == chrec_dont_know
1524 /* Or when we already have an estimation. */
1525 || (loop->estimated_nb_iterations != NULL_TREE
1526 && TREE_CODE (loop->estimated_nb_iterations) == INTEGER_CST))
/* Mark the loop as "attempted" so reentry takes the early exit
   above even if nothing better is found below.  */
1529 loop->estimated_nb_iterations = chrec_dont_know;
1531 exits = get_loop_exit_edges (loop, &n_exits);
1532 for (i = 0; i < n_exits; i++)
1534 if (!number_of_iterations_exit (loop, exits[i], &niter_desc, false))
1537 niter = niter_desc.niter;
1538 type = TREE_TYPE (niter);
/* If the may_be_zero condition is neither provably false nor provably
   true, guard the count: presumably NITER becomes
   may_be_zero ? 0 : niter (third COND_EXPR arm is elided here).  */
1539 if (!zero_p (niter_desc.may_be_zero)
1540 && !nonzero_p (niter_desc.may_be_zero))
1541 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
1542 build_int_cst_type (type, 0),
1544 record_estimate (loop, niter,
1545 niter_desc.additional_info,
1546 last_stmt (exits[i]->src));
/* No exit produced an estimate: mine undefined behavior instead.  */
1550 if (chrec_contains_undetermined (loop->estimated_nb_iterations))
1551 infer_loop_bounds_from_undefined (loop);
1554 /* Records estimates on numbers of iterations of LOOPS. */
/* Records iteration estimates for every loop in LOOPS by delegating
   to estimate_numbers_of_iterations_loop.  */
1557 estimate_numbers_of_iterations (struct loops *loops)
/* Index 0 is the pseudo-loop for the whole function; start at 1.
   (A NULL check on parray entries is presumably in the elided
   lines — confirm against the full source.)  */
1562 for (i = 1; i < loops->num; i++)
1564 loop = loops->parray[i];
1566 estimate_numbers_of_iterations_loop (loop);
1570 /* If A > B, returns -1. If A == B, returns 0. If A < B, returns 1.
1571 If neither of these relations can be proved, returns 2. */
/* Orders A and B (see the contract above: -1 for A > B, 0 for equal,
   1 for A < B, 2 when unprovable).  */
1574 compare_trees (tree a, tree b)
1576 tree typea = TREE_TYPE (a), typeb = TREE_TYPE (b);
/* Convert both operands to the wider of the two types so the
   comparisons below are performed in a common type.  */
1579 if (TYPE_PRECISION (typea) > TYPE_PRECISION (typeb))
1584 a = fold_convert (type, a);
1585 b = fold_convert (type, b);
/* fold_binary folds to a constant only when the relation can be
   proved; nonzero_p then tests for a "true" result.  */
1587 if (nonzero_p (fold_binary (EQ_EXPR, boolean_type_node, a, b)))
1589 if (nonzero_p (fold_binary (LT_EXPR, boolean_type_node, a, b)))
1591 if (nonzero_p (fold_binary (GT_EXPR, boolean_type_node, a, b)))
1597 /* Returns true if statement S1 dominates statement S2. */
/* Returns true if statement S1 dominates statement S2, i.e. S1 is
   always executed before S2 on any path reaching S2.  */
1600 stmt_dominates_stmt_p (tree s1, tree s2)
1602 basic_block bb1 = bb_for_stmt (s1), bb2 = bb_for_stmt (s2);
1610 block_stmt_iterator bsi;
/* Same-block case: walk from the block start toward S2; meeting S1
   first means S1 precedes (hence dominates) S2.  */
1612 for (bsi = bsi_start (bb1); bsi_stmt (bsi) != s2; bsi_next (&bsi))
1613 if (bsi_stmt (bsi) == s1)
/* Different blocks: defer to the CFG dominator tree.  */
1619 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
1622 /* Return true when it is possible to prove that the induction
1623 variable does not wrap: vary outside the type specified bounds.
1624 Checks whether BOUND < VALID_NITER that means in the context of iv
1625 conversion that all the iterations in the loop are safe: not
1628 The statement NITER_BOUND->AT_STMT is executed at most
1629 NITER_BOUND->BOUND times in the loop.
1631 NITER_BOUND->ADDITIONAL is the additional condition recorded for
1632 operands of the bound. This is useful in the following case,
1633 created by loop header copying:
1642 If the n > 0 condition is taken into account, the number of iterations of the
1643 loop can be expressed as n - 1. If the type of n is signed, the ADDITIONAL
1644 assumption "n > 0" tells us that the value of the number of iterations is at
1645 most MAX_TYPE - 1 (without this assumption, it might overflow). */
/* Returns true when NITER_BOUND proves the iv does not wrap, i.e.
   the recorded BOUND is below VALID_NITER (see the long comment
   above for the role of NITER_BOUND->ADDITIONAL).  */
1648 proved_non_wrapping_p (tree at_stmt,
1649 struct nb_iter_bound *niter_bound,
1654 tree bound = niter_bound->bound;
/* Compare in a type at least as wide as NEW_TYPE; widen BOUND to the
   unsigned variant when needed, then bring VALID_NITER to BOUND's
   type.  */
1657 if (TYPE_PRECISION (new_type) > TYPE_PRECISION (TREE_TYPE (bound)))
1658 bound = fold_convert (unsigned_type_for (new_type), bound);
1660 valid_niter = fold_convert (TREE_TYPE (bound), valid_niter);
1662 /* Give up if BOUND was not folded to an INTEGER_CST, as in PR23434. */
1663 if (TREE_CODE (bound) != INTEGER_CST)
1666 /* After the statement niter_bound->at_stmt we know that anything is
1667 executed at most BOUND times. */
1668 if (at_stmt && stmt_dominates_stmt_p (niter_bound->at_stmt, at_stmt))
1670 /* Before the statement niter_bound->at_stmt we know that anything
1671 is executed at most BOUND + 1 times. */
/* First try to prove VALID_NITER cmp BOUND directly by folding.  */
1675 cond = fold_binary (cmp, boolean_type_node, valid_niter, bound);
1676 if (nonzero_p (cond))
1679 cond = build2 (cmp, boolean_type_node, valid_niter, bound);
1680 /* Try taking additional conditions into account. */
/* Prove !ADDITIONAL || (VALID_NITER cmp BOUND): under the recorded
   extra assumption the comparison must hold.  */
1681 cond = fold_binary (TRUTH_OR_EXPR, boolean_type_node,
1682 invert_truthvalue (niter_bound->additional),
1685 if (nonzero_p (cond))
1691 /* Checks whether it is correct to count the induction variable BASE +
1692 STEP * I at AT_STMT in a wider type NEW_TYPE, using the bounds on
1693 numbers of iterations of a LOOP. If it is possible, return the
1694 value of step of the induction variable in the NEW_TYPE, otherwise
1695 return NULL_TREE. */
/* Checks whether the iv BASE + STEP * I at AT_STMT can be counted in
   the wider NEW_TYPE; returns the step converted to NEW_TYPE, or
   NULL_TREE when wrapping cannot be ruled out.  */
1698 convert_step_widening (struct loop *loop, tree new_type, tree base, tree step,
1701 struct nb_iter_bound *bound;
1702 tree base_in_new_type, base_plus_step_in_new_type, step_in_new_type;
1703 tree delta, step_abs;
1704 tree unsigned_type, valid_niter;
1706 /* Compute the new step. For example, {(uchar) 100, +, (uchar) 240}
1707 is converted to {(uint) 100, +, (uint) 0xfffffff0} in order to
1708 keep the values of the induction variable unchanged: 100, 84, 68,
1711 Another example is: (uint) {(uchar)100, +, (uchar)3} is converted
1712 to {(uint)100, +, (uint)3}.
1714 Before returning the new step, verify that the number of
1715 iterations is less than DELTA / STEP_ABS (i.e. in the previous
1716 example (256 - 100) / 3) such that the iv does not wrap (in which
1717 case the operations are too difficult to be represented and
1718 handled: the values of the iv should be taken modulo 256 in the
1719 wider type; this is not implemented). */
1720 base_in_new_type = fold_convert (new_type, base);
/* New step = convert(base + step) - convert(base): preserves the
   sequence of values rather than the raw bit pattern of STEP.  */
1721 base_plus_step_in_new_type =
1722 fold_convert (new_type,
1723 fold_build2 (PLUS_EXPR, TREE_TYPE (base), base, step));
1724 step_in_new_type = fold_build2 (MINUS_EXPR, new_type,
1725 base_plus_step_in_new_type,
1728 if (TREE_CODE (step_in_new_type) != INTEGER_CST)
/* Determine the direction of travel; DELTA is the headroom between
   BASE and the relevant type extreme, STEP_ABS its magnitude.  */
1731 switch (compare_trees (base_plus_step_in_new_type, base_in_new_type))
1735 tree extreme = upper_bound_in_type (new_type, TREE_TYPE (base));
1736 delta = fold_build2 (MINUS_EXPR, new_type, extreme,
1738 step_abs = step_in_new_type;
1744 tree extreme = lower_bound_in_type (new_type, TREE_TYPE (base));
1745 delta = fold_build2 (MINUS_EXPR, new_type, base_in_new_type,
1747 step_abs = fold_build1 (NEGATE_EXPR, new_type, step_in_new_type);
/* Presumably the zero-step case: a constant iv cannot wrap, so the
   converted step is safe as-is — confirm against the full source.  */
1752 return step_in_new_type;
/* VALID_NITER = floor(DELTA / STEP_ABS): the largest iteration count
   for which the iv provably stays within the original range.  */
1758 unsigned_type = unsigned_type_for (new_type);
1759 delta = fold_convert (unsigned_type, delta);
1760 step_abs = fold_convert (unsigned_type, step_abs);
1761 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type,
/* Any recorded loop bound below VALID_NITER proves non-wrapping.  */
1764 estimate_numbers_of_iterations_loop (loop);
1765 for (bound = loop->bounds; bound; bound = bound->next)
1766 if (proved_non_wrapping_p (at_stmt, bound, new_type, valid_niter))
1767 return step_in_new_type;
1769 /* Fail when the loop has no bound estimations, or when no bound can
1770 be used for verifying the conversion. */
1774 /* Returns true when VAR is used in pointer arithmetics. DEPTH is
1775 used for limiting the search. */
/* Returns true when the (single-use) SSA name VAR eventually feeds a
   conversion to a pointer type; DEPTH bounds the recursion through
   intermediate casts.  */
1778 used_in_pointer_arithmetic_p (tree var, int depth)
1780 use_operand_p use_p;
1781 imm_use_iterator iter;
/* Bail out for non-SSA names or names with multiple uses (and,
   presumably, when DEPTH is exhausted — first condition is elided).  */
1784 || TREE_CODE (var) != SSA_NAME
1785 || !has_single_use (var))
1788 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
1790 tree stmt = USE_STMT (use_p);
1792 if (stmt && TREE_CODE (stmt) == MODIFY_EXPR)
1794 tree rhs = TREE_OPERAND (stmt, 1);
/* Only conversions are followed; a cast directly to a pointer type
   proves pointer arithmetic, otherwise recurse through the cast's
   result with a decremented depth.  */
1796 if (TREE_CODE (rhs) == NOP_EXPR
1797 || TREE_CODE (rhs) == CONVERT_EXPR)
1799 if (POINTER_TYPE_P (TREE_TYPE (rhs)))
1804 return used_in_pointer_arithmetic_p (TREE_OPERAND (stmt, 0),
1811 /* Return false only when the induction variable BASE + STEP * I is
1812 known to not overflow: i.e. when the number of iterations is small
1813 enough with respect to the step and initial condition in order to
1814 keep the evolution confined in TYPEs bounds. Return true when the
1815 iv is known to overflow or when the property is not computable.
1817 Initialize INIT_IS_MAX to true when the evolution goes from
1818 INIT_IS_MAX to LOWER_BOUND_IN_TYPE, false in the contrary case.
1819 When this property cannot be determined, UNKNOWN_MAX is set to
/* Returns false only when BASE + STEP * I provably stays within
   TYPE's bounds; true when it may wrap or the answer is unknown.
   Sets *INIT_IS_MAX per the direction of the evolution and
   *UNKNOWN_MAX when that direction could not be determined.  */
1823 scev_probably_wraps_p (tree type, tree base, tree step,
1824 tree at_stmt, struct loop *loop,
1825 bool *init_is_max, bool *unknown_max)
1827 struct nb_iter_bound *bound;
1828 tree delta, step_abs;
1829 tree unsigned_type, valid_niter;
1830 tree base_plus_step, bpsps;
1833 /* FIXME: The following code will not be used anymore once
1834 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html is
1837 If AT_STMT is a cast to unsigned that is later used for
1838 referencing a memory location, it is followed by a pointer
1839 conversion just after. Because pointers do not wrap, the
1840 sequences that reference the memory do not wrap either. In the
1841 following example, sequences corresponding to D_13 and to D_14
1842 can be proved to not wrap because they are used for computing a
1845 D.1621_13 = (long unsigned intD.4) D.1620_12;
1846 D.1622_14 = D.1621_13 * 8;
1847 D.1623_15 = (doubleD.29 *) D.1622_14;
1849 if (at_stmt && TREE_CODE (at_stmt) == MODIFY_EXPR)
1851 tree op0 = TREE_OPERAND (at_stmt, 0);
1852 tree op1 = TREE_OPERAND (at_stmt, 1);
1853 tree type_op1 = TREE_TYPE (op1);
/* Pointer-destined values cannot wrap; answer "no wrap" but with an
   unknown direction.  */
1855 if ((TYPE_UNSIGNED (type_op1)
1856 && used_in_pointer_arithmetic_p (op0, 2))
1857 || POINTER_TYPE_P (type_op1))
1859 *unknown_max = true;
/* Undetermined or floating-point evolutions: give up.  */
1864 if (chrec_contains_undetermined (base)
1865 || chrec_contains_undetermined (step)
1866 || TREE_CODE (base) == REAL_CST
1867 || TREE_CODE (step) == REAL_CST)
1869 *unknown_max = true;
/* Probe the first two steps of the sequence to detect its
   monotonicity.  */
1873 *unknown_max = false;
1874 base_plus_step = fold_build2 (PLUS_EXPR, type, base, step);
1875 bpsps = fold_build2 (PLUS_EXPR, type, base_plus_step, step);
1876 cps = compare_trees (base_plus_step, base);
1877 cpsps = compare_trees (bpsps, base_plus_step);
1879 /* Check that the sequence is not wrapping in the first step: it
1880 should have the same monotonicity for the first two steps. See
/* Increasing sequence: headroom is up to the type's upper bound.  */
1889 tree extreme = upper_bound_in_type (type, TREE_TYPE (base));
1890 delta = fold_build2 (MINUS_EXPR, type, extreme, base);
1892 *init_is_max = false;
/* Decreasing sequence: headroom is down to the type's lower bound.  */
1898 tree extreme = lower_bound_in_type (type, TREE_TYPE (base));
1899 delta = fold_build2 (MINUS_EXPR, type, base, extreme);
1900 step_abs = fold_build1 (NEGATE_EXPR, type, step);
1901 *init_is_max = true;
1906 /* This means step is equal to 0. This should not happen. It
1907 could happen in convert step, but not here. Safely answer
1908 don't know as in the default case. */
1911 *unknown_max = true;
1915 /* If AT_STMT represents a cast operation, we may not be able to
1916 take advantage of the undefinedness of signed type evolutions.
1918 implement-c.texi states: "For conversion to a type of width
1919 N, the value is reduced modulo 2^N to be within range of the
1922 See PR 21959 for a test case. Essentially, given a cast
1927 sc = (signed char) uc;
1931 where uc and sc have the scev {0, +, 1}, we would consider uc to
1932 wrap around, but not sc, because it is of a signed type. This
1933 causes VRP to erroneously fold the predicate above because it
1934 thinks that sc cannot be negative. */
1935 if (at_stmt && TREE_CODE (at_stmt) == MODIFY_EXPR)
1937 tree rhs = TREE_OPERAND (at_stmt, 1);
1938 tree outer_t = TREE_TYPE (rhs);
1940 if (!TYPE_UNSIGNED (outer_t)
1941 && (TREE_CODE (rhs) == NOP_EXPR || TREE_CODE (rhs) == CONVERT_EXPR))
1943 tree inner_t = TREE_TYPE (TREE_OPERAND (rhs, 0));
1945 /* If the inner type is unsigned and its size and/or
1946 precision are smaller to that of the outer type, then the
1947 expression may wrap around. */
/* NOTE(review): TYPE_SIZE yields a tree node, so "<=" here compares
   pointers rather than the actual sizes — confirm this is intended
   (the precision comparison below does compare integers).  */
1948 if (TYPE_UNSIGNED (inner_t)
1949 && (TYPE_SIZE (inner_t) <= TYPE_SIZE (outer_t)
1950 || TYPE_PRECISION (inner_t) <= TYPE_PRECISION (outer_t)))
1952 *unknown_max = true;
1958 /* After having set INIT_IS_MAX, we can return false: when not using
1959 wrapping arithmetic, signed types don't wrap. */
1960 if (!flag_wrapv && !TYPE_UNSIGNED (type))
/* Unsigned/wrapping case: prove the iteration count is below
   DELTA / STEP_ABS using the recorded loop bounds.  */
1963 unsigned_type = unsigned_type_for (type);
1964 delta = fold_convert (unsigned_type, delta);
1965 step_abs = fold_convert (unsigned_type, step_abs);
1966 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
1968 estimate_numbers_of_iterations_loop (loop);
1969 for (bound = loop->bounds; bound; bound = bound->next)
1970 if (proved_non_wrapping_p (at_stmt, bound, type, valid_niter))
1973 /* At this point we still don't have a proof that the iv does not
1974 overflow: give up. */
1975 *unknown_max = true;
1979 /* Return the conversion to NEW_TYPE of the STEP of an induction
1980 variable BASE + STEP * I at AT_STMT. When it fails, return
/* Converts STEP of the iv BASE + STEP * I at AT_STMT to NEW_TYPE.
   Returns NULL_TREE when the conversion cannot be proved safe.  */
1984 convert_step (struct loop *loop, tree new_type, tree base, tree step,
/* Undetermined evolutions cannot be converted.  */
1989 if (chrec_contains_undetermined (base)
1990 || chrec_contains_undetermined (step)
1993 base_type = TREE_TYPE (base);
1995 /* When not using wrapping arithmetic, signed types don't wrap. */
1996 if (!flag_wrapv && !TYPE_UNSIGNED (base_type))
1997 return fold_convert (new_type, step);
/* Widening needs a wrap-proof (convert_step_widening); a conversion
   to an equal or narrower type is always safe to fold directly.  */
1999 if (TYPE_PRECISION (new_type) > TYPE_PRECISION (base_type))
2000 return convert_step_widening (loop, new_type, base, step, at_stmt);
2002 return fold_convert (new_type, step);
2005 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
/* Releases every nb_iter_bound record on LOOP's bound list and
   clears the list head.  */
2008 free_numbers_of_iterations_estimates_loop (struct loop *loop)
2010 struct nb_iter_bound *bound, *next;
/* Save each successor before its node is freed (free happens in the
   elided loop body).  */
2012 for (bound = loop->bounds; bound; bound = next)
2018 loop->bounds = NULL;
2021 /* Frees the information on upper bounds on numbers of iterations of LOOPS. */
/* Frees the iteration-estimate records of every loop in LOOPS.  */
2024 free_numbers_of_iterations_estimates (struct loops *loops)
/* Index 0 is the whole-function pseudo-loop; start at 1.  */
2029 for (i = 1; i < loops->num; i++)
2031 loop = loops->parray[i];
2033 free_numbers_of_iterations_estimates_loop (loop);
2037 /* Substitute value VAL for ssa name NAME inside expressions held
2041 substitute_in_loop_info (struct loop *loop, tree name, tree val)
2043 struct nb_iter_bound *bound;
2045 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
2046 loop->estimated_nb_iterations
2047 = simplify_replace_tree (loop->estimated_nb_iterations, name, val);
2048 for (bound = loop->bounds; bound; bound = bound->next)
2050 bound->bound = simplify_replace_tree (bound->bound, name, val);
2051 bound->additional = simplify_replace_tree (bound->additional, name, val);