1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4 Contributed by Diego Novillo <dnovillo@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
29 #include "basic-block.h"
30 #include "tree-flow.h"
31 #include "tree-pass.h"
32 #include "tree-dump.h"
34 #include "tree-pretty-print.h"
35 #include "gimple-pretty-print.h"
36 #include "diagnostic-core.h"
39 #include "tree-scalar-evolution.h"
40 #include "tree-ssa-propagate.h"
41 #include "tree-chrec.h"
42 #include "gimple-fold.h"
/* Type of value ranges.  See value_range_d for a description of these
   types.  */
enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
49 /* Range of values that can be associated with an SSA_NAME after VRP
53 /* Lattice value represented by this range. */
54 enum value_range_type type;
56 /* Minimum and maximum values represented by this range. These
57 values should be interpreted as follows:
59 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
62 - If TYPE == VR_RANGE then MIN holds the minimum value and
63 MAX holds the maximum value of the range [MIN, MAX].
65 - If TYPE == ANTI_RANGE the variable is known to NOT
66 take any values in the range [MIN, MAX]. */
70 /* Set of SSA names whose value ranges are equivalent to this one.
71 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
75 typedef struct value_range_d value_range_t;
77 /* Set of SSA names found live during the RPO traversal of the function
78 for still active basic-blocks. */
81 /* Return true if the SSA name NAME is live on the edge E. */
84 live_on_edge (edge e, tree name)
86 return (live[e->dest->index]
87 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name)));
90 /* Local functions. */
91 static int compare_values (tree val1, tree val2);
92 static int compare_values_warnv (tree val1, tree val2, bool *);
93 static void vrp_meet (value_range_t *, value_range_t *);
94 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
95 tree, tree, bool, bool *,
98 /* Location information for ASSERT_EXPRs. Each instance of this
99 structure describes an ASSERT_EXPR for an SSA name. Since a single
100 SSA name may have more than one assertion associated with it, these
101 locations are kept in a linked list attached to the corresponding
103 struct assert_locus_d
105 /* Basic block where the assertion would be inserted. */
108 /* Some assertions need to be inserted on an edge (e.g., assertions
109 generated by COND_EXPRs). In those cases, BB will be NULL. */
112 /* Pointer to the statement that generated this assertion. */
113 gimple_stmt_iterator si;
115 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
116 enum tree_code comp_code;
118 /* Value being compared against. */
121 /* Expression to compare. */
124 /* Next node in the linked list. */
125 struct assert_locus_d *next;
128 typedef struct assert_locus_d *assert_locus_t;
130 /* If bit I is present, it means that SSA name N_i has a list of
131 assertions that should be inserted in the IL. */
132 static bitmap need_assert_for;
134 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
135 holds a list of ASSERT_LOCUS_T nodes that describe where
136 ASSERT_EXPRs for SSA name N_I should be inserted. */
137 static assert_locus_t *asserts_for;
139 /* Value range array. After propagation, VR_VALUE[I] holds the range
140 of values that SSA name N_I may take. */
141 static value_range_t **vr_value;
143 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
144 number of executable edges we saw the last time we visited the
146 static int *vr_phi_edge_counts;
153 static VEC (edge, heap) *to_remove_edges;
154 DEF_VEC_O(switch_update);
155 DEF_VEC_ALLOC_O(switch_update, heap);
156 static VEC (switch_update, heap) *to_update_switch_stmts;
159 /* Return the maximum value for TYPE. */
162 vrp_val_max (const_tree type)
164 if (!INTEGRAL_TYPE_P (type))
167 return TYPE_MAX_VALUE (type);
170 /* Return the minimum value for TYPE. */
173 vrp_val_min (const_tree type)
175 if (!INTEGRAL_TYPE_P (type))
178 return TYPE_MIN_VALUE (type);
181 /* Return whether VAL is equal to the maximum value of its type. This
182 will be true for a positive overflow infinity. We can't do a
183 simple equality comparison with TYPE_MAX_VALUE because C typedefs
184 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
185 to the integer constant with the same value in the type. */
188 vrp_val_is_max (const_tree val)
190 tree type_max = vrp_val_max (TREE_TYPE (val));
191 return (val == type_max
192 || (type_max != NULL_TREE
193 && operand_equal_p (val, type_max, 0)));
196 /* Return whether VAL is equal to the minimum value of its type. This
197 will be true for a negative overflow infinity. */
200 vrp_val_is_min (const_tree val)
202 tree type_min = vrp_val_min (TREE_TYPE (val));
203 return (val == type_min
204 || (type_min != NULL_TREE
205 && operand_equal_p (val, type_min, 0)));
209 /* Return whether TYPE should use an overflow infinity distinct from
210 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
211 represent a signed overflow during VRP computations. An infinity
212 is distinct from a half-range, which will go from some number to
213 TYPE_{MIN,MAX}_VALUE. */
216 needs_overflow_infinity (const_tree type)
218 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
221 /* Return whether TYPE can support our overflow infinity
222 representation: we use the TREE_OVERFLOW flag, which only exists
223 for constants. If TYPE doesn't support this, we don't optimize
224 cases which would require signed overflow--we drop them to
228 supports_overflow_infinity (const_tree type)
230 tree min = vrp_val_min (type), max = vrp_val_max (type);
231 #ifdef ENABLE_CHECKING
232 gcc_assert (needs_overflow_infinity (type));
234 return (min != NULL_TREE
235 && CONSTANT_CLASS_P (min)
237 && CONSTANT_CLASS_P (max));
240 /* VAL is the maximum or minimum value of a type. Return a
241 corresponding overflow infinity. */
244 make_overflow_infinity (tree val)
246 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
247 val = copy_node (val);
248 TREE_OVERFLOW (val) = 1;
252 /* Return a negative overflow infinity for TYPE. */
255 negative_overflow_infinity (tree type)
257 gcc_checking_assert (supports_overflow_infinity (type));
258 return make_overflow_infinity (vrp_val_min (type));
261 /* Return a positive overflow infinity for TYPE. */
264 positive_overflow_infinity (tree type)
266 gcc_checking_assert (supports_overflow_infinity (type));
267 return make_overflow_infinity (vrp_val_max (type));
270 /* Return whether VAL is a negative overflow infinity. */
273 is_negative_overflow_infinity (const_tree val)
275 return (needs_overflow_infinity (TREE_TYPE (val))
276 && CONSTANT_CLASS_P (val)
277 && TREE_OVERFLOW (val)
278 && vrp_val_is_min (val));
281 /* Return whether VAL is a positive overflow infinity. */
284 is_positive_overflow_infinity (const_tree val)
286 return (needs_overflow_infinity (TREE_TYPE (val))
287 && CONSTANT_CLASS_P (val)
288 && TREE_OVERFLOW (val)
289 && vrp_val_is_max (val));
292 /* Return whether VAL is a positive or negative overflow infinity. */
295 is_overflow_infinity (const_tree val)
297 return (needs_overflow_infinity (TREE_TYPE (val))
298 && CONSTANT_CLASS_P (val)
299 && TREE_OVERFLOW (val)
300 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
303 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
306 stmt_overflow_infinity (gimple stmt)
308 if (is_gimple_assign (stmt)
309 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
311 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
315 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
316 the same value with TREE_OVERFLOW clear. This can be used to avoid
317 confusing a regular value with an overflow value. */
320 avoid_overflow_infinity (tree val)
322 if (!is_overflow_infinity (val))
325 if (vrp_val_is_max (val))
326 return vrp_val_max (TREE_TYPE (val));
329 gcc_checking_assert (vrp_val_is_min (val));
330 return vrp_val_min (TREE_TYPE (val));
335 /* Return true if ARG is marked with the nonnull attribute in the
336 current function signature. */
339 nonnull_arg_p (const_tree arg)
341 tree t, attrs, fntype;
342 unsigned HOST_WIDE_INT arg_num;
344 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
346 /* The static chain decl is always non null. */
347 if (arg == cfun->static_chain_decl)
350 fntype = TREE_TYPE (current_function_decl);
351 attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));
353 /* If "nonnull" wasn't specified, we know nothing about the argument. */
354 if (attrs == NULL_TREE)
357 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
358 if (TREE_VALUE (attrs) == NULL_TREE)
361 /* Get the position number for ARG in the function signature. */
362 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
364 t = DECL_CHAIN (t), arg_num++)
370 gcc_assert (t == arg);
372 /* Now see if ARG_NUM is mentioned in the nonnull list. */
373 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
375 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
383 /* Set value range VR to VR_VARYING. */
386 set_value_range_to_varying (value_range_t *vr)
388 vr->type = VR_VARYING;
389 vr->min = vr->max = NULL_TREE;
391 bitmap_clear (vr->equiv);
395 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
398 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
399 tree max, bitmap equiv)
401 #if defined ENABLE_CHECKING
402 /* Check the validity of the range. */
403 if (t == VR_RANGE || t == VR_ANTI_RANGE)
407 gcc_assert (min && max);
409 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
410 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
412 cmp = compare_values (min, max);
413 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
415 if (needs_overflow_infinity (TREE_TYPE (min)))
416 gcc_assert (!is_overflow_infinity (min)
417 || !is_overflow_infinity (max));
420 if (t == VR_UNDEFINED || t == VR_VARYING)
421 gcc_assert (min == NULL_TREE && max == NULL_TREE);
423 if (t == VR_UNDEFINED || t == VR_VARYING)
424 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
431 /* Since updating the equivalence set involves deep copying the
432 bitmaps, only do it if absolutely necessary. */
433 if (vr->equiv == NULL
435 vr->equiv = BITMAP_ALLOC (NULL);
437 if (equiv != vr->equiv)
439 if (equiv && !bitmap_empty_p (equiv))
440 bitmap_copy (vr->equiv, equiv);
442 bitmap_clear (vr->equiv);
447 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
448 This means adjusting T, MIN and MAX representing the case of a
449 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
450 as anti-rage ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
451 In corner cases where MAX+1 or MIN-1 wraps this will fall back
453 This routine exists to ease canonicalization in the case where we
454 extract ranges from var + CST op limit. */
457 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
458 tree min, tree max, bitmap equiv)
460 /* Nothing to canonicalize for symbolic or unknown or varying ranges. */
462 && t != VR_ANTI_RANGE)
463 || TREE_CODE (min) != INTEGER_CST
464 || TREE_CODE (max) != INTEGER_CST)
466 set_value_range (vr, t, min, max, equiv);
470 /* Wrong order for min and max, to swap them and the VR type we need
472 if (tree_int_cst_lt (max, min))
474 tree one = build_int_cst (TREE_TYPE (min), 1);
475 tree tmp = int_const_binop (PLUS_EXPR, max, one);
476 max = int_const_binop (MINUS_EXPR, min, one);
479 /* There's one corner case, if we had [C+1, C] before we now have
480 that again. But this represents an empty value range, so drop
481 to varying in this case. */
482 if (tree_int_cst_lt (max, min))
484 set_value_range_to_varying (vr);
488 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
491 /* Anti-ranges that can be represented as ranges should be so. */
492 if (t == VR_ANTI_RANGE)
494 bool is_min = vrp_val_is_min (min);
495 bool is_max = vrp_val_is_max (max);
497 if (is_min && is_max)
499 /* We cannot deal with empty ranges, drop to varying. */
500 set_value_range_to_varying (vr);
504 /* As a special exception preserve non-null ranges. */
505 && !(TYPE_UNSIGNED (TREE_TYPE (min))
506 && integer_zerop (max)))
508 tree one = build_int_cst (TREE_TYPE (max), 1);
509 min = int_const_binop (PLUS_EXPR, max, one);
510 max = vrp_val_max (TREE_TYPE (max));
515 tree one = build_int_cst (TREE_TYPE (min), 1);
516 max = int_const_binop (MINUS_EXPR, min, one);
517 min = vrp_val_min (TREE_TYPE (min));
522 set_value_range (vr, t, min, max, equiv);
525 /* Copy value range FROM into value range TO. */
528 copy_value_range (value_range_t *to, value_range_t *from)
530 set_value_range (to, from->type, from->min, from->max, from->equiv);
533 /* Set value range VR to a single value. This function is only called
534 with values we get from statements, and exists to clear the
535 TREE_OVERFLOW flag so that we don't think we have an overflow
536 infinity when we shouldn't. */
539 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
541 gcc_assert (is_gimple_min_invariant (val));
542 val = avoid_overflow_infinity (val);
543 set_value_range (vr, VR_RANGE, val, val, equiv);
546 /* Set value range VR to a non-negative range of type TYPE.
547 OVERFLOW_INFINITY indicates whether to use an overflow infinity
548 rather than TYPE_MAX_VALUE; this should be true if we determine
549 that the range is nonnegative based on the assumption that signed
550 overflow does not occur. */
553 set_value_range_to_nonnegative (value_range_t *vr, tree type,
554 bool overflow_infinity)
558 if (overflow_infinity && !supports_overflow_infinity (type))
560 set_value_range_to_varying (vr);
564 zero = build_int_cst (type, 0);
565 set_value_range (vr, VR_RANGE, zero,
567 ? positive_overflow_infinity (type)
568 : TYPE_MAX_VALUE (type)),
572 /* Set value range VR to a non-NULL range of type TYPE. */
575 set_value_range_to_nonnull (value_range_t *vr, tree type)
577 tree zero = build_int_cst (type, 0);
578 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
582 /* Set value range VR to a NULL range of type TYPE. */
585 set_value_range_to_null (value_range_t *vr, tree type)
587 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
591 /* Set value range VR to a range of a truthvalue of type TYPE. */
594 set_value_range_to_truthvalue (value_range_t *vr, tree type)
596 if (TYPE_PRECISION (type) == 1)
597 set_value_range_to_varying (vr);
599 set_value_range (vr, VR_RANGE,
600 build_int_cst (type, 0), build_int_cst (type, 1),
605 /* Set value range VR to VR_UNDEFINED. */
608 set_value_range_to_undefined (value_range_t *vr)
610 vr->type = VR_UNDEFINED;
611 vr->min = vr->max = NULL_TREE;
613 bitmap_clear (vr->equiv);
617 /* If abs (min) < abs (max), set VR to [-max, max], if
618 abs (min) >= abs (max), set VR to [-min, min]. */
621 abs_extent_range (value_range_t *vr, tree min, tree max)
625 gcc_assert (TREE_CODE (min) == INTEGER_CST);
626 gcc_assert (TREE_CODE (max) == INTEGER_CST);
627 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
628 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
629 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
630 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
631 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
633 set_value_range_to_varying (vr);
636 cmp = compare_values (min, max);
638 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
639 else if (cmp == 0 || cmp == 1)
642 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
646 set_value_range_to_varying (vr);
649 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
653 /* Return value range information for VAR.
655 If we have no values ranges recorded (ie, VRP is not running), then
656 return NULL. Otherwise create an empty range if none existed for VAR. */
658 static value_range_t *
659 get_value_range (const_tree var)
663 unsigned ver = SSA_NAME_VERSION (var);
665 /* If we have no recorded ranges, then return NULL. */
673 /* Create a default value range. */
674 vr_value[ver] = vr = XCNEW (value_range_t);
676 /* Defer allocating the equivalence set. */
679 /* If VAR is a default definition, the variable can take any value
681 sym = SSA_NAME_VAR (var);
682 if (SSA_NAME_IS_DEFAULT_DEF (var))
684 /* Try to use the "nonnull" attribute to create ~[0, 0]
685 anti-ranges for pointers. Note that this is only valid with
686 default definitions of PARM_DECLs. */
687 if (TREE_CODE (sym) == PARM_DECL
688 && POINTER_TYPE_P (TREE_TYPE (sym))
689 && nonnull_arg_p (sym))
690 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
692 set_value_range_to_varying (vr);
698 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
701 vrp_operand_equal_p (const_tree val1, const_tree val2)
705 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
707 if (is_overflow_infinity (val1))
708 return is_overflow_infinity (val2);
712 /* Return true, if the bitmaps B1 and B2 are equal. */
715 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
718 || ((!b1 || bitmap_empty_p (b1))
719 && (!b2 || bitmap_empty_p (b2)))
721 && bitmap_equal_p (b1, b2)));
724 /* Update the value range and equivalence set for variable VAR to
725 NEW_VR. Return true if NEW_VR is different from VAR's previous
728 NOTE: This function assumes that NEW_VR is a temporary value range
729 object created for the sole purpose of updating VAR's range. The
730 storage used by the equivalence set from NEW_VR will be freed by
731 this function. Do not call update_value_range when NEW_VR
732 is the range object associated with another SSA name. */
735 update_value_range (const_tree var, value_range_t *new_vr)
737 value_range_t *old_vr;
740 /* Update the value range, if necessary. */
741 old_vr = get_value_range (var);
742 is_new = old_vr->type != new_vr->type
743 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
744 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
745 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
748 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
751 BITMAP_FREE (new_vr->equiv);
757 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
758 point where equivalence processing can be turned on/off. */
761 add_equivalence (bitmap *equiv, const_tree var)
763 unsigned ver = SSA_NAME_VERSION (var);
764 value_range_t *vr = vr_value[ver];
767 *equiv = BITMAP_ALLOC (NULL);
768 bitmap_set_bit (*equiv, ver);
770 bitmap_ior_into (*equiv, vr->equiv);
774 /* Return true if VR is ~[0, 0]. */
777 range_is_nonnull (value_range_t *vr)
779 return vr->type == VR_ANTI_RANGE
780 && integer_zerop (vr->min)
781 && integer_zerop (vr->max);
785 /* Return true if VR is [0, 0]. */
788 range_is_null (value_range_t *vr)
790 return vr->type == VR_RANGE
791 && integer_zerop (vr->min)
792 && integer_zerop (vr->max);
795 /* Return true if max and min of VR are INTEGER_CST. It's not necessary
799 range_int_cst_p (value_range_t *vr)
801 return (vr->type == VR_RANGE
802 && TREE_CODE (vr->max) == INTEGER_CST
803 && TREE_CODE (vr->min) == INTEGER_CST
804 && !TREE_OVERFLOW (vr->max)
805 && !TREE_OVERFLOW (vr->min));
808 /* Return true if VR is a INTEGER_CST singleton. */
811 range_int_cst_singleton_p (value_range_t *vr)
813 return (range_int_cst_p (vr)
814 && tree_int_cst_equal (vr->min, vr->max));
817 /* Return true if value range VR involves at least one symbol. */
820 symbolic_range_p (value_range_t *vr)
822 return (!is_gimple_min_invariant (vr->min)
823 || !is_gimple_min_invariant (vr->max));
826 /* Return true if value range VR uses an overflow infinity. */
829 overflow_infinity_range_p (value_range_t *vr)
831 return (vr->type == VR_RANGE
832 && (is_overflow_infinity (vr->min)
833 || is_overflow_infinity (vr->max)));
836 /* Return false if we can not make a valid comparison based on VR;
837 this will be the case if it uses an overflow infinity and overflow
838 is not undefined (i.e., -fno-strict-overflow is in effect).
839 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
840 uses an overflow infinity. */
843 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
845 gcc_assert (vr->type == VR_RANGE);
846 if (is_overflow_infinity (vr->min))
848 *strict_overflow_p = true;
849 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
852 if (is_overflow_infinity (vr->max))
854 *strict_overflow_p = true;
855 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
862 /* Like tree_expr_nonnegative_warnv_p, but this function uses value
863 ranges obtained so far. */
866 vrp_expr_computes_nonnegative (tree expr, bool *strict_overflow_p)
868 return (tree_expr_nonnegative_warnv_p (expr, strict_overflow_p)
869 || (TREE_CODE (expr) == SSA_NAME
870 && ssa_name_nonnegative_p (expr)));
873 /* Return true if the result of assignment STMT is know to be non-negative.
874 If the return value is based on the assumption that signed overflow is
875 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
876 *STRICT_OVERFLOW_P.*/
879 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
881 enum tree_code code = gimple_assign_rhs_code (stmt);
882 switch (get_gimple_rhs_class (code))
884 case GIMPLE_UNARY_RHS:
885 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
886 gimple_expr_type (stmt),
887 gimple_assign_rhs1 (stmt),
889 case GIMPLE_BINARY_RHS:
890 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
891 gimple_expr_type (stmt),
892 gimple_assign_rhs1 (stmt),
893 gimple_assign_rhs2 (stmt),
895 case GIMPLE_TERNARY_RHS:
897 case GIMPLE_SINGLE_RHS:
898 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
900 case GIMPLE_INVALID_RHS:
907 /* Return true if return value of call STMT is know to be non-negative.
908 If the return value is based on the assumption that signed overflow is
909 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
910 *STRICT_OVERFLOW_P.*/
913 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
915 tree arg0 = gimple_call_num_args (stmt) > 0 ?
916 gimple_call_arg (stmt, 0) : NULL_TREE;
917 tree arg1 = gimple_call_num_args (stmt) > 1 ?
918 gimple_call_arg (stmt, 1) : NULL_TREE;
920 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
921 gimple_call_fndecl (stmt),
927 /* Return true if STMT is know to to compute a non-negative value.
928 If the return value is based on the assumption that signed overflow is
929 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
930 *STRICT_OVERFLOW_P.*/
933 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
935 switch (gimple_code (stmt))
938 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
940 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
946 /* Return true if the result of assignment STMT is know to be non-zero.
947 If the return value is based on the assumption that signed overflow is
948 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
949 *STRICT_OVERFLOW_P.*/
952 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
954 enum tree_code code = gimple_assign_rhs_code (stmt);
955 switch (get_gimple_rhs_class (code))
957 case GIMPLE_UNARY_RHS:
958 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
959 gimple_expr_type (stmt),
960 gimple_assign_rhs1 (stmt),
962 case GIMPLE_BINARY_RHS:
963 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
964 gimple_expr_type (stmt),
965 gimple_assign_rhs1 (stmt),
966 gimple_assign_rhs2 (stmt),
968 case GIMPLE_TERNARY_RHS:
970 case GIMPLE_SINGLE_RHS:
971 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
973 case GIMPLE_INVALID_RHS:
980 /* Return true if STMT is know to to compute a non-zero value.
981 If the return value is based on the assumption that signed overflow is
982 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
983 *STRICT_OVERFLOW_P.*/
986 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
988 switch (gimple_code (stmt))
991 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
993 return gimple_alloca_call_p (stmt);
999 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1003 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
1005 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1008 /* If we have an expression of the form &X->a, then the expression
1009 is nonnull if X is nonnull. */
1010 if (is_gimple_assign (stmt)
1011 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1013 tree expr = gimple_assign_rhs1 (stmt);
1014 tree base = get_base_address (TREE_OPERAND (expr, 0));
1016 if (base != NULL_TREE
1017 && TREE_CODE (base) == MEM_REF
1018 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1020 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
1021 if (range_is_nonnull (vr))
1029 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1030 a gimple invariant, or SSA_NAME +- CST. */
1033 valid_value_p (tree expr)
1035 if (TREE_CODE (expr) == SSA_NAME)
1038 if (TREE_CODE (expr) == PLUS_EXPR
1039 || TREE_CODE (expr) == MINUS_EXPR)
1040 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1041 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1043 return is_gimple_min_invariant (expr);
1049 -2 if those are incomparable. */
1051 operand_less_p (tree val, tree val2)
1053 /* LT is folded faster than GE and others. Inline the common case. */
1054 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1056 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1057 return INT_CST_LT_UNSIGNED (val, val2);
1060 if (INT_CST_LT (val, val2))
1068 fold_defer_overflow_warnings ();
1070 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1072 fold_undefer_and_ignore_overflow_warnings ();
1075 || TREE_CODE (tcmp) != INTEGER_CST)
1078 if (!integer_zerop (tcmp))
1082 /* val >= val2, not considering overflow infinity. */
1083 if (is_negative_overflow_infinity (val))
1084 return is_negative_overflow_infinity (val2) ? 0 : 1;
1085 else if (is_positive_overflow_infinity (val2))
1086 return is_positive_overflow_infinity (val) ? 0 : 1;
1091 /* Compare two values VAL1 and VAL2. Return
1093 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1096 +1 if VAL1 > VAL2, and
1099 This is similar to tree_int_cst_compare but supports pointer values
1100 and values that cannot be compared at compile time.
1102 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1103 true if the return value is only valid if we assume that signed
1104 overflow is undefined. */
1107 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1112 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1114 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1115 == POINTER_TYPE_P (TREE_TYPE (val2)));
1116 /* Convert the two values into the same type. This is needed because
1117 sizetype causes sign extension even for unsigned types. */
1118 val2 = fold_convert (TREE_TYPE (val1), val2);
1119 STRIP_USELESS_TYPE_CONVERSION (val2);
1121 if ((TREE_CODE (val1) == SSA_NAME
1122 || TREE_CODE (val1) == PLUS_EXPR
1123 || TREE_CODE (val1) == MINUS_EXPR)
1124 && (TREE_CODE (val2) == SSA_NAME
1125 || TREE_CODE (val2) == PLUS_EXPR
1126 || TREE_CODE (val2) == MINUS_EXPR))
1128 tree n1, c1, n2, c2;
1129 enum tree_code code1, code2;
1131 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1132 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1133 same name, return -2. */
1134 if (TREE_CODE (val1) == SSA_NAME)
1142 code1 = TREE_CODE (val1);
1143 n1 = TREE_OPERAND (val1, 0);
1144 c1 = TREE_OPERAND (val1, 1);
1145 if (tree_int_cst_sgn (c1) == -1)
1147 if (is_negative_overflow_infinity (c1))
1149 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1152 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1156 if (TREE_CODE (val2) == SSA_NAME)
1164 code2 = TREE_CODE (val2);
1165 n2 = TREE_OPERAND (val2, 0);
1166 c2 = TREE_OPERAND (val2, 1);
1167 if (tree_int_cst_sgn (c2) == -1)
1169 if (is_negative_overflow_infinity (c2))
1171 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1174 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1178 /* Both values must use the same name. */
1182 if (code1 == SSA_NAME
1183 && code2 == SSA_NAME)
1187 /* If overflow is defined we cannot simplify more. */
1188 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1191 if (strict_overflow_p != NULL
1192 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1193 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1194 *strict_overflow_p = true;
1196 if (code1 == SSA_NAME)
1198 if (code2 == PLUS_EXPR)
1199 /* NAME < NAME + CST */
1201 else if (code2 == MINUS_EXPR)
1202 /* NAME > NAME - CST */
1205 else if (code1 == PLUS_EXPR)
1207 if (code2 == SSA_NAME)
1208 /* NAME + CST > NAME */
1210 else if (code2 == PLUS_EXPR)
1211 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1212 return compare_values_warnv (c1, c2, strict_overflow_p);
1213 else if (code2 == MINUS_EXPR)
1214 /* NAME + CST1 > NAME - CST2 */
1217 else if (code1 == MINUS_EXPR)
1219 if (code2 == SSA_NAME)
1220 /* NAME - CST < NAME */
1222 else if (code2 == PLUS_EXPR)
1223 /* NAME - CST1 < NAME + CST2 */
1225 else if (code2 == MINUS_EXPR)
1226 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1227 C1 and C2 are swapped in the call to compare_values. */
1228 return compare_values_warnv (c2, c1, strict_overflow_p);
1234 /* We cannot compare non-constants. */
1235 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1238 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1240 /* We cannot compare overflowed values, except for overflow
1242 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1244 if (strict_overflow_p != NULL)
1245 *strict_overflow_p = true;
1246 if (is_negative_overflow_infinity (val1))
1247 return is_negative_overflow_infinity (val2) ? 0 : -1;
1248 else if (is_negative_overflow_infinity (val2))
1250 else if (is_positive_overflow_infinity (val1))
1251 return is_positive_overflow_infinity (val2) ? 0 : 1;
1252 else if (is_positive_overflow_infinity (val2))
1257 return tree_int_cst_compare (val1, val2);
1263 /* First see if VAL1 and VAL2 are not the same. */
1264 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1267 /* If VAL1 is a lower address than VAL2, return -1. */
1268 if (operand_less_p (val1, val2) == 1)
1271 /* If VAL1 is a higher address than VAL2, return +1. */
1272 if (operand_less_p (val2, val1) == 1)
1275 /* If VAL1 is different than VAL2, return +2.
1276 For integer constants we either have already returned -1 or 1
1277 or they are equivalent. We still might succeed in proving
1278 something about non-trivial operands. */
1279 if (TREE_CODE (val1) != INTEGER_CST
1280 || TREE_CODE (val2) != INTEGER_CST)
1282 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1283 if (t && integer_onep (t))
1291 /* Compare values like compare_values_warnv, but treat comparisons of
1292 nonconstants which rely on undefined overflow as incomparable. */
1295 compare_values (tree val1, tree val2)
1301 ret = compare_values_warnv (val1, val2, &sop);
1303 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1309 /* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX),
1310 0 if VAL is not inside VR,
1311 -2 if we cannot tell either way.
1313 FIXME, the current semantics of this function are a bit quirky
1314 when taken in the context of VRP. In here we do not care
1315 about VR's type. If VR is the anti-range ~[3, 5] the call
1316 value_inside_range (4, VR) will return 1.
1318 This is counter-intuitive in a strict sense, but the callers
1319 currently expect this. They are calling the function
1320 merely to determine whether VR->MIN <= VAL <= VR->MAX. The
1321 callers are applying the VR_RANGE/VR_ANTI_RANGE semantics
1324 This also applies to value_ranges_intersect_p and
1325 range_includes_zero_p. The semantics of VR_RANGE and
1326 VR_ANTI_RANGE should be encoded here, but that also means
1327 adapting the users of these functions to the new semantics.
1329 Benchmark compile/20001226-1.c compilation time after changing this
1333 value_inside_range (tree val, value_range_t * vr)
/* Each endpoint test can itself be unknown (-2 from operand_less_p).  */
1337 cmp1 = operand_less_p (val, vr->min);
1343 cmp2 = operand_less_p (vr->max, val);
1351 /* Return true if value ranges VR0 and VR1 have a non-empty
1354 Benchmark compile/20001226-1.c compilation time after changing this
1359 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1361 /* The value ranges do not intersect if the maximum of the first range is
1362 less than the minimum of the second range or vice versa.
1363 When those relations are unknown, we can't do any better. */
/* NOTE(review): operand_less_p is tri-state (1/0/-2); "!= 0" treats the
   unknown case (-2) the same as "less than" -- callers appear to rely on
   this conservative answer, verify before changing.  */
1364 if (operand_less_p (vr0->max, vr1->min) != 0)
1366 if (operand_less_p (vr1->max, vr0->min) != 0)
1372 /* Return true if VR includes the value zero, false otherwise. FIXME,
1373 currently this will return false for an anti-range like ~[-4, 3].
1374 This will be wrong when the semantics of value_inside_range are
1375 modified (currently the users of this function expect these
1379 range_includes_zero_p (value_range_t *vr)
/* Callers must never pass an undefined, varying or symbolic range here.  */
1383 gcc_assert (vr->type != VR_UNDEFINED
1384 && vr->type != VR_VARYING
1385 && !symbolic_range_p (vr));
/* Build a zero constant of the range's own type so the comparison in
   value_inside_range is well-typed.  */
1387 zero = build_int_cst (TREE_TYPE (vr->min), 0);
1388 return (value_inside_range (zero, vr) == 1);
1391 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1392 false otherwise or if no value range information is available. */
1395 ssa_name_nonnegative_p (const_tree t)
1397 value_range_t *vr = get_value_range (t);
/* NOTE(review): INTEGRAL_TYPE_P and TYPE_UNSIGNED are normally applied to
   TREE_TYPE (t), not to the SSA_NAME itself -- verify this against the
   upstream source; if T is not a type node these macros inspect the wrong
   tree fields.  */
1399 if (INTEGRAL_TYPE_P (t)
1400 && TYPE_UNSIGNED (t))
1406 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1407 which would return a useful value should be encoded as a VR_RANGE. */
1408 if (vr->type == VR_RANGE)
1410 int result = compare_values (vr->min, integer_zero_node);
/* Nonnegative iff the range's minimum is >= 0.  */
1412 return (result == 0 || result == 1);
1417 /* If OP has a value range with a single constant value return that,
1418 otherwise return NULL_TREE. This returns OP itself if OP is a
1422 op_with_constant_singleton_value_range (tree op)
/* A constant is trivially its own singleton value.  */
1426 if (is_gimple_min_invariant (op))
1429 if (TREE_CODE (op) != SSA_NAME)
1432 vr = get_value_range (op);
/* A [X, X] range whose endpoint is an invariant is a usable constant.  */
1433 if (vr->type == VR_RANGE
1434 && operand_equal_p (vr->min, vr->max, 0)
1435 && is_gimple_min_invariant (vr->min))
1442 /* Extract value range information from an ASSERT_EXPR EXPR and store
1446 extract_range_from_assert (value_range_t *vr_p, tree expr)
1448 tree var, cond, limit, min, max, type;
1449 value_range_t *var_vr, *limit_vr;
1450 enum tree_code cond_code;
1452 var = ASSERT_EXPR_VAR (expr);
1453 cond = ASSERT_EXPR_COND (expr);
1455 gcc_assert (COMPARISON_CLASS_P (cond));
1457 /* Find VAR in the ASSERT_EXPR conditional. */
1458 if (var == TREE_OPERAND (cond, 0)
1459 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1460 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1462 /* If the predicate is of the form VAR COMP LIMIT, then we just
1463 take LIMIT from the RHS and use the same comparison code. */
1464 cond_code = TREE_CODE (cond);
1465 limit = TREE_OPERAND (cond, 1);
1466 cond = TREE_OPERAND (cond, 0);
1470 /* If the predicate is of the form LIMIT COMP VAR, then we need
1471 to flip around the comparison code to create the proper range
1473 cond_code = swap_tree_comparison (TREE_CODE (cond));
1474 limit = TREE_OPERAND (cond, 0);
1475 cond = TREE_OPERAND (cond, 1);
1478 limit = avoid_overflow_infinity (limit);
1480 type = TREE_TYPE (limit);
1481 gcc_assert (limit != var);
1483 /* For pointer arithmetic, we only keep track of pointer equality
1485 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1487 set_value_range_to_varying (vr_p);
1491 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1492 try to use LIMIT's range to avoid creating symbolic ranges
1494 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1496 /* LIMIT's range is only interesting if it has any useful information. */
1498 && (limit_vr->type == VR_UNDEFINED
1499 || limit_vr->type == VR_VARYING
1500 || symbolic_range_p (limit_vr)))
1503 /* Initially, the new range has the same set of equivalences of
1504 VAR's range. This will be revised before returning the final
1505 value. Since assertions may be chained via mutually exclusive
1506 predicates, we will need to trim the set of equivalences before
1508 gcc_assert (vr_p->equiv == NULL);
1509 add_equivalence (&vr_p->equiv, var);
1511 /* Extract a new range based on the asserted comparison for VAR and
1512 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1513 will only use it for equality comparisons (EQ_EXPR). For any
1514 other kind of assertion, we cannot derive a range from LIMIT's
1515 anti-range that can be used to describe the new range. For
1516 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1517 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1518 no single range for x_2 that could describe LE_EXPR, so we might
1519 as well build the range [b_4, +INF] for it.
1520 One special case we handle is extracting a range from a
1521 range test encoded as (unsigned)var + CST <= limit. */
1522 if (TREE_CODE (cond) == NOP_EXPR
1523 || TREE_CODE (cond) == PLUS_EXPR)
1525 if (TREE_CODE (cond) == PLUS_EXPR)
1527 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1528 TREE_OPERAND (cond, 1));
1529 max = int_const_binop (PLUS_EXPR, limit, min);
1530 cond = TREE_OPERAND (cond, 0);
1534 min = build_int_cst (TREE_TYPE (var), 0);
1538 /* Make sure to not set TREE_OVERFLOW on the final type
1539 conversion. We are willingly interpreting large positive
1540 unsigned values as negative signed values here. */
1541 min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
1543 max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
1546 /* We can transform a max, min range to an anti-range or
1547 vice-versa. Use set_and_canonicalize_value_range which does
1549 if (cond_code == LE_EXPR)
1550 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1551 min, max, vr_p->equiv);
1552 else if (cond_code == GT_EXPR)
1553 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1554 min, max, vr_p->equiv);
1558 else if (cond_code == EQ_EXPR)
1560 enum value_range_type range_type;
1564 range_type = limit_vr->type;
1565 min = limit_vr->min;
1566 max = limit_vr->max;
1570 range_type = VR_RANGE;
1575 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1577 /* When asserting the equality VAR == LIMIT and LIMIT is another
1578 SSA name, the new range will also inherit the equivalence set
1580 if (TREE_CODE (limit) == SSA_NAME)
1581 add_equivalence (&vr_p->equiv, limit);
1583 else if (cond_code == NE_EXPR)
1585 /* As described above, when LIMIT's range is an anti-range and
1586 this assertion is an inequality (NE_EXPR), then we cannot
1587 derive anything from the anti-range. For instance, if
1588 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1589 not imply that VAR's range is [0, 0]. So, in the case of
1590 anti-ranges, we just assert the inequality using LIMIT and
1593 If LIMIT_VR is a range, we can only use it to build a new
1594 anti-range if LIMIT_VR is a single-valued range. For
1595 instance, if LIMIT_VR is [0, 1], the predicate
1596 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1597 Rather, it means that for value 0 VAR should be ~[0, 0]
1598 and for value 1, VAR should be ~[1, 1]. We cannot
1599 represent these ranges.
1601 The only situation in which we can build a valid
1602 anti-range is when LIMIT_VR is a single-valued range
1603 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1604 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1606 && limit_vr->type == VR_RANGE
1607 && compare_values (limit_vr->min, limit_vr->max) == 0)
1609 min = limit_vr->min;
1610 max = limit_vr->max;
1614 /* In any other case, we cannot use LIMIT's range to build a
1615 valid anti-range. */
1619 /* If MIN and MAX cover the whole range for their type, then
1620 just use the original LIMIT. */
1621 if (INTEGRAL_TYPE_P (type)
1622 && vrp_val_is_min (min)
1623 && vrp_val_is_max (max))
1626 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
1628 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1630 min = TYPE_MIN_VALUE (type);
1632 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1636 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1637 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1639 max = limit_vr->max;
1642 /* If the maximum value forces us to be out of bounds, simply punt.
1643 It would be pointless to try and do anything more since this
1644 all should be optimized away above us. */
1645 if ((cond_code == LT_EXPR
1646 && compare_values (max, min) == 0)
1647 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1648 set_value_range_to_varying (vr_p);
1651 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1652 if (cond_code == LT_EXPR)
1654 tree one = build_int_cst (type, 1);
1655 max = fold_build2 (MINUS_EXPR, type, max, one);
1657 TREE_NO_WARNING (max) = 1;
1660 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1663 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1665 max = TYPE_MAX_VALUE (type);
1667 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1671 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1672 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1674 min = limit_vr->min;
1677 /* If the minimum value forces us to be out of bounds, simply punt.
1678 It would be pointless to try and do anything more since this
1679 all should be optimized away above us. */
1680 if ((cond_code == GT_EXPR
1681 && compare_values (min, max) == 0)
1682 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1683 set_value_range_to_varying (vr_p);
1686 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1687 if (cond_code == GT_EXPR)
1689 tree one = build_int_cst (type, 1);
1690 min = fold_build2 (PLUS_EXPR, type, min, one);
1692 TREE_NO_WARNING (min) = 1;
1695 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1701 /* If VAR already had a known range, it may happen that the new
1702 range we have computed and VAR's range are not compatible. For
1706 p_6 = ASSERT_EXPR <p_5, p_5 == NULL>;
1708 p_8 = ASSERT_EXPR <p_6, p_6 != NULL>;
1710 While the above comes from a faulty program, it will cause an ICE
1711 later because p_8 and p_6 will have incompatible ranges and at
1712 the same time will be considered equivalent. A similar situation
1716 i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
1718 i_7 = ASSERT_EXPR <i_6, i_6 < 5>;
1720 Again i_6 and i_7 will have incompatible ranges. It would be
1721 pointless to try and do anything with i_7's range because
1722 anything dominated by 'if (i_5 < 5)' will be optimized away.
1723 Note, due to the way in which simulation proceeds, the statement
1724 i_7 = ASSERT_EXPR <...> we would never be visited because the
1725 conditional 'if (i_5 < 5)' always evaluates to false. However,
1726 this extra check does not hurt and may protect against future
1727 changes to VRP that may get into a situation similar to the
1728 NULL pointer dereference example.
1730 Note that these compatibility tests are only needed when dealing
1731 with ranges or a mix of range and anti-range. If VAR_VR and VR_P
1732 are both anti-ranges, they will always be compatible, because two
1733 anti-ranges will always have a non-empty intersection. */
1735 var_vr = get_value_range (var);
1737 /* We may need to make adjustments when VR_P and VAR_VR are numeric
1738 ranges or anti-ranges. */
1739 if (vr_p->type == VR_VARYING
1740 || vr_p->type == VR_UNDEFINED
1741 || var_vr->type == VR_VARYING
1742 || var_vr->type == VR_UNDEFINED
1743 || symbolic_range_p (vr_p)
1744 || symbolic_range_p (var_vr))
1747 if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
1749 /* If the two ranges have a non-empty intersection, we can
1750 refine the resulting range. Since the assert expression
1751 creates an equivalency and at the same time it asserts a
1752 predicate, we can take the intersection of the two ranges to
1753 get better precision. */
1754 if (value_ranges_intersect_p (var_vr, vr_p))
1756 /* Use the larger of the two minimums. */
1757 if (compare_values (vr_p->min, var_vr->min) == -1)
1762 /* Use the smaller of the two maximums. */
1763 if (compare_values (vr_p->max, var_vr->max) == 1)
1768 set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
1772 /* The two ranges do not intersect, set the new range to
1773 VARYING, because we will not be able to do anything
1774 meaningful with it. */
1775 set_value_range_to_varying (vr_p);
1778 else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
1779 || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
1781 /* A range and an anti-range will cancel each other only if
1782 their ends are the same. For instance, in the example above,
1783 p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
1784 so VR_P should be set to VR_VARYING. */
1785 if (compare_values (var_vr->min, vr_p->min) == 0
1786 && compare_values (var_vr->max, vr_p->max) == 0)
1787 set_value_range_to_varying (vr_p);
1790 tree min, max, anti_min, anti_max, real_min, real_max;
1793 /* We want to compute the logical AND of the two ranges;
1794 there are three cases to consider.
1797 1. The VR_ANTI_RANGE range is completely within the
1798 VR_RANGE and the endpoints of the ranges are
1799 different. In that case the resulting range
1800 should be whichever range is more precise.
1801 Typically that will be the VR_RANGE.
1803 2. The VR_ANTI_RANGE is completely disjoint from
1804 the VR_RANGE. In this case the resulting range
1805 should be the VR_RANGE.
1807 3. There is some overlap between the VR_ANTI_RANGE
1810 3a. If the high limit of the VR_ANTI_RANGE resides
1811 within the VR_RANGE, then the result is a new
1812 VR_RANGE starting at the high limit of the
1813 VR_ANTI_RANGE + 1 and extending to the
1814 high limit of the original VR_RANGE.
1816 3b. If the low limit of the VR_ANTI_RANGE resides
1817 within the VR_RANGE, then the result is a new
1818 VR_RANGE starting at the low limit of the original
1819 VR_RANGE and extending to the low limit of the
1820 VR_ANTI_RANGE - 1. */
1821 if (vr_p->type == VR_ANTI_RANGE)
1823 anti_min = vr_p->min;
1824 anti_max = vr_p->max;
1825 real_min = var_vr->min;
1826 real_max = var_vr->max;
1830 anti_min = var_vr->min;
1831 anti_max = var_vr->max;
1832 real_min = vr_p->min;
1833 real_max = vr_p->max;
1837 /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
1838 not including any endpoints. */
1839 if (compare_values (anti_max, real_max) == -1
1840 && compare_values (anti_min, real_min) == 1)
1842 /* If the range is covering the whole valid range of
1843 the type keep the anti-range. */
1844 if (!vrp_val_is_min (real_min)
1845 || !vrp_val_is_max (real_max))
1846 set_value_range (vr_p, VR_RANGE, real_min,
1847 real_max, vr_p->equiv);
1849 /* Case 2, VR_ANTI_RANGE completely disjoint from
1851 else if (compare_values (anti_min, real_max) == 1
1852 || compare_values (anti_max, real_min) == -1)
1854 set_value_range (vr_p, VR_RANGE, real_min,
1855 real_max, vr_p->equiv);
1857 /* Case 3a, the anti-range extends into the low
1858 part of the real range. Thus creating a new
1859 low for the real range. */
1860 else if (((cmp = compare_values (anti_max, real_min)) == 1
1862 && compare_values (anti_max, real_max) == -1)
1864 gcc_assert (!is_positive_overflow_infinity (anti_max));
1865 if (needs_overflow_infinity (TREE_TYPE (anti_max))
1866 && vrp_val_is_max (anti_max))
1868 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1870 set_value_range_to_varying (vr_p);
1873 min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
1875 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1876 min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1878 build_int_cst (TREE_TYPE (var_vr->min), 1));
1880 min = fold_build_pointer_plus_hwi (anti_max, 1);
1882 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1884 /* Case 3b, the anti-range extends into the high
1885 part of the real range. Thus creating a new
1886 higher for the real range. */
1887 else if (compare_values (anti_min, real_min) == 1
1888 && ((cmp = compare_values (anti_min, real_max)) == -1
1891 gcc_assert (!is_negative_overflow_infinity (anti_min));
1892 if (needs_overflow_infinity (TREE_TYPE (anti_min))
1893 && vrp_val_is_min (anti_min))
1895 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1897 set_value_range_to_varying (vr_p);
1900 max = negative_overflow_infinity (TREE_TYPE (var_vr->min));
1902 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1903 max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1905 build_int_cst (TREE_TYPE (var_vr->min), 1));
1907 max = fold_build_pointer_plus_hwi (anti_min, -1);
1909 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1916 /* Extract range information from SSA name VAR and store it in VR. If
1917 VAR has an interesting range, use it. Otherwise, create the
1918 range [VAR, VAR] and return it. This is useful in situations where
1919 we may have conditionals testing values of VARYING names. For
1926 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1930 extract_range_from_ssa_name (value_range_t *vr, tree var)
1932 value_range_t *var_vr = get_value_range (var);
1934 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1935 copy_value_range (vr, var_vr);
/* No usable range: fall back to the symbolic singleton [VAR, VAR].  */
1937 set_value_range (vr, VR_RANGE, var, var, NULL);
1939 add_equivalence (&vr->equiv, var);
1943 /* Wrapper around int_const_binop. If the operation overflows and we
1944 are not using wrapping arithmetic, then adjust the result to be
1945 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1946 NULL_TREE if we need to use an overflow infinity representation but
1947 the type does not support it. */
1950 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1954 res = int_const_binop (code, val1, val2);
1956 /* If we are using unsigned arithmetic, operate symbolically
1957 on -INF and +INF as int_const_binop only handles signed overflow. */
1958 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1960 int checkz = compare_values (res, val1);
1961 bool overflow = false;
1963 /* Ensure that res = val1 [+*] val2 >= val1
1964 or that res = val1 - val2 <= val1. */
1965 if ((code == PLUS_EXPR
1966 && !(checkz == 1 || checkz == 0))
1967 || (code == MINUS_EXPR
1968 && !(checkz == 0 || checkz == -1)))
1972 /* Checking for multiplication overflow is done by dividing the
1973 output of the multiplication by the first input of the
1974 multiplication. If the result of that division operation is
1975 not equal to the second input of the multiplication, then the
1976 multiplication overflowed. */
1977 else if (code == MULT_EXPR && !integer_zerop (val1))
1979 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1982 int check = compare_values (tmp, val2);
/* Mark the result as overflowed on a private copy so shared
   constants are not mutated.  */
1990 res = copy_node (res);
1991 TREE_OVERFLOW (res) = 1;
1995 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1996 /* If the signed operation wraps then int_const_binop has done
1997 everything we want. */
1999 else if ((TREE_OVERFLOW (res)
2000 && !TREE_OVERFLOW (val1)
2001 && !TREE_OVERFLOW (val2))
2002 || is_overflow_infinity (val1)
2003 || is_overflow_infinity (val2))
2005 /* If the operation overflowed but neither VAL1 nor VAL2 are
2006 overflown, return -INF or +INF depending on the operation
2007 and the combination of signs of the operands. */
2008 int sgn1 = tree_int_cst_sgn (val1);
2009 int sgn2 = tree_int_cst_sgn (val2);
2011 if (needs_overflow_infinity (TREE_TYPE (res))
2012 && !supports_overflow_infinity (TREE_TYPE (res)))
2015 /* We have to punt on adding infinities of different signs,
2016 since we can't tell what the sign of the result should be.
2017 Likewise for subtracting infinities of the same sign. */
2018 if (((code == PLUS_EXPR && sgn1 != sgn2)
2019 || (code == MINUS_EXPR && sgn1 == sgn2))
2020 && is_overflow_infinity (val1)
2021 && is_overflow_infinity (val2))
2024 /* Don't try to handle division or shifting of infinities. */
2025 if ((code == TRUNC_DIV_EXPR
2026 || code == FLOOR_DIV_EXPR
2027 || code == CEIL_DIV_EXPR
2028 || code == EXACT_DIV_EXPR
2029 || code == ROUND_DIV_EXPR
2030 || code == RSHIFT_EXPR)
2031 && (is_overflow_infinity (val1)
2032 || is_overflow_infinity (val2)))
2035 /* Notice that we only need to handle the restricted set of
2036 operations handled by extract_range_from_binary_expr.
2037 Among them, only multiplication, addition and subtraction
2038 can yield overflow without overflown operands because we
2039 are working with integral types only... except in the
2040 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
2041 for division too. */
2043 /* For multiplication, the sign of the overflow is given
2044 by the comparison of the signs of the operands. */
2045 if ((code == MULT_EXPR && sgn1 == sgn2)
2046 /* For addition, the operands must be of the same sign
2047 to yield an overflow. Its sign is therefore that
2048 of one of the operands, for example the first. For
2049 infinite operands X + -INF is negative, not positive. */
2050 || (code == PLUS_EXPR
2052 ? !is_negative_overflow_infinity (val2)
2053 : is_positive_overflow_infinity (val2)))
2054 /* For subtraction, non-infinite operands must be of
2055 different signs to yield an overflow. Its sign is
2056 therefore that of the first operand or the opposite of
2057 that of the second operand. A first operand of 0 counts
2058 as positive here, for the corner case 0 - (-INF), which
2059 overflows, but must yield +INF. For infinite operands 0
2060 - INF is negative, not positive. */
2061 || (code == MINUS_EXPR
2063 ? !is_positive_overflow_infinity (val2)
2064 : is_negative_overflow_infinity (val2)))
2065 /* We only get in here with positive shift count, so the
2066 overflow direction is the same as the sign of val1.
2067 Actually rshift does not overflow at all, but we only
2068 handle the case of shifting overflowed -INF and +INF. */
2069 || (code == RSHIFT_EXPR
2071 /* For division, the only case is -INF / -1 = +INF. */
2072 || code == TRUNC_DIV_EXPR
2073 || code == FLOOR_DIV_EXPR
2074 || code == CEIL_DIV_EXPR
2075 || code == EXACT_DIV_EXPR
2076 || code == ROUND_DIV_EXPR)
2077 return (needs_overflow_infinity (TREE_TYPE (res))
2078 ? positive_overflow_infinity (TREE_TYPE (res))
2079 : TYPE_MAX_VALUE (TREE_TYPE (res)));
2081 return (needs_overflow_infinity (TREE_TYPE (res))
2082 ? negative_overflow_infinity (TREE_TYPE (res))
2083 : TYPE_MIN_VALUE (TREE_TYPE (res)));
2090 /* For range VR compute two double_int bitmasks. In *MAY_BE_NONZERO
2091 bitmask if some bit is unset, it means for all numbers in the range
2092 the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO
2093 bitmask if some bit is set, it means for all numbers in the range
2094 the bit is 1, otherwise it might be 0 or 1. */
2097 zero_nonzero_bits_from_vr (value_range_t *vr, double_int *may_be_nonzero,
2098 double_int *must_be_nonzero)
2100 if (range_int_cst_p (vr))
2102 if (range_int_cst_singleton_p (vr))
/* Singleton [X, X]: the bits of X are known exactly.  */
2104 *may_be_nonzero = tree_to_double_int (vr->min);
2105 *must_be_nonzero = *may_be_nonzero;
2108 if (tree_int_cst_sgn (vr->min) >= 0)
2110 double_int dmin = tree_to_double_int (vr->min);
2111 double_int dmax = tree_to_double_int (vr->max);
/* XOR_MASK has a set bit wherever MIN and MAX disagree; every bit
   at or below the highest such bit may take either value somewhere
   in [MIN, MAX], so widen the masks accordingly below.  */
2112 double_int xor_mask = double_int_xor (dmin, dmax);
2113 *may_be_nonzero = double_int_ior (dmin, dmax);
2114 *must_be_nonzero = double_int_and (dmin, dmax);
2115 if (xor_mask.high != 0)
2117 unsigned HOST_WIDE_INT mask
2118 = ((unsigned HOST_WIDE_INT) 1
2119 << floor_log2 (xor_mask.high)) - 1;
2120 may_be_nonzero->low = ALL_ONES;
2121 may_be_nonzero->high |= mask;
2122 must_be_nonzero->low = 0;
2123 must_be_nonzero->high &= ~mask;
2125 else if (xor_mask.low != 0)
2127 unsigned HOST_WIDE_INT mask
2128 = ((unsigned HOST_WIDE_INT) 1
2129 << floor_log2 (xor_mask.low)) - 1;
2130 may_be_nonzero->low |= mask;
2131 must_be_nonzero->low &= ~mask;
/* Fallback: nothing is known; every bit may be nonzero, none must be.  */
2136 may_be_nonzero->low = ALL_ONES;
2137 may_be_nonzero->high = ALL_ONES;
2138 must_be_nonzero->low = 0;
2139 must_be_nonzero->high = 0;
2144 /* Extract range information from a binary expression EXPR based on
2145 the ranges of each of its operands and the expression code. */
2148 extract_range_from_binary_expr (value_range_t *vr,
2149 enum tree_code code,
2150 tree expr_type, tree op0, tree op1)
2152 enum value_range_type type;
2155 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2156 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2158 /* Not all binary expressions can be applied to ranges in a
2159 meaningful way. Handle only arithmetic operations. */
2160 if (code != PLUS_EXPR
2161 && code != MINUS_EXPR
2162 && code != POINTER_PLUS_EXPR
2163 && code != MULT_EXPR
2164 && code != TRUNC_DIV_EXPR
2165 && code != FLOOR_DIV_EXPR
2166 && code != CEIL_DIV_EXPR
2167 && code != EXACT_DIV_EXPR
2168 && code != ROUND_DIV_EXPR
2169 && code != TRUNC_MOD_EXPR
2170 && code != RSHIFT_EXPR
2173 && code != BIT_AND_EXPR
2174 && code != BIT_IOR_EXPR
2175 && code != TRUTH_AND_EXPR
2176 && code != TRUTH_OR_EXPR)
2178 /* We can still do constant propagation here. */
2179 tree const_op0 = op_with_constant_singleton_value_range (op0);
2180 tree const_op1 = op_with_constant_singleton_value_range (op1);
2181 if (const_op0 || const_op1)
2183 tree tem = fold_binary (code, expr_type,
2184 const_op0 ? const_op0 : op0,
2185 const_op1 ? const_op1 : op1);
2187 && is_gimple_min_invariant (tem)
2188 && !is_overflow_infinity (tem))
2190 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2194 set_value_range_to_varying (vr);
2198 /* Get value ranges for each operand. For constant operands, create
2199 a new value range with the operand to simplify processing. */
2200 if (TREE_CODE (op0) == SSA_NAME)
2201 vr0 = *(get_value_range (op0));
2202 else if (is_gimple_min_invariant (op0))
2203 set_value_range_to_value (&vr0, op0, NULL);
2205 set_value_range_to_varying (&vr0);
2207 if (TREE_CODE (op1) == SSA_NAME)
2208 vr1 = *(get_value_range (op1));
2209 else if (is_gimple_min_invariant (op1))
2210 set_value_range_to_value (&vr1, op1, NULL);
2212 set_value_range_to_varying (&vr1);
2214 /* If either range is UNDEFINED, so is the result. */
2215 if (vr0.type == VR_UNDEFINED || vr1.type == VR_UNDEFINED)
2217 set_value_range_to_undefined (vr);
2221 /* The type of the resulting value range defaults to VR0.TYPE. */
2224 /* Refuse to operate on VARYING ranges, ranges of different kinds
2225 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2226 because we may be able to derive a useful range even if one of
2227 the operands is VR_VARYING or symbolic range. Similarly for
2228 divisions. TODO, we may be able to derive anti-ranges in
2230 if (code != BIT_AND_EXPR
2231 && code != TRUTH_AND_EXPR
2232 && code != TRUTH_OR_EXPR
2233 && code != TRUNC_DIV_EXPR
2234 && code != FLOOR_DIV_EXPR
2235 && code != CEIL_DIV_EXPR
2236 && code != EXACT_DIV_EXPR
2237 && code != ROUND_DIV_EXPR
2238 && code != TRUNC_MOD_EXPR
2239 && (vr0.type == VR_VARYING
2240 || vr1.type == VR_VARYING
2241 || vr0.type != vr1.type
2242 || symbolic_range_p (&vr0)
2243 || symbolic_range_p (&vr1)))
2245 set_value_range_to_varying (vr);
2249 /* Now evaluate the expression to determine the new range. */
2250 if (POINTER_TYPE_P (expr_type)
2251 || POINTER_TYPE_P (TREE_TYPE (op0))
2252 || POINTER_TYPE_P (TREE_TYPE (op1)))
2254 if (code == MIN_EXPR || code == MAX_EXPR)
2256 /* For MIN/MAX expressions with pointers, we only care about
2257 nullness, if both are non null, then the result is nonnull.
2258 If both are null, then the result is null. Otherwise they
2260 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2261 set_value_range_to_nonnull (vr, expr_type);
2262 else if (range_is_null (&vr0) && range_is_null (&vr1))
2263 set_value_range_to_null (vr, expr_type);
2265 set_value_range_to_varying (vr);
2269 if (code == POINTER_PLUS_EXPR)
2271 /* For pointer types, we are really only interested in asserting
2272 whether the expression evaluates to non-NULL. */
2273 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2274 set_value_range_to_nonnull (vr, expr_type);
2275 else if (range_is_null (&vr0) && range_is_null (&vr1))
2276 set_value_range_to_null (vr, expr_type);
2278 set_value_range_to_varying (vr);
2280 else if (code == BIT_AND_EXPR)
2282 /* For pointer types, we are really only interested in asserting
2283 whether the expression evaluates to non-NULL. */
2284 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2285 set_value_range_to_nonnull (vr, expr_type);
2286 else if (range_is_null (&vr0) || range_is_null (&vr1))
2287 set_value_range_to_null (vr, expr_type);
2289 set_value_range_to_varying (vr);
2297 /* For integer ranges, apply the operation to each end of the
2298 range and see what we end up with. */
2299 if (code == TRUTH_AND_EXPR
2300 || code == TRUTH_OR_EXPR)
2302 /* If one of the operands is zero, we know that the whole
2303 expression evaluates zero. */
2304 if (code == TRUTH_AND_EXPR
2305 && ((vr0.type == VR_RANGE
2306 && integer_zerop (vr0.min)
2307 && integer_zerop (vr0.max))
2308 || (vr1.type == VR_RANGE
2309 && integer_zerop (vr1.min)
2310 && integer_zerop (vr1.max))))
2313 min = max = build_int_cst (expr_type, 0);
2315 /* If one of the operands is one, we know that the whole
2316 expression evaluates one. */
2317 else if (code == TRUTH_OR_EXPR
2318 && ((vr0.type == VR_RANGE
2319 && integer_onep (vr0.min)
2320 && integer_onep (vr0.max))
2321 || (vr1.type == VR_RANGE
2322 && integer_onep (vr1.min)
2323 && integer_onep (vr1.max))))
2326 min = max = build_int_cst (expr_type, 1);
2328 else if (vr0.type != VR_VARYING
2329 && vr1.type != VR_VARYING
2330 && vr0.type == vr1.type
2331 && !symbolic_range_p (&vr0)
2332 && !overflow_infinity_range_p (&vr0)
2333 && !symbolic_range_p (&vr1)
2334 && !overflow_infinity_range_p (&vr1))
2336 /* Boolean expressions cannot be folded with int_const_binop. */
2337 min = fold_binary (code, expr_type, vr0.min, vr1.min);
2338 max = fold_binary (code, expr_type, vr0.max, vr1.max);
2342 /* The result of a TRUTH_*_EXPR is always true or false. */
2343 set_value_range_to_truthvalue (vr, expr_type);
2347 else if (code == PLUS_EXPR
2349 || code == MAX_EXPR)
2351 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
2352 VR_VARYING. It would take more effort to compute a precise
2353 range for such a case. For example, if we have op0 == 1 and
2354 op1 == -1 with their ranges both being ~[0,0], we would have
2355 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
2356 Note that we are guaranteed to have vr0.type == vr1.type at
2358 if (vr0.type == VR_ANTI_RANGE)
2360 if (code == PLUS_EXPR)
2362 set_value_range_to_varying (vr);
2365 /* For MIN_EXPR and MAX_EXPR with two VR_ANTI_RANGEs,
2366 the resulting VR_ANTI_RANGE is the same - intersection
2367 of the two ranges. */
2368 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
2369 max = vrp_int_const_binop (MIN_EXPR, vr0.max, vr1.max);
2373 /* For operations that make the resulting range directly
2374 proportional to the original ranges, apply the operation to
2375 the same end of each range. */
2376 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2377 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2380 /* If both additions overflowed the range kind is still correct.
2381 This happens regularly with subtracting something in unsigned
2383 ??? See PR30318 for all the cases we do not handle. */
2384 if (code == PLUS_EXPR
2385 && (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2386 && (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2388 min = build_int_cst_wide (TREE_TYPE (min),
2389 TREE_INT_CST_LOW (min),
2390 TREE_INT_CST_HIGH (min));
2391 max = build_int_cst_wide (TREE_TYPE (max),
2392 TREE_INT_CST_LOW (max),
2393 TREE_INT_CST_HIGH (max));
2396 else if (code == MULT_EXPR
2397 || code == TRUNC_DIV_EXPR
2398 || code == FLOOR_DIV_EXPR
2399 || code == CEIL_DIV_EXPR
2400 || code == EXACT_DIV_EXPR
2401 || code == ROUND_DIV_EXPR
2402 || code == RSHIFT_EXPR)
2408 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2409 drop to VR_VARYING. It would take more effort to compute a
2410 precise range for such a case. For example, if we have
2411 op0 == 65536 and op1 == 65536 with their ranges both being
2412 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2413 we cannot claim that the product is in ~[0,0]. Note that we
2414 are guaranteed to have vr0.type == vr1.type at this
2416 if (code == MULT_EXPR
2417 && vr0.type == VR_ANTI_RANGE
2418 && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))
2420 set_value_range_to_varying (vr);
2424 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2425 then drop to VR_VARYING. Outside of this range we get undefined
2426 behavior from the shift operation. We cannot even trust
2427 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2428 shifts, and the operation at the tree level may be widened. */
2429 if (code == RSHIFT_EXPR)
2431 if (vr1.type == VR_ANTI_RANGE
2432 || !vrp_expr_computes_nonnegative (op1, &sop)
2434 (build_int_cst (TREE_TYPE (vr1.max),
2435 TYPE_PRECISION (expr_type) - 1),
2438 set_value_range_to_varying (vr);
2443 else if ((code == TRUNC_DIV_EXPR
2444 || code == FLOOR_DIV_EXPR
2445 || code == CEIL_DIV_EXPR
2446 || code == EXACT_DIV_EXPR
2447 || code == ROUND_DIV_EXPR)
2448 && (vr0.type != VR_RANGE || symbolic_range_p (&vr0)))
2450 /* For division, if op1 has VR_RANGE but op0 does not, something
2451 can be deduced just from that range. Say [min, max] / [4, max]
2452 gives [min / 4, max / 4] range. */
2453 if (vr1.type == VR_RANGE
2454 && !symbolic_range_p (&vr1)
2455 && !range_includes_zero_p (&vr1))
2457 vr0.type = type = VR_RANGE;
2458 vr0.min = vrp_val_min (TREE_TYPE (op0));
2459 vr0.max = vrp_val_max (TREE_TYPE (op1));
2463 set_value_range_to_varying (vr);
2468 /* For divisions, if flag_non_call_exceptions is true, we must
2469 not eliminate a division by zero. */
2470 if ((code == TRUNC_DIV_EXPR
2471 || code == FLOOR_DIV_EXPR
2472 || code == CEIL_DIV_EXPR
2473 || code == EXACT_DIV_EXPR
2474 || code == ROUND_DIV_EXPR)
2475 && cfun->can_throw_non_call_exceptions
2476 && (vr1.type != VR_RANGE
2477 || symbolic_range_p (&vr1)
2478 || range_includes_zero_p (&vr1)))
2480 set_value_range_to_varying (vr);
2484 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2485 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2487 if ((code == TRUNC_DIV_EXPR
2488 || code == FLOOR_DIV_EXPR
2489 || code == CEIL_DIV_EXPR
2490 || code == EXACT_DIV_EXPR
2491 || code == ROUND_DIV_EXPR)
2492 && vr0.type == VR_RANGE
2493 && (vr1.type != VR_RANGE
2494 || symbolic_range_p (&vr1)
2495 || range_includes_zero_p (&vr1)))
2497 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2503 if (vrp_expr_computes_nonnegative (op1, &sop) && !sop)
2505 /* For unsigned division or when divisor is known
2506 to be non-negative, the range has to cover
2507 all numbers from 0 to max for positive max
2508 and all numbers from min to 0 for negative min. */
2509 cmp = compare_values (vr0.max, zero);
2512 else if (cmp == 0 || cmp == 1)
2516 cmp = compare_values (vr0.min, zero);
2519 else if (cmp == 0 || cmp == -1)
2526 /* Otherwise the range is -max .. max or min .. -min
2527 depending on which bound is bigger in absolute value,
2528 as the division can change the sign. */
2529 abs_extent_range (vr, vr0.min, vr0.max);
2532 if (type == VR_VARYING)
2534 set_value_range_to_varying (vr);
2539 /* Multiplications and divisions are a bit tricky to handle,
2540 depending on the mix of signs we have in the two ranges, we
2541 need to operate on different values to get the minimum and
2542 maximum values for the new range. One approach is to figure
2543 out all the variations of range combinations and do the
2546 However, this involves several calls to compare_values and it
2547 is pretty convoluted. It's simpler to do the 4 operations
2548 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP
2549 MAX1) and then figure the smallest and largest values to form
2553 gcc_assert ((vr0.type == VR_RANGE
2554 || (code == MULT_EXPR && vr0.type == VR_ANTI_RANGE))
2555 && vr0.type == vr1.type);
2557 /* Compute the 4 cross operations. */
2559 val[0] = vrp_int_const_binop (code, vr0.min, vr1.min);
2560 if (val[0] == NULL_TREE)
2563 if (vr1.max == vr1.min)
2567 val[1] = vrp_int_const_binop (code, vr0.min, vr1.max);
2568 if (val[1] == NULL_TREE)
2572 if (vr0.max == vr0.min)
2576 val[2] = vrp_int_const_binop (code, vr0.max, vr1.min);
2577 if (val[2] == NULL_TREE)
2581 if (vr0.min == vr0.max || vr1.min == vr1.max)
2585 val[3] = vrp_int_const_binop (code, vr0.max, vr1.max);
2586 if (val[3] == NULL_TREE)
2592 set_value_range_to_varying (vr);
2596 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2600 for (i = 1; i < 4; i++)
2602 if (!is_gimple_min_invariant (min)
2603 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2604 || !is_gimple_min_invariant (max)
2605 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2610 if (!is_gimple_min_invariant (val[i])
2611 || (TREE_OVERFLOW (val[i])
2612 && !is_overflow_infinity (val[i])))
2614 /* If we found an overflowed value, set MIN and MAX
2615 to it so that we set the resulting range to
2621 if (compare_values (val[i], min) == -1)
2624 if (compare_values (val[i], max) == 1)
2630 else if (code == TRUNC_MOD_EXPR)
2633 if (vr1.type != VR_RANGE
2634 || symbolic_range_p (&vr1)
2635 || range_includes_zero_p (&vr1)
2636 || vrp_val_is_min (vr1.min))
2638 set_value_range_to_varying (vr);
2642 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */
2643 max = fold_unary_to_constant (ABS_EXPR, TREE_TYPE (vr1.min), vr1.min);
2644 if (tree_int_cst_lt (max, vr1.max))
2646 max = int_const_binop (MINUS_EXPR, max, integer_one_node);
2647 /* If the dividend is non-negative the modulus will be
2648 non-negative as well. */
2649 if (TYPE_UNSIGNED (TREE_TYPE (max))
2650 || (vrp_expr_computes_nonnegative (op0, &sop) && !sop))
2651 min = build_int_cst (TREE_TYPE (max), 0);
2653 min = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (max), max);
2655 else if (code == MINUS_EXPR)
2657 /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to
2658 VR_VARYING. It would take more effort to compute a precise
2659 range for such a case. For example, if we have op0 == 1 and
2660 op1 == 1 with their ranges both being ~[0,0], we would have
2661 op0 - op1 == 0, so we cannot claim that the difference is in
2662 ~[0,0]. Note that we are guaranteed to have
2663 vr0.type == vr1.type at this point. */
2664 if (vr0.type == VR_ANTI_RANGE)
2666 set_value_range_to_varying (vr);
2670 /* For MINUS_EXPR, apply the operation to the opposite ends of
2672 min = vrp_int_const_binop (code, vr0.min, vr1.max);
2673 max = vrp_int_const_binop (code, vr0.max, vr1.min);
2675 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR)
2677 bool vr0_int_cst_singleton_p, vr1_int_cst_singleton_p;
2678 bool int_cst_range0, int_cst_range1;
2679 double_int may_be_nonzero0, may_be_nonzero1;
2680 double_int must_be_nonzero0, must_be_nonzero1;
2682 vr0_int_cst_singleton_p = range_int_cst_singleton_p (&vr0);
2683 vr1_int_cst_singleton_p = range_int_cst_singleton_p (&vr1);
2684 int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
2686 int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
2690 if (vr0_int_cst_singleton_p && vr1_int_cst_singleton_p)
2691 min = max = int_const_binop (code, vr0.max, vr1.max);
2692 else if (!int_cst_range0 && !int_cst_range1)
2694 set_value_range_to_varying (vr);
2697 else if (code == BIT_AND_EXPR)
2699 min = double_int_to_tree (expr_type,
2700 double_int_and (must_be_nonzero0,
2702 max = double_int_to_tree (expr_type,
2703 double_int_and (may_be_nonzero0,
2705 if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0)
2707 if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0)
2709 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
2711 if (min == NULL_TREE)
2712 min = build_int_cst (expr_type, 0);
2713 if (max == NULL_TREE || tree_int_cst_lt (vr0.max, max))
2716 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
2718 if (min == NULL_TREE)
2719 min = build_int_cst (expr_type, 0);
2720 if (max == NULL_TREE || tree_int_cst_lt (vr1.max, max))
2724 else if (!int_cst_range0
2726 || tree_int_cst_sgn (vr0.min) < 0
2727 || tree_int_cst_sgn (vr1.min) < 0)
2729 set_value_range_to_varying (vr);
2734 min = double_int_to_tree (expr_type,
2735 double_int_ior (must_be_nonzero0,
2737 max = double_int_to_tree (expr_type,
2738 double_int_ior (may_be_nonzero0,
2740 if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0)
2743 min = vrp_int_const_binop (MAX_EXPR, min, vr0.min);
2744 if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0)
2746 min = vrp_int_const_binop (MAX_EXPR, min, vr1.min);
2752 /* If either MIN or MAX overflowed, then set the resulting range to
2753 VARYING. But we do accept an overflow infinity
2755 if (min == NULL_TREE
2756 || !is_gimple_min_invariant (min)
2757 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2759 || !is_gimple_min_invariant (max)
2760 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2762 set_value_range_to_varying (vr);
2768 2) [-INF, +-INF(OVF)]
2769 3) [+-INF(OVF), +INF]
2770 4) [+-INF(OVF), +-INF(OVF)]
2771 We learn nothing when we have INF and INF(OVF) on both sides.
2772 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2774 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2775 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2777 set_value_range_to_varying (vr);
2781 cmp = compare_values (min, max);
2782 if (cmp == -2 || cmp == 1)
2784 /* If the new range has its limits swapped around (MIN > MAX),
2785 then the operation caused one of them to wrap around, mark
2786 the new range VARYING. */
2787 set_value_range_to_varying (vr);
2790 set_value_range (vr, type, min, max, NULL);
2794 /* Extract range information from a unary expression EXPR based on
2795 the range of its operand and the expression code. */
/* NOTE(review): this excerpt is non-contiguous (the embedded original line
   numbers skip), so the return-type line, several declarations (MIN, MAX,
   CMP, SOP are used below but declared in elided lines), and various
   braces/else-arms are not visible here.  Verify any structural reading
   against the full tree-vrp.c.  */
2798 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
2799 tree type, tree op0)
2803 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2805 /* Refuse to operate on certain unary expressions for which we
2806 cannot easily determine a resulting range. */
2807 if (code == FIX_TRUNC_EXPR
2808 || code == FLOAT_EXPR
2809 || code == BIT_NOT_EXPR
2810 || code == CONJ_EXPR)
2812 /* We can still do constant propagation here. */
2813 if ((op0 = op_with_constant_singleton_value_range (op0)) != NULL_TREE)
2815 tree tem = fold_unary (code, type, op0);
/* Only accept the folded constant when it is an invariant and not the
   special overflow-infinity representation.  */
2817 && is_gimple_min_invariant (tem)
2818 && !is_overflow_infinity (tem))
2820 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2824 set_value_range_to_varying (vr);
2828 /* Get value ranges for the operand. For constant operands, create
2829 a new value range with the operand to simplify processing. */
2830 if (TREE_CODE (op0) == SSA_NAME)
2831 vr0 = *(get_value_range (op0));
2832 else if (is_gimple_min_invariant (op0))
2833 set_value_range_to_value (&vr0, op0, NULL);
2835 set_value_range_to_varying (&vr0);
2837 /* If VR0 is UNDEFINED, so is the result. */
2838 if (vr0.type == VR_UNDEFINED)
2840 set_value_range_to_undefined (vr);
2844 /* Refuse to operate on symbolic ranges, or if neither operand is
2845 a pointer or integral type. */
2846 if ((!INTEGRAL_TYPE_P (TREE_TYPE (op0))
2847 && !POINTER_TYPE_P (TREE_TYPE (op0)))
2848 || (vr0.type != VR_VARYING
2849 && symbolic_range_p (&vr0)))
2851 set_value_range_to_varying (vr);
2855 /* If the expression involves pointers, we are only interested in
2856 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2857 if (POINTER_TYPE_P (type) || POINTER_TYPE_P (TREE_TYPE (op0)))
2862 if (range_is_nonnull (&vr0)
2863 || (tree_unary_nonzero_warnv_p (code, type, op0, &sop)
2865 set_value_range_to_nonnull (vr, type);
2866 else if (range_is_null (&vr0))
2867 set_value_range_to_null (vr, type);
2869 set_value_range_to_varying (vr);
2874 /* Handle unary expressions on integer ranges. */
2875 if (CONVERT_EXPR_CODE_P (code)
2876 && INTEGRAL_TYPE_P (type)
2877 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2879 tree inner_type = TREE_TYPE (op0);
2880 tree outer_type = type;
2882 /* If VR0 is varying and we increase the type precision, assume
2883 a full range for the following transformation. */
2884 if (vr0.type == VR_VARYING
2885 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2887 vr0.type = VR_RANGE;
2888 vr0.min = TYPE_MIN_VALUE (inner_type);
2889 vr0.max = TYPE_MAX_VALUE (inner_type);
2892 /* If VR0 is a constant range or anti-range and the conversion is
2893 not truncating we can convert the min and max values and
2894 canonicalize the resulting range. Otherwise we can do the
2895 conversion if the size of the range is less than what the
2896 precision of the target type can represent and the range is
2897 not an anti-range. */
2898 if ((vr0.type == VR_RANGE
2899 || vr0.type == VR_ANTI_RANGE)
2900 && TREE_CODE (vr0.min) == INTEGER_CST
2901 && TREE_CODE (vr0.max) == INTEGER_CST
2902 && (!is_overflow_infinity (vr0.min)
2903 || (vr0.type == VR_RANGE
2904 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2905 && needs_overflow_infinity (outer_type)
2906 && supports_overflow_infinity (outer_type)))
2907 && (!is_overflow_infinity (vr0.max)
2908 || (vr0.type == VR_RANGE
2909 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2910 && needs_overflow_infinity (outer_type)
2911 && supports_overflow_infinity (outer_type)))
/* A widening conversion is always fine; a narrowing one is accepted
   only when (max - min) >> outer-precision is zero, i.e. the whole
   range fits in the narrower type and is not an anti-range.  */
2912 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2913 || (vr0.type == VR_RANGE
2914 && integer_zerop (int_const_binop (RSHIFT_EXPR,
2915 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
2916 size_int (TYPE_PRECISION (outer_type)))))))
2918 tree new_min, new_max;
2919 new_min = force_fit_type_double (outer_type,
2920 tree_to_double_int (vr0.min),
2922 new_max = force_fit_type_double (outer_type,
2923 tree_to_double_int (vr0.max),
2925 if (is_overflow_infinity (vr0.min))
2926 new_min = negative_overflow_infinity (outer_type);
2927 if (is_overflow_infinity (vr0.max))
2928 new_max = positive_overflow_infinity (outer_type);
2929 set_and_canonicalize_value_range (vr, vr0.type,
2930 new_min, new_max, NULL);
2934 set_value_range_to_varying (vr);
2938 /* Conversion of a VR_VARYING value to a wider type can result
2939 in a usable range. So wait until after we've handled conversions
2940 before dropping the result to VR_VARYING if we had a source
2941 operand that is VR_VARYING. */
2942 if (vr0.type == VR_VARYING)
2944 set_value_range_to_varying (vr);
2948 /* Apply the operation to each end of the range and see what we end
2950 if (code == NEGATE_EXPR
2951 && !TYPE_UNSIGNED (type))
2953 /* NEGATE_EXPR flips the range around. We need to treat
2954 TYPE_MIN_VALUE specially. */
/* New minimum comes from negating the old maximum ...  */
2955 if (is_positive_overflow_infinity (vr0.max))
2956 min = negative_overflow_infinity (type);
2957 else if (is_negative_overflow_infinity (vr0.max))
2958 min = positive_overflow_infinity (type);
2959 else if (!vrp_val_is_min (vr0.max))
2960 min = fold_unary_to_constant (code, type, vr0.max);
2961 else if (needs_overflow_infinity (type))
2963 if (supports_overflow_infinity (type)
2964 && !is_overflow_infinity (vr0.min)
2965 && !vrp_val_is_min (vr0.min))
2966 min = positive_overflow_infinity (type);
2969 set_value_range_to_varying (vr);
2974 min = TYPE_MIN_VALUE (type);
/* ... and the new maximum from negating the old minimum.  */
2976 if (is_positive_overflow_infinity (vr0.min))
2977 max = negative_overflow_infinity (type);
2978 else if (is_negative_overflow_infinity (vr0.min))
2979 max = positive_overflow_infinity (type);
2980 else if (!vrp_val_is_min (vr0.min))
2981 max = fold_unary_to_constant (code, type, vr0.min);
2982 else if (needs_overflow_infinity (type))
2984 if (supports_overflow_infinity (type))
2985 max = positive_overflow_infinity (type);
2988 set_value_range_to_varying (vr);
2993 max = TYPE_MIN_VALUE (type);
2995 else if (code == NEGATE_EXPR
2996 && TYPE_UNSIGNED (type))
2998 if (!range_includes_zero_p (&vr0))
/* Unsigned negation with no zero in the range: endpoints swap.  */
3000 max = fold_unary_to_constant (code, type, vr0.min);
3001 min = fold_unary_to_constant (code, type, vr0.max);
3005 if (range_is_null (&vr0))
3006 set_value_range_to_null (vr, type);
3008 set_value_range_to_varying (vr);
3012 else if (code == ABS_EXPR
3013 && !TYPE_UNSIGNED (type))
3015 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3017 if (!TYPE_OVERFLOW_UNDEFINED (type)
3018 && ((vr0.type == VR_RANGE
3019 && vrp_val_is_min (vr0.min))
3020 || (vr0.type == VR_ANTI_RANGE
3021 && !vrp_val_is_min (vr0.min)
3022 && !range_includes_zero_p (&vr0))))
3024 set_value_range_to_varying (vr);
3028 /* ABS_EXPR may flip the range around, if the original range
3029 included negative values. */
3030 if (is_overflow_infinity (vr0.min))
3031 min = positive_overflow_infinity (type);
3032 else if (!vrp_val_is_min (vr0.min))
3033 min = fold_unary_to_constant (code, type, vr0.min);
3034 else if (!needs_overflow_infinity (type))
3035 min = TYPE_MAX_VALUE (type);
3036 else if (supports_overflow_infinity (type))
3037 min = positive_overflow_infinity (type);
3040 set_value_range_to_varying (vr);
3044 if (is_overflow_infinity (vr0.max))
3045 max = positive_overflow_infinity (type);
3046 else if (!vrp_val_is_min (vr0.max))
3047 max = fold_unary_to_constant (code, type, vr0.max);
3048 else if (!needs_overflow_infinity (type))
3049 max = TYPE_MAX_VALUE (type);
3050 else if (supports_overflow_infinity (type)
3051 /* We shouldn't generate [+INF, +INF] as set_value_range
3052 doesn't like this and ICEs. */
3053 && !is_positive_overflow_infinity (min))
3054 max = positive_overflow_infinity (type);
3057 set_value_range_to_varying (vr);
3061 cmp = compare_values (min, max);
3063 /* If a VR_ANTI_RANGEs contains zero, then we have
3064 ~[-INF, min(MIN, MAX)]. */
3065 if (vr0.type == VR_ANTI_RANGE)
3067 if (range_includes_zero_p (&vr0))
3069 /* Take the lower of the two values. */
3073 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3074 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3075 flag_wrapv is set and the original anti-range doesn't include
3076 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3077 if (TYPE_OVERFLOW_WRAPS (type))
3079 tree type_min_value = TYPE_MIN_VALUE (type);
3081 min = (vr0.min != type_min_value
3082 ? int_const_binop (PLUS_EXPR, type_min_value,
3088 if (overflow_infinity_range_p (&vr0))
3089 min = negative_overflow_infinity (type);
3091 min = TYPE_MIN_VALUE (type);
3096 /* All else has failed, so create the range [0, INF], even for
3097 flag_wrapv since TYPE_MIN_VALUE is in the original
3099 vr0.type = VR_RANGE;
3100 min = build_int_cst (type, 0);
3101 if (needs_overflow_infinity (type))
3103 if (supports_overflow_infinity (type))
3104 max = positive_overflow_infinity (type);
3107 set_value_range_to_varying (vr);
3112 max = TYPE_MAX_VALUE (type);
3116 /* If the range contains zero then we know that the minimum value in the
3117 range will be zero. */
3118 else if (range_includes_zero_p (&vr0))
3122 min = build_int_cst (type, 0);
3126 /* If the range was reversed, swap MIN and MAX. */
3137 /* Otherwise, operate on each end of the range. */
3138 min = fold_unary_to_constant (code, type, vr0.min);
3139 max = fold_unary_to_constant (code, type, vr0.max);
3141 if (needs_overflow_infinity (type))
/* By this point NEGATE/ABS have been dispatched above, so the
   overflow-infinity fixups below only apply to the remaining codes.  */
3143 gcc_assert (code != NEGATE_EXPR && code != ABS_EXPR);
3145 /* If both sides have overflowed, we don't know
3147 if ((is_overflow_infinity (vr0.min)
3148 || TREE_OVERFLOW (min))
3149 && (is_overflow_infinity (vr0.max)
3150 || TREE_OVERFLOW (max)))
3152 set_value_range_to_varying (vr);
3156 if (is_overflow_infinity (vr0.min))
3158 else if (TREE_OVERFLOW (min))
3160 if (supports_overflow_infinity (type))
3161 min = (tree_int_cst_sgn (min) >= 0
3162 ? positive_overflow_infinity (TREE_TYPE (min))
3163 : negative_overflow_infinity (TREE_TYPE (min)));
3166 set_value_range_to_varying (vr);
3171 if (is_overflow_infinity (vr0.max))
3173 else if (TREE_OVERFLOW (max))
3175 if (supports_overflow_infinity (type))
3176 max = (tree_int_cst_sgn (max) >= 0
3177 ? positive_overflow_infinity (TREE_TYPE (max))
3178 : negative_overflow_infinity (TREE_TYPE (max)));
3181 set_value_range_to_varying (vr);
3188 cmp = compare_values (min, max);
3189 if (cmp == -2 || cmp == 1)
3191 /* If the new range has its limits swapped around (MIN > MAX),
3192 then the operation caused one of them to wrap around, mark
3193 the new range VARYING. */
3194 set_value_range_to_varying (vr);
3197 set_value_range (vr, vr0.type, min, max, NULL);
3201 /* Extract range information from a conditional expression EXPR based on
3202 the ranges of each of its operands and the expression code. */
/* NOTE(review): the return-type line and the declarations of OP0/OP1 are
   elided from this excerpt -- confirm against the full file.  */
3205 extract_range_from_cond_expr (value_range_t *vr, tree expr)
3208 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3209 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3211 /* Get value ranges for each operand. For constant operands, create
3212 a new value range with the operand to simplify processing. */
/* Range of the THEN arm.  */
3213 op0 = COND_EXPR_THEN (expr);
3214 if (TREE_CODE (op0) == SSA_NAME)
3215 vr0 = *(get_value_range (op0));
3216 else if (is_gimple_min_invariant (op0))
3217 set_value_range_to_value (&vr0, op0, NULL);
3219 set_value_range_to_varying (&vr0);
/* Range of the ELSE arm.  */
3221 op1 = COND_EXPR_ELSE (expr);
3222 if (TREE_CODE (op1) == SSA_NAME)
3223 vr1 = *(get_value_range (op1));
3224 else if (is_gimple_min_invariant (op1))
3225 set_value_range_to_value (&vr1, op1, NULL);
3227 set_value_range_to_varying (&vr1);
3229 /* The resulting value range is the union of the operand ranges */
3230 vrp_meet (&vr0, &vr1);
3231 copy_value_range (vr, &vr0);
3235 /* Extract range information from a comparison expression EXPR based
3236 on the range of its operand and the expression code. */
/* NOTE(review): the declarations of VAL and SOP are in elided lines.  */
3239 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3240 tree type, tree op0, tree op1)
/* Try to fold the comparison to a constant using the recorded ranges;
   SOP records whether the answer relied on undefined signed overflow.  */
3245 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3248 /* A disadvantage of using a special infinity as an overflow
3249 representation is that we lose the ability to record overflow
3250 when we don't have an infinity. So we have to ignore a result
3251 which relies on overflow. */
3253 if (val && !is_overflow_infinity (val) && !sop)
3255 /* Since this expression was found on the RHS of an assignment,
3256 its type may be different from _Bool. Convert VAL to EXPR's
3258 val = fold_convert (type, val);
3259 if (is_gimple_min_invariant (val))
3260 set_value_range_to_value (vr, val, vr->equiv);
3262 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3265 /* The result of a comparison is always true or false. */
3266 set_value_range_to_truthvalue (vr, type);
3269 /* Try to derive a nonnegative or nonzero range out of STMT relying
3270 primarily on generic routines in fold in conjunction with range data.
3271 Store the result in *VR */
/* NOTE(review): SOP is used below but declared in an elided line.  */
3274 extract_range_basic (value_range_t *vr, gimple stmt)
3277 tree type = gimple_expr_type (stmt);
/* Prefer a nonnegative range for integral results; SOP (or an
   overflow-infinity on the stmt) marks the result as overflow-tainted.  */
3279 if (INTEGRAL_TYPE_P (type)
3280 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3281 set_value_range_to_nonnegative (vr, type,
3282 sop || stmt_overflow_infinity (stmt))
3283 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3285 set_value_range_to_nonnull (vr, type);
3287 set_value_range_to_varying (vr);
3291 /* Try to compute a useful range out of assignment STMT and store it
3295 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3297 enum tree_code code = gimple_assign_rhs_code (stmt);
/* Dispatch on the shape of the RHS to the matching range extractor.  */
3299 if (code == ASSERT_EXPR)
3300 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3301 else if (code == SSA_NAME)
3302 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3303 else if (TREE_CODE_CLASS (code) == tcc_binary
3304 || code == TRUTH_AND_EXPR
3305 || code == TRUTH_OR_EXPR
3306 || code == TRUTH_XOR_EXPR)
3307 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3308 gimple_expr_type (stmt),
3309 gimple_assign_rhs1 (stmt),
3310 gimple_assign_rhs2 (stmt));
3311 else if (TREE_CODE_CLASS (code) == tcc_unary)
3312 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3313 gimple_expr_type (stmt),
3314 gimple_assign_rhs1 (stmt));
3315 else if (code == COND_EXPR)
3316 extract_range_from_cond_expr (vr, gimple_assign_rhs1 (stmt));
3317 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3318 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3319 gimple_expr_type (stmt),
3320 gimple_assign_rhs1 (stmt),
3321 gimple_assign_rhs2 (stmt));
3322 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3323 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3324 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3326 set_value_range_to_varying (vr);
/* If the specialized extractors learned nothing, fall back to the
   generic nonnegative/nonzero derivation.  */
3328 if (vr->type == VR_VARYING)
3329 extract_range_basic (vr, stmt);
3332 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3333 would be profitable to adjust VR using scalar evolution information
3334 for VAR. If so, update VR with the new limits. */
/* NOTE(review): several lines are elided from this excerpt (the return-type
   line, the declarations of NIT/DTMP/OVERFLOW, and a number of early
   returns/braces); read alongside the full tree-vrp.c.  */
3337 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3338 gimple stmt, tree var)
3340 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3341 enum ev_direction dir;
3343 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3344 better opportunities than a regular range, but I'm not sure. */
3345 if (vr->type == VR_ANTI_RANGE)
3348 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3350 /* Like in PR19590, scev can return a constant function. */
3351 if (is_gimple_min_invariant (chrec))
3353 set_value_range_to_value (vr, chrec, vr->equiv);
3357 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
/* Pull the initial value and per-iteration step out of the chrec,
   replacing each with its singleton-range constant when one is known.  */
3360 init = initial_condition_in_loop_num (chrec, loop->num);
3361 tem = op_with_constant_singleton_value_range (init);
3364 step = evolution_part_in_loop_num (chrec, loop->num);
3365 tem = op_with_constant_singleton_value_range (step);
3369 /* If STEP is symbolic, we can't know whether INIT will be the
3370 minimum or maximum value in the range. Also, unless INIT is
3371 a simple expression, compare_values and possibly other functions
3372 in tree-vrp won't be able to handle it. */
3373 if (step == NULL_TREE
3374 || !is_gimple_min_invariant (step)
3375 || !valid_value_p (init))
3378 dir = scev_direction (chrec);
3379 if (/* Do not adjust ranges if we do not know whether the iv increases
3380 or decreases, ... */
3381 dir == EV_DIR_UNKNOWN
3382 /* ... or if it may wrap. */
3383 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3387 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3388 negative_overflow_infinity and positive_overflow_infinity,
3389 because we have concluded that the loop probably does not
3392 type = TREE_TYPE (var);
3393 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3394 tmin = lower_bound_in_type (type, type);
3396 tmin = TYPE_MIN_VALUE (type);
3397 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3398 tmax = upper_bound_in_type (type, type);
3400 tmax = TYPE_MAX_VALUE (type);
3402 /* Try to use estimated number of iterations for the loop to constrain the
3403 final value in the evolution. */
3404 if (TREE_CODE (step) == INTEGER_CST
3405 && is_gimple_val (init)
3406 && (TREE_CODE (init) != SSA_NAME
3407 || get_value_range (init)->type == VR_RANGE))
3411 if (estimated_loop_iterations (loop, true, &nit))
3413 value_range_t maxvr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3415 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
/* Final-value estimate: STEP * iteration-count, with overflow
   detection in OVERFLOW.  */
3418 dtmp = double_int_mul_with_sign (tree_to_double_int (step), nit,
3419 unsigned_p, &overflow);
3420 /* If the multiplication overflowed we can't do a meaningful
3421 adjustment. Likewise if the result doesn't fit in the type
3422 of the induction variable. For a signed type we have to
3423 check whether the result has the expected signedness which
3424 is that of the step as number of iterations is unsigned. */
3426 && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
3428 || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
3430 tem = double_int_to_tree (TREE_TYPE (init), dtmp);
3431 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
3432 TREE_TYPE (init), init, tem);
3433 /* Likewise if the addition did. */
3434 if (maxvr.type == VR_RANGE)
3443 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3448 /* For VARYING or UNDEFINED ranges, just about anything we get
3449 from scalar evolutions should be better. */
3451 if (dir == EV_DIR_DECREASES)
3456 /* If we would create an invalid range, then just assume we
3457 know absolutely nothing. This may be over-conservative,
3458 but it's clearly safe, and should happen only in unreachable
3459 parts of code, or for invalid programs. */
3460 if (compare_values (min, max) == 1)
3463 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3465 else if (vr->type == VR_RANGE)
3470 if (dir == EV_DIR_DECREASES)
3472 /* INIT is the maximum value. If INIT is lower than VR->MAX
3473 but no smaller than VR->MIN, set VR->MAX to INIT. */
3474 if (compare_values (init, max) == -1)
3477 /* According to the loop information, the variable does not
3478 overflow. If we think it does, probably because of an
3479 overflow due to arithmetic on a different INF value,
3481 if (is_negative_overflow_infinity (min)
3482 || compare_values (min, tmin) == -1)
3488 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3489 if (compare_values (init, min) == 1)
3492 if (is_positive_overflow_infinity (max)
3493 || compare_values (tmax, max) == -1)
3497 /* If we just created an invalid range with the minimum
3498 greater than the maximum, we fail conservatively.
3499 This should happen only in unreachable
3500 parts of code, or for invalid programs. */
3501 if (compare_values (min, max) == 1)
3504 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3508 /* Return true if VAR may overflow at STMT. This checks any available
3509 loop information to see if we can determine that VAR does not
/* NOTE(review): the return-type line, the declaration of L, and several
   `return true` lines are elided from this excerpt; the visible logic
   answers "true" conservatively whenever no loop analysis applies.  */
3513 vrp_var_may_overflow (tree var, gimple stmt)
3516 tree chrec, init, step;
/* Without loop structures there is nothing to consult.  */
3518 if (current_loops == NULL)
3521 l = loop_containing_stmt (stmt);
3526 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3527 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3530 init = initial_condition_in_loop_num (chrec, l->num);
3531 step = evolution_part_in_loop_num (chrec, l->num);
3533 if (step == NULL_TREE
3534 || !is_gimple_min_invariant (step)
3535 || !valid_value_p (init))
3538 /* If we get here, we know something useful about VAR based on the
3539 loop information. If it wraps, it may overflow. */
3541 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3545 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3547 print_generic_expr (dump_file, var, 0);
3548 fprintf (dump_file, ": loop information indicates does not overflow\n");
3555 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3557 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3558 all the values in the ranges.
3560 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3562 - Return NULL_TREE if it is not always possible to determine the
3563 value of the comparison.
3565 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3566 overflow infinity was used in the test. */
3570 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
3571 bool *strict_overflow_p)
/* NOTE(review): interior lines are elided in this copy (gaps in the
   embedded numbering) — several returns/braces are missing; restore
   from upstream before compiling.  */
3573 /* VARYING or UNDEFINED ranges cannot be compared. */
3574 if (vr0->type == VR_VARYING
3575 || vr0->type == VR_UNDEFINED
3576 || vr1->type == VR_VARYING
3577 || vr1->type == VR_UNDEFINED)
3580 /* Anti-ranges need to be handled separately. */
3581 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
3583 /* If both are anti-ranges, then we cannot compute any
3585 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
3588 /* These comparisons are never statically computable. */
3595 /* Equality can be computed only between a range and an
3596 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
3597 if (vr0->type == VR_RANGE)
3599 /* To simplify processing, make VR0 the anti-range. */
3600 value_range_t *tmp = vr0;
3605 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
/* VR0 is now the anti-range; identical bounds mean ~[a,b] vs [a,b],
   which is false for EQ and true for NE.  */
3607 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
3608 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
3609 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3614 if (!usable_range_p (vr0, strict_overflow_p)
3615 || !usable_range_p (vr1, strict_overflow_p))
3618 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3619 operands around and change the comparison code. */
3620 if (comp == GT_EXPR || comp == GE_EXPR)
3623 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3629 if (comp == EQ_EXPR)
3631 /* Equality may only be computed if both ranges represent
3632 exactly one value. */
3633 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3634 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3636 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3638 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3640 if (cmp_min == 0 && cmp_max == 0)
3641 return boolean_true_node;
3642 else if (cmp_min != -2 && cmp_max != -2)
3643 return boolean_false_node;
3645 /* If [V0_MIN, V0_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */
3646 else if (compare_values_warnv (vr0->min, vr1->max,
3647 strict_overflow_p) == 1
3648 || compare_values_warnv (vr1->min, vr0->max,
3649 strict_overflow_p) == 1)
3650 return boolean_false_node;
3654 else if (comp == NE_EXPR)
3658 /* If VR0 is completely to the left or completely to the right
3659 of VR1, they are always different. Notice that we need to
3660 make sure that both comparisons yield similar results to
3661 avoid comparing values that cannot be compared at
3663 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3664 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3665 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3666 return boolean_true_node;
3668 /* If VR0 and VR1 represent a single value and are identical,
3670 else if (compare_values_warnv (vr0->min, vr0->max,
3671 strict_overflow_p) == 0
3672 && compare_values_warnv (vr1->min, vr1->max,
3673 strict_overflow_p) == 0
3674 && compare_values_warnv (vr0->min, vr1->min,
3675 strict_overflow_p) == 0
3676 && compare_values_warnv (vr0->max, vr1->max,
3677 strict_overflow_p) == 0)
3678 return boolean_false_node;
3680 /* Otherwise, they may or may not be different. */
3684 else if (comp == LT_EXPR || comp == LE_EXPR)
3688 /* If VR0 is to the left of VR1, return true. */
3689 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3690 if ((comp == LT_EXPR && tst == -1)
3691 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3693 if (overflow_infinity_range_p (vr0)
3694 || overflow_infinity_range_p (vr1))
3695 *strict_overflow_p = true;
3696 return boolean_true_node;
3699 /* If VR0 is to the right of VR1, return false. */
3700 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3701 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3702 || (comp == LE_EXPR && tst == 1))
3704 if (overflow_infinity_range_p (vr0)
3705 || overflow_infinity_range_p (vr1))
3706 *strict_overflow_p = true;
3707 return boolean_false_node;
3710 /* Otherwise, we don't know. */
3718 /* Given a value range VR, a value VAL and a comparison code COMP, return
3719 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3720 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3721 always returns false. Return NULL_TREE if it is not always
3722 possible to determine the value of the comparison. Also set
3723 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3724 infinity was used in the test. */
3727 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
3728 bool *strict_overflow_p)
/* NOTE(review): interior lines are elided in this copy (gaps in the
   embedded numbering); several returns/braces are missing.  */
3730 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3733 /* Anti-ranges need to be handled separately. */
3734 if (vr->type == VR_ANTI_RANGE)
3736 /* For anti-ranges, the only predicates that we can compute at
3737 compile time are equality and inequality. */
3744 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3745 if (value_inside_range (val, vr) == 1)
3746 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3751 if (!usable_range_p (vr, strict_overflow_p))
3754 if (comp == EQ_EXPR)
3756 /* EQ_EXPR may only be computed if VR represents exactly
3758 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3760 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
3762 return boolean_true_node;
3763 else if (cmp == -1 || cmp == 1 || cmp == 2)
3764 return boolean_false_node;
/* VR spans more than one value: EQ is still decidably false if VAL
   lies strictly outside [VR->MIN, VR->MAX].  */
3766 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
3767 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
3768 return boolean_false_node;
3772 else if (comp == NE_EXPR)
3774 /* If VAL is not inside VR, then they are always different. */
3775 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
3776 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
3777 return boolean_true_node;
3779 /* If VR represents exactly one value equal to VAL, then return
3781 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3782 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
3783 return boolean_false_node;
3785 /* Otherwise, they may or may not be different. */
3788 else if (comp == LT_EXPR || comp == LE_EXPR)
3792 /* If VR is to the left of VAL, return true. */
3793 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3794 if ((comp == LT_EXPR && tst == -1)
3795 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3797 if (overflow_infinity_range_p (vr))
3798 *strict_overflow_p = true;
3799 return boolean_true_node;
3802 /* If VR is to the right of VAL, return false. */
3803 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3804 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3805 || (comp == LE_EXPR && tst == 1))
3807 if (overflow_infinity_range_p (vr))
3808 *strict_overflow_p = true;
3809 return boolean_false_node;
3812 /* Otherwise, we don't know. */
3815 else if (comp == GT_EXPR || comp == GE_EXPR)
3819 /* If VR is to the right of VAL, return true. */
3820 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3821 if ((comp == GT_EXPR && tst == 1)
3822 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
3824 if (overflow_infinity_range_p (vr))
3825 *strict_overflow_p = true;
3826 return boolean_true_node;
3829 /* If VR is to the left of VAL, return false. */
3830 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3831 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
3832 || (comp == GE_EXPR && tst == -1))
3834 if (overflow_infinity_range_p (vr))
3835 *strict_overflow_p = true;
3836 return boolean_false_node;
3839 /* Otherwise, we don't know. */
3847 /* Debugging dumps. */
/* Forward declarations for the value-range dump/debug helpers; the
   dump_* variants write to a given FILE, the debug_* variants are
   stderr convenience wrappers.  */
3849 void dump_value_range (FILE *, value_range_t *);
3850 void debug_value_range (value_range_t *);
3851 void dump_all_value_ranges (FILE *);
3852 void debug_all_value_ranges (void);
3853 void dump_vr_equiv (FILE *, bitmap);
3854 void debug_vr_equiv (bitmap);
3857 /* Dump value range VR to FILE. */
3860 dump_value_range (FILE *file, value_range_t *vr)
/* NOTE(review): some lines are elided in this copy (gaps in the
   embedded numbering), e.g. the leading NULL check that pairs with
   the "[]" output below.  */
3863 fprintf (file, "[]");
3864 else if (vr->type == VR_UNDEFINED)
3865 fprintf (file, "UNDEFINED");
3866 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
3868 tree type = TREE_TYPE (vr->min);
/* Anti-ranges are printed with a leading "~".  */
3870 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
/* Lower bound: -INF(OVF) for an overflow infinity, -INF when it is
   the signed type's minimum, otherwise the literal value.  */
3872 if (is_negative_overflow_infinity (vr->min))
3873 fprintf (file, "-INF(OVF)");
3874 else if (INTEGRAL_TYPE_P (type)
3875 && !TYPE_UNSIGNED (type)
3876 && vrp_val_is_min (vr->min))
3877 fprintf (file, "-INF");
3879 print_generic_expr (file, vr->min, 0);
3881 fprintf (file, ", ");
/* Upper bound, symmetrically.  */
3883 if (is_positive_overflow_infinity (vr->max))
3884 fprintf (file, "+INF(OVF)");
3885 else if (INTEGRAL_TYPE_P (type)
3886 && vrp_val_is_max (vr->max))
3887 fprintf (file, "+INF");
3889 print_generic_expr (file, vr->max, 0);
3891 fprintf (file, "]");
/* If VR carries an equivalence set, list the SSA names in it.  */
3898 fprintf (file, " EQUIVALENCES: { ");
3900 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
3902 print_generic_expr (file, ssa_name (i), 0);
3903 fprintf (file, " ");
3907 fprintf (file, "} (%u elements)", c);
3910 else if (vr->type == VR_VARYING)
3911 fprintf (file, "VARYING");
3913 fprintf (file, "INVALID RANGE");
3917 /* Dump value range VR to stderr. */
/* Debugger convenience wrapper over dump_value_range.  */
3920 debug_value_range (value_range_t *vr)
3922 dump_value_range (stderr, vr);
3923 fprintf (stderr, "\n");
3927 /* Dump value ranges of all SSA_NAMEs to FILE. */
3930 dump_all_value_ranges (FILE *file)
/* Walk every SSA name version and print its lattice entry from
   VR_VALUE; the guard that skips empty entries is elided in this
   copy.  */
3934 for (i = 0; i < num_ssa_names; i++)
3938 print_generic_expr (file, ssa_name (i), 0);
3939 fprintf (file, ": ");
3940 dump_value_range (file, vr_value[i]);
3941 fprintf (file, "\n");
3945 fprintf (file, "\n");
3949 /* Dump all value ranges to stderr. */
/* Debugger convenience wrapper over dump_all_value_ranges.  */
3952 debug_all_value_ranges (void)
3954 dump_all_value_ranges (stderr);
3958 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
3959 create a new SSA name N and return the assertion assignment
3960 'V = ASSERT_EXPR <V, V OP W>'. */
3963 build_assert_expr_for (tree cond, tree v)
/* N is a fresh SSA name duplicated from V; ASSERTION is the GIMPLE
   assignment that defines it (declarations elided in this copy).  */
3968 gcc_assert (TREE_CODE (v) == SSA_NAME);
3969 n = duplicate_ssa_name (v, NULL);
3971 if (COMPARISON_CLASS_P (cond))
/* Comparison: build N = ASSERT_EXPR <V, COND>.  */
3973 tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
3974 assertion = gimple_build_assign (n, a);
3976 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
3978 /* Given !V, build the assignment N = false. */
3979 tree op0 = TREE_OPERAND (cond, 0);
3980 gcc_assert (op0 == v);
3981 assertion = gimple_build_assign (n, boolean_false_node);
3983 else if (TREE_CODE (cond) == SSA_NAME)
3985 /* Given V, build the assignment N = true. */
3986 gcc_assert (v == cond);
3987 assertion = gimple_build_assign (n, boolean_true_node);
3992 SSA_NAME_DEF_STMT (n) = assertion;
3994 /* The new ASSERT_EXPR, creates a new SSA name that replaces the
3995 operand of the ASSERT_EXPR. Register the new name and the old one
3996 in the replacement table so that we can fix the SSA web after
3997 adding all the ASSERT_EXPRs. */
3998 register_new_name_mapping (n, v);
4004 /* Return true if STMT is a GIMPLE_COND whose predicate involves floating
   point values, judging by the type of the condition's LHS operand.
   (The original comment said "Return false", which contradicts the
   return expression below.)  */
4008 fp_predicate (gimple stmt)
4010 GIMPLE_CHECK (stmt, GIMPLE_COND);
4012 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4016 /* If the range of values taken by OP can be inferred after STMT executes,
4017 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4018 describes the inferred range. Return true if a range could be
4022 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
/* Initialize outputs to "nothing inferred"; the matching *val_p
   initialization is elided in this copy.  */
4025 *comp_code_p = ERROR_MARK;
4027 /* Do not attempt to infer anything in names that flow through
4029 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4032 /* Similarly, don't infer anything from statements that may throw
4034 if (stmt_could_throw_p (stmt))
4037 /* If STMT is the last statement of a basic block with no
4038 successors, there is no point inferring anything about any of its
4039 operands. We would not be able to find a proper insertion point
4040 for the assertion, anyway. */
4041 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4044 /* We can only assume that a pointer dereference will yield
4045 non-NULL if -fdelete-null-pointer-checks is enabled. */
4046 if (flag_delete_null_pointer_checks
4047 && POINTER_TYPE_P (TREE_TYPE (op))
4048 && gimple_code (stmt) != GIMPLE_ASM)
4050 unsigned num_uses, num_loads, num_stores;
4052 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
/* STMT dereferences OP, so after it executes OP != 0 must hold.  */
4053 if (num_loads + num_stores > 0)
4055 *val_p = build_int_cst (TREE_TYPE (op), 0);
4056 *comp_code_p = NE_EXPR;
/* Forward declarations for the ASSERT_EXPR dump/debug helpers
   defined below.  */
4065 void dump_asserts_for (FILE *, tree);
4066 void debug_asserts_for (tree);
4067 void dump_all_asserts (FILE *);
4068 void debug_all_asserts (void);
4070 /* Dump all the registered assertions for NAME to FILE. */
4073 dump_asserts_for (FILE *file, tree name)
4077 fprintf (file, "Assertions to be inserted for ");
4078 print_generic_expr (file, name, 0);
4079 fprintf (file, "\n");
/* Walk NAME's linked list of assert locations; the loop header and
   the NULL-check branches are elided in this copy.  */
4081 loc = asserts_for[SSA_NAME_VERSION (name)];
4084 fprintf (file, "\t");
4085 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4086 fprintf (file, "\n\tBB #%d", loc->bb->index);
/* Edge insertions report the source/destination block pair.  */
4089 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4090 loc->e->dest->index);
4091 dump_edge_info (file, loc->e, 0);
4093 fprintf (file, "\n\tPREDICATE: ");
4094 print_generic_expr (file, name, 0);
4095 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
4096 print_generic_expr (file, loc->val, 0);
4097 fprintf (file, "\n\n");
4101 fprintf (file, "\n");
4105 /* Dump all the registered assertions for NAME to stderr. */
/* Debugger convenience wrapper over dump_asserts_for.  */
4108 debug_asserts_for (tree name)
4110 dump_asserts_for (stderr, name);
4114 /* Dump all the registered assertions for all the names to FILE. */
4117 dump_all_asserts (FILE *file)
4122 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
/* NEED_ASSERT_FOR holds the SSA versions that have registered
   assertions; dump each one.  */
4123 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4124 dump_asserts_for (file, ssa_name (i));
4125 fprintf (file, "\n");
4129 /* Dump all the registered assertions for all the names to stderr. */
/* Debugger convenience wrapper over dump_all_asserts.  */
4132 debug_all_asserts (void)
4134 dump_all_asserts (stderr);
4138 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4139 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4140 E->DEST, then register this location as a possible insertion point
4141 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4143 BB, E and SI provide the exact insertion point for the new
4144 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4145 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4146 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4147 must not be NULL. */
4150 register_new_assert_for (tree name, tree expr,
4151 enum tree_code comp_code,
4155 gimple_stmt_iterator si)
/* NOTE(review): this copy elides interior lines (gaps in the
   embedded numbering), including parts of the list-walk loop and the
   field assignments when building the new node.  */
4157 assert_locus_t n, loc, last_loc;
4158 basic_block dest_bb;
/* Exactly one of BB and E identifies the insertion point.  */
4160 gcc_checking_assert (bb == NULL || e == NULL);
4163 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4164 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4166 /* Never build an assert comparing against an integer constant with
4167 TREE_OVERFLOW set. This confuses our undefined overflow warning
4169 if (TREE_CODE (val) == INTEGER_CST
4170 && TREE_OVERFLOW (val))
4171 val = build_int_cst_wide (TREE_TYPE (val),
4172 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
4174 /* The new assertion A will be inserted at BB or E. We need to
4175 determine if the new location is dominated by a previously
4176 registered location for A. If we are doing an edge insertion,
4177 assume that A will be inserted at E->DEST. Note that this is not
4180 If E is a critical edge, it will be split. But even if E is
4181 split, the new block will dominate the same set of blocks that
4184 The reverse, however, is not true, blocks dominated by E->DEST
4185 will not be dominated by the new block created to split E. So,
4186 if the insertion location is on a critical edge, we will not use
4187 the new location to move another assertion previously registered
4188 at a block dominated by E->DEST. */
4189 dest_bb = (bb) ? bb : e->dest;
4191 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4192 VAL at a block dominating DEST_BB, then we don't need to insert a new
4193 one. Similarly, if the same assertion already exists at a block
4194 dominated by DEST_BB and the new location is not on a critical
4195 edge, then update the existing location for the assertion (i.e.,
4196 move the assertion up in the dominance tree).
4198 Note, this is implemented as a simple linked list because there
4199 should not be more than a handful of assertions registered per
4200 name. If this becomes a performance problem, a table hashed by
4201 COMP_CODE and VAL could be implemented. */
4202 loc = asserts_for[SSA_NAME_VERSION (name)];
4206 if (loc->comp_code == comp_code
4208 || operand_equal_p (loc->val, val, 0))
4209 && (loc->expr == expr
4210 || operand_equal_p (loc->expr, expr, 0)))
4212 /* If the assertion NAME COMP_CODE VAL has already been
4213 registered at a basic block that dominates DEST_BB, then
4214 we don't need to insert the same assertion again. Note
4215 that we don't check strict dominance here to avoid
4216 replicating the same assertion inside the same basic
4217 block more than once (e.g., when a pointer is
4218 dereferenced several times inside a block).
4220 An exception to this rule are edge insertions. If the
4221 new assertion is to be inserted on edge E, then it will
4222 dominate all the other insertions that we may want to
4223 insert in DEST_BB. So, if we are doing an edge
4224 insertion, don't do this dominance check. */
4226 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb)
4229 /* Otherwise, if E is not a critical edge and DEST_BB
4230 dominates the existing location for the assertion, move
4231 the assertion up in the dominance tree by updating its
4232 location information. */
4233 if ((e == NULL || !EDGE_CRITICAL_P (e))
4234 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4243 /* Update the last node of the list and move to the next one. */
4248 /* If we didn't find an assertion already registered for
4249 NAME COMP_CODE VAL, add a new one at the end of the list of
4250 assertions associated with NAME. */
4251 n = XNEW (struct assert_locus_d);
4255 n->comp_code = comp_code;
4263 asserts_for[SSA_NAME_VERSION (name)] = n;
4265 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4268 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4269 Extract a suitable test code and value and store them into *CODE_P and
4270 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4272 If no extraction was possible, return FALSE, otherwise return TRUE.
4274 If INVERT is true, then we invert the result stored into *CODE_P. */
4277 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4278 tree cond_op0, tree cond_op1,
4279 bool invert, enum tree_code *code_p,
4282 enum tree_code comp_code;
/* NOTE(review): the assignments of VAL (the operand opposite NAME)
   and some early returns are elided in this copy.  */
4285 /* Otherwise, we have a comparison of the form NAME COMP VAL
4286 or VAL COMP NAME. */
4287 if (name == cond_op1)
4289 /* If the predicate is of the form VAL COMP NAME, flip
4290 COMP around because we need to register NAME as the
4291 first operand in the predicate. */
4292 comp_code = swap_tree_comparison (cond_code);
4297 /* The comparison is of the form NAME COMP VAL, so the
4298 comparison code remains unchanged. */
4299 comp_code = cond_code;
4303 /* Invert the comparison code as necessary. */
4305 comp_code = invert_tree_comparison (comp_code, 0);
4307 /* VRP does not handle float types. */
4308 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4311 /* Do not register always-false predicates.
4312 FIXME: this works around a limitation in fold() when dealing with
4313 enumerations. Given 'enum { N1, N2 } x;', fold will not
4314 fold 'if (x > N2)' to 'if (0)'. */
4315 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4316 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4318 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4319 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
/* x > MAX and x < MIN can never hold; refuse to register them.  */
4321 if (comp_code == GT_EXPR
4323 || compare_values (val, max) == 0))
4326 if (comp_code == LT_EXPR
4328 || compare_values (val, min) == 0))
4331 *code_p = comp_code;
4336 /* Try to register an edge assertion for SSA name NAME on edge E for
4337 the condition COND contributing to the conditional jump pointed to by BSI.
4338 Invert the condition COND if INVERT is true.
4339 Return true if an assertion for NAME could be registered. */
4342 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4343 enum tree_code cond_code,
4344 tree cond_op0, tree cond_op1, bool invert)
/* NOTE(review): interior lines are elided in this copy (gaps in the
   embedded numbering); some declarations, braces and retval updates
   are missing.  */
4347 enum tree_code comp_code;
4348 bool retval = false;
/* Normalize the predicate to NAME COMP_CODE VAL; bail out if that
   is not possible.  */
4350 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4353 invert, &comp_code, &val))
4356 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4357 reachable from E. */
4358 if (live_on_edge (e, name)
4359 && !has_single_use (name))
4361 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4365 /* In the case of NAME <= CST and NAME being defined as
4366 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4367 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4368 This catches range and anti-range tests. */
4369 if ((comp_code == LE_EXPR
4370 || comp_code == GT_EXPR)
4371 && TREE_CODE (val) == INTEGER_CST
4372 && TYPE_UNSIGNED (TREE_TYPE (val)))
4374 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4375 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4377 /* Extract CST2 from the (optional) addition. */
4378 if (is_gimple_assign (def_stmt)
4379 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4381 name2 = gimple_assign_rhs1 (def_stmt);
4382 cst2 = gimple_assign_rhs2 (def_stmt);
4383 if (TREE_CODE (name2) == SSA_NAME
4384 && TREE_CODE (cst2) == INTEGER_CST)
4385 def_stmt = SSA_NAME_DEF_STMT (name2);
4388 /* Extract NAME2 from the (optional) sign-changing cast. */
4389 if (gimple_assign_cast_p (def_stmt))
4391 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4392 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4393 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4394 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4395 name3 = gimple_assign_rhs1 (def_stmt);
4398 /* If name3 is used later, create an ASSERT_EXPR for it. */
4399 if (name3 != NULL_TREE
4400 && TREE_CODE (name3) == SSA_NAME
4401 && (cst2 == NULL_TREE
4402 || TREE_CODE (cst2) == INTEGER_CST)
4403 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4404 && live_on_edge (e, name3)
4405 && !has_single_use (name3))
4409 /* Build an expression for the range test. */
4410 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4411 if (cst2 != NULL_TREE)
4412 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4416 fprintf (dump_file, "Adding assert for ");
4417 print_generic_expr (dump_file, name3, 0);
4418 fprintf (dump_file, " from ");
4419 print_generic_expr (dump_file, tmp, 0);
4420 fprintf (dump_file, "\n");
4423 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4428 /* If name2 is used later, create an ASSERT_EXPR for it. */
4429 if (name2 != NULL_TREE
4430 && TREE_CODE (name2) == SSA_NAME
4431 && TREE_CODE (cst2) == INTEGER_CST
4432 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4433 && live_on_edge (e, name2)
4434 && !has_single_use (name2))
4438 /* Build an expression for the range test. */
4440 if (TREE_TYPE (name) != TREE_TYPE (name2))
4441 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4442 if (cst2 != NULL_TREE)
4443 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4447 fprintf (dump_file, "Adding assert for ");
4448 print_generic_expr (dump_file, name2, 0);
4449 fprintf (dump_file, " from ");
4450 print_generic_expr (dump_file, tmp, 0);
4451 fprintf (dump_file, "\n");
4454 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4463 /* OP is an operand of a truth value expression which is known to have
4464 a particular value. Register any asserts for OP and for any
4465 operands in OP's defining statement.
4467 If CODE is EQ_EXPR, then we want to register OP is zero (false),
4468 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
4471 register_edge_assert_for_1 (tree op, enum tree_code code,
4472 edge e, gimple_stmt_iterator bsi)
4474 bool retval = false;
4477 enum tree_code rhs_code;
4479 /* We only care about SSA_NAMEs. */
4480 if (TREE_CODE (op) != SSA_NAME)
4483 /* We know that OP will have a zero or nonzero value. If OP is used
4484 more than once go ahead and register an assert for OP.
4486 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
4487 it will always be set for OP (because OP is used in a COND_EXPR in
4489 if (!has_single_use (op))
4491 val = build_int_cst (TREE_TYPE (op), 0);
4492 register_new_assert_for (op, op, code, val, NULL, e, bsi);
4496 /* Now look at how OP is set. If it's set from a comparison,
4497 a truth operation or some bit operations, then we may be able
4498 to register information about the operands of that assignment. */
4499 op_def = SSA_NAME_DEF_STMT (op);
4500 if (gimple_code (op_def) != GIMPLE_ASSIGN)
4503 rhs_code = gimple_assign_rhs_code (op_def);
4505 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
/* OP = (a CMP b): assert on each SSA operand of the comparison,
   inverting it when OP is known false (EQ against zero).  */
4507 bool invert = (code == EQ_EXPR ? true : false);
4508 tree op0 = gimple_assign_rhs1 (op_def);
4509 tree op1 = gimple_assign_rhs2 (op_def);
4511 if (TREE_CODE (op0) == SSA_NAME)
4512 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
4514 if (TREE_CODE (op1) == SSA_NAME)
4515 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
/* A true AND means both operands are true; a false OR means both
   operands are false — recurse into each operand.  */
4518 else if ((code == NE_EXPR
4519 && (gimple_assign_rhs_code (op_def) == TRUTH_AND_EXPR
4520 || gimple_assign_rhs_code (op_def) == BIT_AND_EXPR))
4522 && (gimple_assign_rhs_code (op_def) == TRUTH_OR_EXPR
4523 || gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)))
4525 /* Recurse on each operand. */
4526 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4528 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
4531 else if (gimple_assign_rhs_code (op_def) == TRUTH_NOT_EXPR)
4533 /* Recurse, flipping CODE. */
4534 code = invert_tree_comparison (code, false);
4535 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4538 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
4540 /* Recurse through the copy. */
4541 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4544 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
4546 /* Recurse through the type conversion. */
4547 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4554 /* Try to register an edge assertion for SSA name NAME on edge E for
4555 the condition COND contributing to the conditional jump pointed to by SI.
4556 Return true if an assertion for NAME could be registered. */
4559 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
4560 enum tree_code cond_code, tree cond_op0,
4564 enum tree_code comp_code;
4565 bool retval = false;
/* On the else edge the condition is known false, so the extracted
   comparison must be inverted.  */
4566 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
4568 /* Do not attempt to infer anything in names that flow through
4570 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4573 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4579 /* Register ASSERT_EXPRs for name. */
4580 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
4581 cond_op1, is_else_edge);
4584 /* If COND is effectively an equality test of an SSA_NAME against
4585 the value zero or one, then we may be able to assert values
4586 for SSA_NAMEs which flow into COND. */
4588 /* In the case of NAME == 1 or NAME != 0, for TRUTH_AND_EXPR defining
4589 statement of NAME we can assert both operands of the TRUTH_AND_EXPR
4590 have nonzero value. */
4591 if (((comp_code == EQ_EXPR && integer_onep (val))
4592 || (comp_code == NE_EXPR && integer_zerop (val))))
4594 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4596 if (is_gimple_assign (def_stmt)
4597 && (gimple_assign_rhs_code (def_stmt) == TRUTH_AND_EXPR
4598 || gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR))
4600 tree op0 = gimple_assign_rhs1 (def_stmt);
4601 tree op1 = gimple_assign_rhs2 (def_stmt);
4602 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
4603 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
4607 /* In the case of NAME == 0 or NAME != 1, for TRUTH_OR_EXPR defining
4608 statement of NAME we can assert both operands of the TRUTH_OR_EXPR
4610 if (((comp_code == EQ_EXPR && integer_zerop (val))
4611 || (comp_code == NE_EXPR && integer_onep (val))))
4613 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4615 if (is_gimple_assign (def_stmt)
4616 && (gimple_assign_rhs_code (def_stmt) == TRUTH_OR_EXPR
4617 /* For BIT_IOR_EXPR only if NAME == 0 both operands have
4618 necessarily zero value. */
4619 || (comp_code == EQ_EXPR
4620 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR))))
4622 tree op0 = gimple_assign_rhs1 (def_stmt);
4623 tree op1 = gimple_assign_rhs2 (def_stmt);
4624 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
4625 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
4633 /* Determine whether the outgoing edges of BB should receive an
4634 ASSERT_EXPR for each of the operands of BB's LAST statement.
4635 The last statement of BB must be a COND_EXPR.
4637 If any of the sub-graphs rooted at BB have an interesting use of
4638 the predicate operands, an assert location node is added to the
4639 list of assertions for the corresponding operands. */
4642 find_conditional_asserts (basic_block bb, gimple last)
/* NOTE(review): edge/operand iterator declarations and the final
   return are elided in this copy.  */
4645 gimple_stmt_iterator bsi;
4651 need_assert = false;
4652 bsi = gsi_for_stmt (last);
4654 /* Look for uses of the operands in each of the sub-graphs
4655 rooted at BB. We need to check each of the outgoing edges
4656 separately, so that we know what kind of ASSERT_EXPR to
4658 FOR_EACH_EDGE (e, ei, bb->succs)
4663 /* Register the necessary assertions for each operand in the
4664 conditional predicate. */
4665 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
4667 need_assert |= register_edge_assert_for (op, e, bsi,
4668 gimple_cond_code (last),
4669 gimple_cond_lhs (last),
4670 gimple_cond_rhs (last));
4683 /* Compare two case labels sorting first by the destination bb index
4684 and then by the case value. */
4687 compare_case_labels (const void *p1, const void *p2)
4689 const struct case_info *ci1 = (const struct case_info *) p1;
4690 const struct case_info *ci2 = (const struct case_info *) p2;
4691 int idx1 = ci1->bb->index;
4692 int idx2 = ci2->bb->index;
/* NOTE(review): the branch returning the result for idx1 != idx2 is
   elided in this copy.  */
4696 else if (idx1 == idx2)
4698 /* Make sure the default label is first in a group. */
4699 if (!CASE_LOW (ci1->expr))
4701 else if (!CASE_LOW (ci2->expr))
/* Same destination, both non-default: order by case value.  */
4704 return tree_int_cst_compare (CASE_LOW (ci1->expr),
4705 CASE_LOW (ci2->expr));
4711 /* Determine whether the outgoing edges of BB should receive an
4712 ASSERT_EXPR for each of the operands of BB's LAST statement.
4713 The last statement of BB must be a SWITCH_EXPR.
4715 If any of the sub-graphs rooted at BB have an interesting use of
4716 the predicate operands, an assert location node is added to the
4717 list of assertions for the corresponding operands. */
4720 find_switch_asserts (basic_block bb, gimple last)
/* NOTE(review): several declarations, the free of CI and the final
   return are elided in this copy (gaps in the embedded numbering).  */
4723 gimple_stmt_iterator bsi;
4726 struct case_info *ci;
4727 size_t n = gimple_switch_num_labels (last);
4728 #if GCC_VERSION >= 4000
4731 /* Work around GCC 3.4 bug (PR 37086). */
4732 volatile unsigned int idx;
4735 need_assert = false;
4736 bsi = gsi_for_stmt (last);
4737 op = gimple_switch_index (last);
4738 if (TREE_CODE (op) != SSA_NAME)
4741 /* Build a vector of case labels sorted by destination label. */
4742 ci = XNEWVEC (struct case_info, n);
4743 for (idx = 0; idx < n; ++idx)
4745 ci[idx].expr = gimple_switch_label (last, idx);
4746 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
4748 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
4750 for (idx = 0; idx < n; ++idx)
4753 tree cl = ci[idx].expr;
4754 basic_block cbb = ci[idx].bb;
4756 min = CASE_LOW (cl);
4757 max = CASE_HIGH (cl);
4759 /* If there are multiple case labels with the same destination
4760 we need to combine them to a single value range for the edge. */
4761 if (idx + 1 < n && cbb == ci[idx + 1].bb)
4763 /* Skip labels until the last of the group. */
4766 } while (idx < n && cbb == ci[idx].bb);
4769 /* Pick up the maximum of the case label range. */
4770 if (CASE_HIGH (ci[idx].expr))
4771 max = CASE_HIGH (ci[idx].expr);
4773 max = CASE_LOW (ci[idx].expr);
4776 /* Nothing to do if the range includes the default label until we
4777 can register anti-ranges. */
4778 if (min == NULL_TREE)
4781 /* Find the edge to register the assert expr on. */
4782 e = find_edge (bb, cbb);
4784 /* Register the necessary assertions for the operand in the
/* A range label [min, max] registers OP >= min and OP <= max; a
   single-value label registers OP == min.  */
4786 need_assert |= register_edge_assert_for (op, e, bsi,
4787 max ? GE_EXPR : EQ_EXPR,
4789 fold_convert (TREE_TYPE (op),
4793 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
4795 fold_convert (TREE_TYPE (op),
4805 /* Traverse all the statements in block BB looking for statements that
4806 may generate useful assertions for the SSA names in their operand.
4807 If a statement produces a useful assertion A for name N_i, then the
4808 list of assertions already generated for N_i is scanned to
4809 determine if A is actually needed.
4811 If N_i already had the assertion A at a location dominating the
4812 current location, then nothing needs to be done. Otherwise, the
4813 new location for A is recorded instead.
4815 1- For every statement S in BB, all the variables used by S are
4816 added to bitmap FOUND_IN_SUBGRAPH.
4818 2- If statement S uses an operand N in a way that exposes a known
4819 value range for N, then if N was not already generated by an
4820 ASSERT_EXPR, create a new assert location for N. For instance,
4821 if N is a pointer and the statement dereferences it, we can
4822 assume that N is not NULL.
4824 3- COND_EXPRs are a special case of #2. We can derive range
4825 information from the predicate but need to insert different
4826 ASSERT_EXPRs for each of the sub-graphs rooted at the
4827 conditional block. If the last statement of BB is a conditional
4828 expression of the form 'X op Y', then
4830 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4832 b) If the conditional is the only entry point to the sub-graph
4833 corresponding to the THEN_CLAUSE, recurse into it. On
4834 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4835 an ASSERT_EXPR is added for the corresponding variable.
4837 c) Repeat step (b) on the ELSE_CLAUSE.
4839 d) Mark X and Y in FOUND_IN_SUBGRAPH.
4848 In this case, an assertion on the THEN clause is useful to
4849 determine that 'a' is always 9 on that edge. However, an assertion
4850 on the ELSE clause would be unnecessary.
4852 4- If BB does not end in a conditional expression, then we recurse
4853 into BB's dominator children.
4855 At the end of the recursive traversal, every SSA name will have a
4856 list of locations where ASSERT_EXPRs should be added. When a new
4857 location for name N is found, it is registered by calling
4858 register_new_assert_for. That function keeps track of all the
4859 registered assertions to prevent adding unnecessary assertions.
4860 For instance, if a pointer P_4 is dereferenced more than once in a
4861 dominator tree, only the location dominating all the dereference of
4862 P_4 will receive an ASSERT_EXPR.
4864 If this function returns true, then it means that there are names
4865 for which we need to generate ASSERT_EXPRs. Those assertions are
4866 inserted by process_assert_insertions. */
/* NOTE(review): decimated listing -- intermediate lines (braces,
   declarations, `continue`/`else` branches, return) are missing.  */
4869 find_assert_locations_1 (basic_block bb, sbitmap live)
4871 gimple_stmt_iterator si;
4876 need_assert = false;
4877 last = last_stmt (bb);
4879 /* If BB's last statement is a conditional statement involving integer
4880 operands, determine if we need to add ASSERT_EXPRs. */
4882 && gimple_code (last) == GIMPLE_COND
4883 && !fp_predicate (last)
4884 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4885 need_assert |= find_conditional_asserts (bb, last);
4887 /* If BB's last statement is a switch statement involving integer
4888 operands, determine if we need to add ASSERT_EXPRs. */
4890 && gimple_code (last) == GIMPLE_SWITCH
4891 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4892 need_assert |= find_switch_asserts (bb, last);
4894 /* Traverse all the statements in BB marking used names and looking
4895 for statements that may infer assertions for their used operands. */
4896 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4902 stmt = gsi_stmt (si);
/* Debug statements must not influence liveness or assertions.  */
4904 if (is_gimple_debug (stmt))
4907 /* See if we can derive an assertion for any of STMT's operands. */
4908 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4911 enum tree_code comp_code;
4913 /* Mark OP in our live bitmap. */
4914 SET_BIT (live, SSA_NAME_VERSION (op));
4916 /* If OP is used in such a way that we can infer a value
4917 range for it, and we don't find a previous assertion for
4918 it, create a new assertion location node for OP. */
4919 if (infer_value_range (stmt, op, &comp_code, &value))
4921 /* If we are able to infer a nonzero value range for OP,
4922 then walk backwards through the use-def chain to see if OP
4923 was set via a typecast.
4925 If so, then we can also infer a nonzero value range
4926 for the operand of the NOP_EXPR. */
4927 if (comp_code == NE_EXPR && integer_zerop (value))
4930 gimple def_stmt = SSA_NAME_DEF_STMT (t);
/* Strip a chain of widening/narrowing NOP_EXPR casts of SSA names.  */
4932 while (is_gimple_assign (def_stmt)
4933 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
4935 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
4937 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
4939 t = gimple_assign_rhs1 (def_stmt);
4940 def_stmt = SSA_NAME_DEF_STMT (t);
4942 /* Note we want to register the assert for the
4943 operand of the NOP_EXPR after SI, not after the
4945 if (! has_single_use (t))
4947 register_new_assert_for (t, t, comp_code, value,
4954 /* If OP is used only once, namely in this STMT, don't
4955 bother creating an ASSERT_EXPR for it. Such an
4956 ASSERT_EXPR would do nothing but increase compile time. */
4957 if (!has_single_use (op))
4959 register_new_assert_for (op, op, comp_code, value,
4967 /* Traverse all PHI nodes in BB marking used operands. */
4968 for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si))
4970 use_operand_p arg_p;
4972 phi = gsi_stmt (si);
4974 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
4976 tree arg = USE_FROM_PTR (arg_p);
4977 if (TREE_CODE (arg) == SSA_NAME)
4978 SET_BIT (live, SSA_NAME_VERSION (arg));
4985 /* Do an RPO walk over the function computing SSA name liveness
4986 on-the-fly and deciding on assert expressions to insert.
4987 Returns true if there are assert expressions to be inserted. */
/* NOTE(review): decimated listing -- declarations of i, rpo_cnt, live,
   need_asserts and several braces/guards are on missing lines.  */
4990 find_assert_locations (void)
4992 int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4993 int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4994 int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4998 live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS);
4999 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
5000 for (i = 0; i < rpo_cnt; ++i)
5003 need_asserts = false;
/* Walk blocks in reverse RPO order; liveness flows from each block
   into its predecessors.  */
5004 for (i = rpo_cnt-1; i >= 0; --i)
5006 basic_block bb = BASIC_BLOCK (rpo[i]);
5012 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
5013 sbitmap_zero (live[rpo[i]]);
5016 /* Process BB and update the live information with uses in
5018 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
5020 /* Merge liveness into the predecessor blocks and free it. */
5021 if (!sbitmap_empty_p (live[rpo[i]]))
5024 FOR_EACH_EDGE (e, ei, bb->preds)
5026 int pred = e->src->index;
/* Back edges are skipped so the single RPO pass terminates.  */
5027 if (e->flags & EDGE_DFS_BACK)
5032 live[pred] = sbitmap_alloc (num_ssa_names);
5033 sbitmap_zero (live[pred]);
5035 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]);
5037 if (bb_rpo[pred] < pred_rpo)
5038 pred_rpo = bb_rpo[pred];
5041 /* Record the RPO number of the last visited block that needs
5042 live information from this block. */
5043 last_rpo[rpo[i]] = pred_rpo;
5047 sbitmap_free (live[rpo[i]]);
5048 live[rpo[i]] = NULL;
5051 /* We can free all successors live bitmaps if all their
5052 predecessors have been visited already. */
5053 FOR_EACH_EDGE (e, ei, bb->succs)
5054 if (last_rpo[e->dest->index] == i
5055 && live[e->dest->index])
5057 sbitmap_free (live[e->dest->index]);
5058 live[e->dest->index] = NULL;
5063 XDELETEVEC (bb_rpo);
5064 XDELETEVEC (last_rpo);
/* Release any liveness bitmaps still alive after the walk.  */
5065 for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i)
5067 sbitmap_free (live[i]);
5070 return need_asserts;
5073 /* Create an ASSERT_EXPR for NAME and insert it in the location
5074 indicated by LOC. Return true if we made any edge insertions. */
/* NOTE(review): decimated listing -- local declarations (cond,
   assert_stmt, stmt, e, ei), several braces and the return statements
   are on missing lines.  */
5077 process_assert_insertions_for (tree name, assert_locus_t loc)
5079 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5086 /* If we have X <=> X do not insert an assert expr for that. */
5087 if (loc->expr == loc->val)
5090 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
5091 assert_stmt = build_assert_expr_for (cond, name);
5094 /* We have been asked to insert the assertion on an edge. This
5095 is used only by COND_EXPR and SWITCH_EXPR assertions. */
5096 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
5097 || (gimple_code (gsi_stmt (loc->si))
5100 gsi_insert_on_edge (loc->e, assert_stmt);
5104 /* Otherwise, we can insert right after LOC->SI iff the
5105 statement must not be the last statement in the block. */
5106 stmt = gsi_stmt (loc->si);
5107 if (!stmt_ends_bb_p (stmt))
5109 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
5113 /* If STMT must be the last statement in BB, we can only insert new
5114 assertions on the non-abnormal edge out of BB. Note that since
5115 STMT is not control flow, there may only be one non-abnormal edge
5117 FOR_EACH_EDGE (e, ei, loc->bb->succs)
5118 if (!(e->flags & EDGE_ABNORMAL))
5120 gsi_insert_on_edge (e, assert_stmt);
5128 /* Process all the insertions registered for every name N_i registered
5129 in NEED_ASSERT_FOR. The list of assertions to be inserted are
5130 found in ASSERTS_FOR[i]. */
/* NOTE(review): decimated listing -- bitmap iterator declarations, the
   inner list-walk loop header and the counter update are on missing
   lines.  */
5133 process_assert_insertions (void)
5137 bool update_edges_p = false;
5138 int num_asserts = 0;
5140 if (dump_file && (dump_flags & TDF_DETAILS))
5141 dump_all_asserts (dump_file);
5143 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
5145 assert_locus_t loc = asserts_for[i];
5150 assert_locus_t next = loc->next;
5151 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
/* Edge insertions are deferred; commit them all at once here.  */
5159 gsi_commit_edge_inserts ();
5161 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
5166 /* Traverse the flowgraph looking for conditional jumps to insert range
5167 expressions. These range expressions are meant to provide information
5168 to optimizations that need to reason in terms of value ranges. They
5169 will not be expanded into RTL. For instance, given:
5178 this pass will transform the code into:
5184 x = ASSERT_EXPR <x, x < y>
5189 y = ASSERT_EXPR <y, x <= y>
5193 The idea is that once copy and constant propagation have run, other
5194 optimizations will be able to determine what ranges of values can 'x'
5195 take in different paths of the code, simply by checking the reaching
5196 definition of 'x'. */
/* NOTE(review): decimated listing -- the example code in the comment
   above and parts of the function body are on missing lines.  */
5199 insert_range_assertions (void)
5201 need_assert_for = BITMAP_ALLOC (NULL);
5202 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
5204 calculate_dominance_info (CDI_DOMINATORS);
5206 if (find_assert_locations ())
5208 process_assert_insertions ();
/* New ASSERT_EXPR definitions require incremental SSA update.  */
5209 update_ssa (TODO_update_ssa_no_phi);
5212 if (dump_file && (dump_flags & TDF_DETAILS))
5214 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
5215 dump_function_to_file (current_function_decl, dump_file, dump_flags);
5219 BITMAP_FREE (need_assert_for);
5222 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
5223 and "struct" hacks. If VRP can determine that the
5224 array subscript is a constant, check if it is outside valid
5225 range. If the array subscript is a RANGE, warn if it is
5226 non-overlapping with valid range.
5227 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
/* NOTE(review): decimated listing -- declarations (base), early
   returns, and several braces are on missing lines.  */
5230 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
5232 value_range_t* vr = NULL;
5233 tree low_sub, up_sub;
5234 tree low_bound, up_bound, up_bound_p1;
/* Suppress duplicate warnings on the same reference.  */
5237 if (TREE_NO_WARNING (ref))
5240 low_sub = up_sub = TREE_OPERAND (ref, 1);
5241 up_bound = array_ref_up_bound (ref);
5243 /* Can not check flexible arrays. */
5245 || TREE_CODE (up_bound) != INTEGER_CST
5248 /* Accesses to trailing arrays via pointers may access storage
5249 beyond the types array bounds. */
5250 base = get_base_address (ref);
5251 if (base && TREE_CODE (base) == MEM_REF)
5253 tree cref, next = NULL_TREE;
5255 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
5258 cref = TREE_OPERAND (ref, 0);
/* Look for a FIELD_DECL after this one; if none, the array is the
   trailing member of the record.  */
5259 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
5260 for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
5261 next && TREE_CODE (next) != FIELD_DECL;
5262 next = DECL_CHAIN (next))
5265 /* If this is the last field in a struct type or a field in a
5266 union type do not warn. */
5271 low_bound = array_ref_low_bound (ref);
5272 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);
5274 if (TREE_CODE (low_sub) == SSA_NAME)
5276 vr = get_value_range (low_sub);
5277 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
/* For a VR_RANGE compare the range max against the upper bound and
   the range min against the lower bound; swapped for anti-ranges.  */
5279 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
5280 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
5284 if (vr && vr->type == VR_ANTI_RANGE)
5286 if (TREE_CODE (up_sub) == INTEGER_CST
5287 && tree_int_cst_lt (up_bound, up_sub)
5288 && TREE_CODE (low_sub) == INTEGER_CST
5289 && tree_int_cst_lt (low_sub, low_bound))
5291 warning_at (location, OPT_Warray_bounds,
5292 "array subscript is outside array bounds");
5293 TREE_NO_WARNING (ref) = 1;
5296 else if (TREE_CODE (up_sub) == INTEGER_CST
5297 && (ignore_off_by_one
5298 ? (tree_int_cst_lt (up_bound, up_sub)
5299 && !tree_int_cst_equal (up_bound_p1, up_sub))
5300 : (tree_int_cst_lt (up_bound, up_sub)
5301 || tree_int_cst_equal (up_bound_p1, up_sub)))
5303 warning_at (location, OPT_Warray_bounds,
5304 "array subscript is above array bounds");
5305 TREE_NO_WARNING (ref) = 1;
5307 else if (TREE_CODE (low_sub) == INTEGER_CST
5308 && tree_int_cst_lt (low_sub, low_bound))
5310 warning_at (location, OPT_Warray_bounds,
5311 "array subscript is below array bounds");
5312 TREE_NO_WARNING (ref) = 1;
5316 /* Searches if the expr T, located at LOCATION computes
5317 address of an ARRAY_REF, and call check_array_ref on it. */
/* NOTE(review): decimated listing -- braces, early returns and the
   double_int idx declaration are on missing lines.  */
5320 search_for_addr_array (tree t, location_t location)
/* Follow SSA single-rhs copies back to the defining address
   expression.  */
5322 while (TREE_CODE (t) == SSA_NAME)
5324 gimple g = SSA_NAME_DEF_STMT (t);
5326 if (gimple_code (g) != GIMPLE_ASSIGN)
5329 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
5330 != GIMPLE_SINGLE_RHS)
5333 t = gimple_assign_rhs1 (g);
5337 /* We are only interested in addresses of ARRAY_REF's. */
5338 if (TREE_CODE (t) != ADDR_EXPR)
5341 /* Check each ARRAY_REFs in the reference chain. */
5344 if (TREE_CODE (t) == ARRAY_REF)
5345 check_array_ref (location, t, true /*ignore_off_by_one*/);
5347 t = TREE_OPERAND (t, 0);
5349 while (handled_component_p (t));
/* Also diagnose &array + constant-offset MEM_REF accesses.  */
5351 if (TREE_CODE (t) == MEM_REF
5352 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
5353 && !TREE_NO_WARNING (t))
5355 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
5356 tree low_bound, up_bound, el_sz;
5358 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
5359 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
5360 || !TYPE_DOMAIN (TREE_TYPE (tem)))
5363 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5364 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5365 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
5367 || TREE_CODE (low_bound) != INTEGER_CST
5369 || TREE_CODE (up_bound) != INTEGER_CST
5371 || TREE_CODE (el_sz) != INTEGER_CST
/* Convert the byte offset to an element index before comparing
   against the array domain.  */
5374 idx = mem_ref_offset (t);
5375 idx = double_int_sdiv (idx, tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
5376 if (double_int_scmp (idx, double_int_zero) < 0)
5378 warning_at (location, OPT_Warray_bounds,
5379 "array subscript is below array bounds");
5380 TREE_NO_WARNING (t) = 1;
5382 else if (double_int_scmp (idx,
5385 (tree_to_double_int (up_bound),
5387 (tree_to_double_int (low_bound))),
5388 double_int_one)) > 0)
5390 warning_at (location, OPT_Warray_bounds,
5391 "array subscript is above array bounds");
5392 TREE_NO_WARNING (t) = 1;
5397 /* walk_tree() callback that checks if *TP is
5398 an ARRAY_REF inside an ADDR_EXPR (in which an array
5399 subscript one outside the valid range is allowed). Call
5400 check_array_ref for each ARRAY_REF found. The location is
/* NOTE(review): decimated listing -- the static keyword/return type,
   braces and the return statement are on missing lines.  */
5404 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5407 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5408 location_t location;
/* Prefer the expression's own location; fall back to the statement
   location passed through WI->INFO.  */
5410 if (EXPR_HAS_LOCATION (t))
5411 location = EXPR_LOCATION (t);
5414 location_t *locp = (location_t *) wi->info;
5418 *walk_subtree = TRUE;
5420 if (TREE_CODE (t) == ARRAY_REF)
5421 check_array_ref (location, t, false /*ignore_off_by_one*/);
5423 if (TREE_CODE (t) == MEM_REF
5424 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
5425 search_for_addr_array (TREE_OPERAND (t, 0), location);
/* ADDR_EXPR operands are handled by search_for_addr_array; do not
   descend into them here.  */
5427 if (TREE_CODE (t) == ADDR_EXPR)
5428 *walk_subtree = FALSE;
5433 /* Walk over all statements of all reachable BBs and call check_array_bounds
/* NOTE(review): decimated listing -- the FOR_EACH_BB loop header, edge
   iterator declarations and several braces are on missing lines.  */
5437 check_all_array_refs (void)
5440 gimple_stmt_iterator si;
5446 bool executable = false;
5448 /* Skip blocks that were found to be unreachable. */
5449 FOR_EACH_EDGE (e, ei, bb->preds)
5450 executable |= !!(e->flags & EDGE_EXECUTABLE);
5454 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5456 gimple stmt = gsi_stmt (si);
5457 struct walk_stmt_info wi;
/* Without a location there is nowhere meaningful to warn.  */
5458 if (!gimple_has_location (stmt))
5461 if (is_gimple_call (stmt))
5464 size_t n = gimple_call_num_args (stmt);
5465 for (i = 0; i < n; i++)
5467 tree arg = gimple_call_arg (stmt, i);
5468 search_for_addr_array (arg, gimple_location (stmt));
5473 memset (&wi, 0, sizeof (wi));
5474 wi.info = CONST_CAST (void *, (const void *)
5475 gimple_location_ptr (stmt));
5477 walk_gimple_op (gsi_stmt (si),
5485 /* Convert range assertion expressions into the implied copies and
5486 copy propagate away the copies. Doing the trivial copy propagation
5487 here avoids the need to run the full copy propagation pass after
5490 FIXME, this will eventually lead to copy propagation removing the
5491 names that had useful range information attached to them. For
5492 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5493 then N_i will have the range [3, +INF].
5495 However, by converting the assertion into the implied copy
5496 operation N_i = N_j, we will then copy-propagate N_j into the uses
5497 of N_i and lose the range information. We may want to hold on to
5498 ASSERT_EXPRs a little while longer as the ranges could be used in
5499 things like jump threading.
5501 The problem with keeping ASSERT_EXPRs around is that passes after
5502 VRP need to handle them appropriately.
5504 Another approach would be to make the range information a first
5505 class property of the SSA_NAME so that it can be queried from
5506 any pass. This is made somewhat more complex by the need for
5507 multiple ranges to be associated with one SSA_NAME. */
/* NOTE(review): decimated listing -- the FOR_EACH_BB loop header,
   declarations (var, use_stmt) and the else-branch advancing the
   iterator are on missing lines.  */
5510 remove_range_assertions (void)
5513 gimple_stmt_iterator si;
5515 /* Note that the BSI iterator bump happens at the bottom of the
5516 loop and no bump is necessary if we're removing the statement
5517 referenced by the current BSI. */
5519 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
5521 gimple stmt = gsi_stmt (si);
5524 if (is_gimple_assign (stmt)
5525 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR
5527 tree rhs = gimple_assign_rhs1 (stmt);
5529 tree cond = fold (ASSERT_EXPR_COND (rhs));
5530 use_operand_p use_p;
5531 imm_use_iterator iter;
/* A provably-false assertion would mean dead code reached VRP.  */
5533 gcc_assert (cond != boolean_false_node);
5535 /* Propagate the RHS into every use of the LHS. */
5536 var = ASSERT_EXPR_VAR (rhs);
5537 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
5538 gimple_assign_lhs (stmt))
5539 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5541 SET_USE (use_p, var);
5542 gcc_assert (TREE_CODE (var) == SSA_NAME);
5545 /* And finally, remove the copy, it is not needed. */
5546 gsi_remove (&si, true);
5547 release_defs (stmt);
5555 /* Return true if STMT is interesting for VRP. */
5558 stmt_interesting_for_vrp (gimple stmt)
5560 if (gimple_code (stmt) == GIMPLE_PHI
5561 && is_gimple_reg (gimple_phi_result (stmt))
5562 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))
5563 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))))
5565 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5567 tree lhs = gimple_get_lhs (stmt);
5569 /* In general, assignments with virtual operands are not useful
5570 for deriving ranges, with the obvious exception of calls to
5571 builtin functions. */
5572 if (lhs && TREE_CODE (lhs) == SSA_NAME
5573 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5574 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5575 && ((is_gimple_call (stmt)
5576 && gimple_call_fndecl (stmt) != NULL_TREE
5577 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
5578 || !gimple_vuse (stmt)))
5581 else if (gimple_code (stmt) == GIMPLE_COND
5582 || gimple_code (stmt) == GIMPLE_SWITCH)
5589 /* Initialize local data structures for VRP. */
/* NOTE(review): decimated listing -- the FOR_EACH_BB loop header,
   declarations (def, i) and several braces/else keywords are on
   missing lines.  */
5592 vrp_initialize (void)
5596 vr_value = XCNEWVEC (value_range_t *, num_ssa_names);
5597 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
5601 gimple_stmt_iterator si;
5603 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5605 gimple phi = gsi_stmt (si);
/* Uninteresting PHIs get VARYING up front and are never simulated
   again by the propagator.  */
5606 if (!stmt_interesting_for_vrp (phi))
5608 tree lhs = PHI_RESULT (phi);
5609 set_value_range_to_varying (get_value_range (lhs));
5610 prop_set_simulate_again (phi, false);
5613 prop_set_simulate_again (phi, true);
5616 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5618 gimple stmt = gsi_stmt (si);
5620 /* If the statement is a control insn, then we do not
5621 want to avoid simulating the statement once. Failure
5622 to do so means that those edges will never get added. */
5623 if (stmt_ends_bb_p (stmt))
5624 prop_set_simulate_again (stmt, true);
5625 else if (!stmt_interesting_for_vrp (stmt))
5629 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
5630 set_value_range_to_varying (get_value_range (def));
5631 prop_set_simulate_again (stmt, false);
5634 prop_set_simulate_again (stmt, true);
5639 /* Return the singleton value-range for NAME or NAME. */
5642 vrp_valueize (tree name)
5644 if (TREE_CODE (name) == SSA_NAME)
5646 value_range_t *vr = get_value_range (name);
5647 if (vr->type == VR_RANGE
5648 && (vr->min == vr->max
5649 || operand_equal_p (vr->min, vr->max, 0)))
5655 /* Visit assignment STMT. If it produces an interesting range, record
5656 the SSA name in *OUTPUT_P. */
/* NOTE(review): decimated listing -- declarations (lhs, def, iter),
   the *output_p store and several braces are on missing lines.  */
5658 static enum ssa_prop_result
5659 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
5663 enum gimple_code code = gimple_code (stmt);
5664 lhs = gimple_get_lhs (stmt);
5666 /* We only keep track of ranges in integral and pointer types. */
5667 if (TREE_CODE (lhs) == SSA_NAME
5668 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5669 /* It is valid to have NULL MIN/MAX values on a type. See
5670 build_range_type. */
5671 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
5672 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
5673 || POINTER_TYPE_P (TREE_TYPE (lhs))))
5675 value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
5677 /* Try folding the statement to a constant first. */
5678 tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
/* Overflow infinities must not leak into the lattice as constants.  */
5679 if (tem && !is_overflow_infinity (tem))
5680 set_value_range (&new_vr, VR_RANGE, tem, tem, NULL);
5681 /* Then dispatch to value-range extracting functions. */
5682 else if (code == GIMPLE_CALL)
5683 extract_range_basic (&new_vr, stmt);
5685 extract_range_from_assignment (&new_vr, stmt);
5687 if (update_value_range (lhs, &new_vr))
5691 if (dump_file && (dump_flags & TDF_DETAILS))
5693 fprintf (dump_file, "Found new range for ");
5694 print_generic_expr (dump_file, lhs, 0);
5695 fprintf (dump_file, ": ");
5696 dump_value_range (dump_file, &new_vr);
5697 fprintf (dump_file, "\n\n");
5700 if (new_vr.type == VR_VARYING)
5701 return SSA_PROP_VARYING;
5703 return SSA_PROP_INTERESTING;
5706 return SSA_PROP_NOT_INTERESTING;
5709 /* Every other statement produces no useful ranges. */
5710 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
5711 set_value_range_to_varying (get_value_range (def));
5713 return SSA_PROP_VARYING;
5716 /* Helper that gets the value range of the SSA_NAME with version I
5717 or a symbolic range containing the SSA_NAME only if the value range
5718 is varying or undefined. */
5720 static inline value_range_t
5721 get_vr_for_comparison (int i)
5723 value_range_t vr = *(vr_value[i]);
5725 /* If name N_i does not have a valid range, use N_i as its own
5726 range. This allows us to compare against names that may
5727 have N_i in their ranges. */
5728 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
5731 vr.min = ssa_name (i);
5732 vr.max = ssa_name (i);
5738 /* Compare all the value ranges for names equivalent to VAR with VAL
5739 using comparison code COMP. Return the same value returned by
5740 compare_range_with_value, including the setting of
5741 *STRICT_OVERFLOW_P. */
/* NOTE(review): decimated listing -- declarations (retval, t, sop, e,
   i, bi), early returns and braces are on missing lines.  */
5744 compare_name_with_value (enum tree_code comp, tree var, tree val,
5745 bool *strict_overflow_p)
5751 int used_strict_overflow;
5753 value_range_t equiv_vr;
5755 /* Get the set of equivalences for VAR. */
5756 e = get_value_range (var)->equiv;
5758 /* Start at -1. Set it to 0 if we do a comparison without relying
5759 on overflow, or 1 if all comparisons rely on overflow. */
5760 used_strict_overflow = -1;
5762 /* Compare vars' value range with val. */
5763 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
5765 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
5767 used_strict_overflow = sop ? 1 : 0;
5769 /* If the equiv set is empty we have done all work we need to do. */
5773 && used_strict_overflow > 0)
5774 *strict_overflow_p = true;
/* Otherwise also try every name in the equivalence set.  */
5778 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
5780 equiv_vr = get_vr_for_comparison (i);
5782 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
5785 /* If we get different answers from different members
5786 of the equivalence set this check must be in a dead
5787 code region. Folding it to a trap representation
5788 would be correct here. For now just return don't-know. */
5798 used_strict_overflow = 0;
5799 else if (used_strict_overflow < 0)
5800 used_strict_overflow = 1;
5805 && used_strict_overflow > 0)
5806 *strict_overflow_p = true;
5812 /* Given a comparison code COMP and names N1 and N2, compare all the
5813 ranges equivalent to N1 against all the ranges equivalent to N2
5814 to determine the value of N1 COMP N2. Return the same value
5815 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
5816 whether we relied on an overflow infinity in the comparison. */
/* NOTE(review): decimated listing -- declarations (t, retval, e1, e2,
   i1, i2, sop), the s_e1/s_e2 fallback assignments, loop-exit gotos
   and several braces are on missing lines.  */
5820 compare_names (enum tree_code comp, tree n1, tree n2,
5821 bool *strict_overflow_p)
5825 bitmap_iterator bi1, bi2;
5827 int used_strict_overflow;
/* Lazily-created scratch bitmaps shared across all calls; used when a
   name has no equivalence set of its own.  */
5828 static bitmap_obstack *s_obstack = NULL;
5829 static bitmap s_e1 = NULL, s_e2 = NULL;
5831 /* Compare the ranges of every name equivalent to N1 against the
5832 ranges of every name equivalent to N2. */
5833 e1 = get_value_range (n1)->equiv;
5834 e2 = get_value_range (n2)->equiv;
5836 /* Use the fake bitmaps if e1 or e2 are not available. */
5837 if (s_obstack == NULL)
5839 s_obstack = XNEW (bitmap_obstack);
5840 bitmap_obstack_initialize (s_obstack);
5841 s_e1 = BITMAP_ALLOC (s_obstack);
5842 s_e2 = BITMAP_ALLOC (s_obstack);
5849 /* Add N1 and N2 to their own set of equivalences to avoid
5850 duplicating the body of the loop just to check N1 and N2
5852 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
5853 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
5855 /* If the equivalence sets have a common intersection, then the two
5856 names can be compared without checking their ranges. */
5857 if (bitmap_intersect_p (e1, e2))
/* The temporary bits must be removed before returning so the
   equivalence sets are left unmodified.  */
5859 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5860 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5862 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
5864 : boolean_false_node;
5867 /* Start at -1. Set it to 0 if we do a comparison without relying
5868 on overflow, or 1 if all comparisons rely on overflow. */
5869 used_strict_overflow = -1;
5871 /* Otherwise, compare all the equivalent ranges. First, add N1 and
5872 N2 to their own set of equivalences to avoid duplicating the body
5873 of the loop just to check N1 and N2 ranges. */
5874 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
5876 value_range_t vr1 = get_vr_for_comparison (i1);
5878 t = retval = NULL_TREE;
5879 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
5883 value_range_t vr2 = get_vr_for_comparison (i2);
5885 t = compare_ranges (comp, &vr1, &vr2, &sop);
5888 /* If we get different answers from different members
5889 of the equivalence set this check must be in a dead
5890 code region. Folding it to a trap representation
5891 would be correct here. For now just return don't-know. */
5895 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5896 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5902 used_strict_overflow = 0;
5903 else if (used_strict_overflow < 0)
5904 used_strict_overflow = 1;
5910 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5911 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5912 if (used_strict_overflow > 0)
5913 *strict_overflow_p = true;
5918 /* None of the equivalent ranges are useful in computing this
5920 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5921 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5925 /* Helper function for vrp_evaluate_conditional_warnv. */
5928 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
5930 bool * strict_overflow_p)
5932 value_range_t *vr0, *vr1;
5934 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
5935 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
5938 return compare_ranges (code, vr0, vr1, strict_overflow_p);
5939 else if (vr0 && vr1 == NULL)
5940 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
5941 else if (vr0 == NULL && vr1)
5942 return (compare_range_with_value
5943 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
5947 /* Helper function for vrp_evaluate_conditional_warnv. */
/* NOTE(review): decimated listing -- the return type, declaration of
   ret, braces, the use_equiv_p guard and return statements are on
   missing lines.  */
5950 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
5951 tree op1, bool use_equiv_p,
5952 bool *strict_overflow_p, bool *only_ranges)
5956 *only_ranges = true;
5958 /* We only deal with integral and pointer types. */
5959 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
5960 && !POINTER_TYPE_P (TREE_TYPE (op0)))
/* First try the cheap range-only evaluation; fall back to the
   equivalence-set comparisons only if that fails.  */
5966 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
5967 (code, op0, op1, strict_overflow_p)))
5969 *only_ranges = false;
5970 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
5971 return compare_names (code, op0, op1, strict_overflow_p);
5972 else if (TREE_CODE (op0) == SSA_NAME)
5973 return compare_name_with_value (code, op0, op1, strict_overflow_p);
5974 else if (TREE_CODE (op1) == SSA_NAME)
5975 return (compare_name_with_value
5976 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
5979 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
5984 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
5985 information. Return NULL if the conditional can not be evaluated.
5986 The ranges of all the names equivalent with the operands in COND
5987 will be used when trying to compute the value. If the result is
5988 based on undefined signed overflow, issue a warning if
/* NOTE(review): decimated listing -- declarations (ret, sop,
   only_ranges), braces, early returns and the final return are on
   missing lines.  */
5992 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
5998 /* Some passes and foldings leak constants with overflow flag set
5999 into the IL. Avoid doing wrong things with these and bail out. */
6000 if ((TREE_CODE (op0) == INTEGER_CST
6001 && TREE_OVERFLOW (op0))
6002 || (TREE_CODE (op1) == INTEGER_CST
6003 && TREE_OVERFLOW (op1)))
6007 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
/* SOP set means the result relied on undefined signed overflow; choose
   the warning text based on whether the conditional folded fully.  */
6012 enum warn_strict_overflow_code wc;
6013 const char* warnmsg;
6015 if (is_gimple_min_invariant (ret))
6017 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
6018 warnmsg = G_("assuming signed overflow does not occur when "
6019 "simplifying conditional to constant");
6023 wc = WARN_STRICT_OVERFLOW_COMPARISON;
6024 warnmsg = G_("assuming signed overflow does not occur when "
6025 "simplifying conditional");
6028 if (issue_strict_overflow_warning (wc))
6030 location_t location;
6032 if (!gimple_has_location (stmt))
6033 location = input_location;
6035 location = gimple_location (stmt);
6036 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
6040 if (warn_type_limits
6041 && ret && only_ranges
6042 && TREE_CODE_CLASS (code) == tcc_comparison
6043 && TREE_CODE (op0) == SSA_NAME)
6045 /* If the comparison is being folded and the operand on the LHS
6046 is being compared against a constant value that is outside of
6047 the natural range of OP0's type, then the predicate will
6048 always fold regardless of the value of OP0. If -Wtype-limits
6049 was specified, emit a warning. */
6050 tree type = TREE_TYPE (op0);
6051 value_range_t *vr0 = get_value_range (op0);
6053 if (vr0->type != VR_VARYING
6054 && INTEGRAL_TYPE_P (type)
6055 && vrp_val_is_min (vr0->min)
6056 && vrp_val_is_max (vr0->max)
6057 && is_gimple_min_invariant (op1))
6059 location_t location;
6061 if (!gimple_has_location (stmt))
6062 location = input_location;
6064 location = gimple_location (stmt);
6066 warning_at (location, OPT_Wtype_limits,
6068 ? G_("comparison always false "
6069 "due to limited range of data type")
6070 : G_("comparison always true "
6071 "due to limited range of data type"));
6079 /* Visit conditional statement STMT. If we can determine which edge
6080 will be taken out of STMT's basic block, record it in
6081 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6082 SSA_PROP_VARYING. */
/* NOTE(review): some lines are elided in this excerpt (local
   declarations and intermediate braces among them).  */
6084 static enum ssa_prop_result
6085 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
6090 *taken_edge_p = NULL;
/* Dump the predicate and the known range of every SSA use when
   detailed dumping is requested.  */
6092 if (dump_file && (dump_flags & TDF_DETAILS))
6097 fprintf (dump_file, "\nVisiting conditional with predicate: ");
6098 print_gimple_stmt (dump_file, stmt, 0, 0);
6099 fprintf (dump_file, "\nWith known ranges\n");
6101 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
6103 fprintf (dump_file, "\t");
6104 print_generic_expr (dump_file, use, 0);
6105 fprintf (dump_file, ": ");
6106 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
6109 fprintf (dump_file, "\n");
6112 /* Compute the value of the predicate COND by checking the known
6113 ranges of each of its operands.
6115 Note that we cannot evaluate all the equivalent ranges here
6116 because those ranges may not yet be final and with the current
6117 propagation strategy, we cannot determine when the value ranges
6118 of the names in the equivalence set have changed.
6120 For instance, given the following code fragment
6124 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
6128 Assume that on the first visit to i_14, i_5 has the temporary
6129 range [8, 8] because the second argument to the PHI function is
6130 not yet executable. We derive the range ~[0, 0] for i_14 and the
6131 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
6132 the first time, since i_14 is equivalent to the range [8, 8], we
6133 determine that the predicate is always false.
6135 On the next round of propagation, i_13 is determined to be
6136 VARYING, which causes i_5 to drop down to VARYING. So, another
6137 visit to i_14 is scheduled. In this second visit, we compute the
6138 exact same range and equivalence set for i_14, namely ~[0, 0] and
6139 { i_5 }. But we did not have the previous range for i_5
6140 registered, so vrp_visit_assignment thinks that the range for
6141 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
6142 is not visited again, which stops propagation from visiting
6143 statements in the THEN clause of that if().
6145 To properly fix this we would need to keep the previous range
6146 value for the names in the equivalence set. This way we would've
6147 discovered that from one visit to the other i_5 changed from
6148 range [8, 8] to VR_VARYING.
6150 However, fixing this apparent limitation may not be worth the
6151 additional checking. Testing on several code bases (GCC, DLV,
6152 MICO, TRAMP3D and SPEC2000) showed that doing this results in
6153 4 more predicates folded in SPEC. */
/* Evaluate the predicate without equivalences (see comment above).  */
6156 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
6157 gimple_cond_lhs (stmt),
6158 gimple_cond_rhs (stmt),
/* A constant result determines the taken edge.  */
6163 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
/* Elided branch: the result depended on undefined signed overflow,
   so it is discarded rather than used to fold the branch.  */
6166 if (dump_file && (dump_flags & TDF_DETAILS))
6168 "\nIgnoring predicate evaluation because "
6169 "it assumes that signed overflow is undefined");
6174 if (dump_file && (dump_flags & TDF_DETAILS))
6176 fprintf (dump_file, "\nPredicate evaluates to: ");
6177 if (val == NULL_TREE)
6178 fprintf (dump_file, "DON'T KNOW\n");
6180 print_generic_stmt (dump_file, val, 0);
6183 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
6186 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
6187 that includes the value VAL. The search is restricted to the range
6188 [START_IDX, n - 1] where n is the size of VEC.
6190 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
6193 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
6194 it is placed in IDX and false is returned.
6196 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
/* Binary search over the (sorted) case labels of a GIMPLE_SWITCH.
   NOTE(review): several lines of the loop body are elided in this
   excerpt.  */
6200 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
6202 size_t n = gimple_switch_num_labels (stmt);
6205 /* Find case label for minimum of the value range or the next one.
6206 At each iteration we are searching in [low, high - 1]. */
6208 for (low = start_idx, high = n; high != low; )
6212 /* Note that i != high, so we never ask for n. */
6213 size_t i = (high + low) / 2;
6214 t = gimple_switch_label (stmt, i);
6216 /* Cache the result of comparing CASE_LOW and val. */
6217 cmp = tree_int_cst_compare (CASE_LOW (t), val);
6221 /* Ranges cannot be empty. */
/* VAL falls inside a case range [CASE_LOW, CASE_HIGH].  */
6230 if (CASE_HIGH (t) != NULL
6231 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
6243 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
6244 for values between MIN and MAX. The first index is placed in MIN_IDX. The
6245 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
6246 then MAX_IDX < MIN_IDX.
6247 Returns true if the default label is not needed. */
/* NOTE(review): some lines are elided in this excerpt.  */
6250 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
/* Locate the first label covering MIN and the last label at or
   below MAX; each search tells us whether the default may be hit
   at that end.  */
6254 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
6255 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
6259 && max_take_default)
6261 /* Only the default case label reached.
6262 Return an empty range. */
6269 bool take_default = min_take_default || max_take_default;
6273 if (max_take_default)
6276 /* If the case label range is continuous, we do not need
6277 the default case label. Verify that. */
6278 high = CASE_LOW (gimple_switch_label (stmt, i));
6279 if (CASE_HIGH (gimple_switch_label (stmt, i)))
6280 high = CASE_HIGH (gimple_switch_label (stmt, i));
6281 for (k = i + 1; k <= j; ++k)
6283 low = CASE_LOW (gimple_switch_label (stmt, k));
/* A gap between consecutive labels means values can fall
   through to the default.  */
6284 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
6286 take_default = true;
6290 if (CASE_HIGH (gimple_switch_label (stmt, k)))
6291 high = CASE_HIGH (gimple_switch_label (stmt, k));
6296 return !take_default;
6300 /* Visit switch statement STMT. If we can determine which edge
6301 will be taken out of STMT's basic block, record it in
6302 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6303 SSA_PROP_VARYING. */
/* NOTE(review): some lines are elided in this excerpt (local
   declarations and several braces among them).  */
6305 static enum ssa_prop_result
6306 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
6310 size_t i = 0, j = 0;
6313 *taken_edge_p = NULL;
6314 op = gimple_switch_index (stmt);
/* Without an SSA_NAME index we have no range information.  */
6315 if (TREE_CODE (op) != SSA_NAME)
6316 return SSA_PROP_VARYING;
6318 vr = get_value_range (op);
6319 if (dump_file && (dump_flags & TDF_DETAILS))
6321 fprintf (dump_file, "\nVisiting switch expression with operand ");
6322 print_generic_expr (dump_file, op, 0);
6323 fprintf (dump_file, " with known range ");
6324 dump_value_range (dump_file, vr);
6325 fprintf (dump_file, "\n");
/* Only concrete numeric VR_RANGEs can be matched against labels.  */
6328 if (vr->type != VR_RANGE
6329 || symbolic_range_p (vr))
6330 return SSA_PROP_VARYING;
6332 /* Find the single edge that is taken from the switch expression. */
6333 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6335 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
6339 gcc_assert (take_default);
6340 val = gimple_switch_default_label (stmt);
6344 /* Check if labels with index i to j and maybe the default label
6345 are all reaching the same label. */
6347 val = gimple_switch_label (stmt, i);
6349 && CASE_LABEL (gimple_switch_default_label (stmt))
6350 != CASE_LABEL (val))
6352 if (dump_file && (dump_flags & TDF_DETAILS))
6353 fprintf (dump_file, " not a single destination for this "
6355 return SSA_PROP_VARYING;
6357 for (++i; i <= j; ++i)
6359 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
6361 if (dump_file && (dump_flags & TDF_DETAILS))
6362 fprintf (dump_file, " not a single destination for this "
6364 return SSA_PROP_VARYING;
/* All reachable labels agree: the outgoing edge is determined.  */
6369 *taken_edge_p = find_edge (gimple_bb (stmt),
6370 label_to_block (CASE_LABEL (val)));
6372 if (dump_file && (dump_flags & TDF_DETAILS))
6374 fprintf (dump_file, " will take edge to ");
6375 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
6378 return SSA_PROP_INTERESTING;
6382 /* Evaluate statement STMT. If the statement produces a useful range,
6383 return SSA_PROP_INTERESTING and record the SSA name with the
6384 interesting range into *OUTPUT_P.
6386 If STMT is a conditional branch and we can determine its truth
6387 value, the taken edge is recorded in *TAKEN_EDGE_P.
6389 If STMT produces a varying value, return SSA_PROP_VARYING. */
/* Main per-statement dispatcher for the VRP propagation engine.
   NOTE(review): some lines are elided in this excerpt.  */
6391 static enum ssa_prop_result
6392 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
6397 if (dump_file && (dump_flags & TDF_DETAILS))
6399 fprintf (dump_file, "\nVisiting statement:\n");
6400 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
6401 fprintf (dump_file, "\n");
6404 if (!stmt_interesting_for_vrp (stmt))
6405 gcc_assert (stmt_ends_bb_p (stmt));
6406 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6408 /* In general, assignments with virtual operands are not useful
6409 for deriving ranges, with the obvious exception of calls to
6410 builtin functions. */
6411 if ((is_gimple_call (stmt)
6412 && gimple_call_fndecl (stmt) != NULL_TREE
6413 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
6414 || !gimple_vuse (stmt))
6415 return vrp_visit_assignment_or_call (stmt, output_p);
6417 else if (gimple_code (stmt) == GIMPLE_COND)
6418 return vrp_visit_cond_stmt (stmt, taken_edge_p)
6419 else if (gimple_code (stmt) == GIMPLE_SWITCH)
6420 return vrp_visit_switch_stmt (stmt, taken_edge_p);
6422 /* All other statements produce nothing of interest for VRP, so mark
6423 their outputs varying and prevent further simulation. */
6424 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6425 set_value_range_to_varying (get_value_range (def));
6427 return SSA_PROP_VARYING;
6431 /* Meet operation for value ranges. Given two value ranges VR0 and
6432 VR1, store in VR0 a range that contains both VR0 and VR1. This
6433 may not be the smallest possible such range. */
/* Lattice meet used when merging ranges at PHI nodes.  UNDEFINED is
   the identity, VARYING is absorbing.  NOTE(review): some lines are
   elided in this excerpt (min/max assignments after the compares and
   several goto/brace lines among them).  */
6436 vrp_meet (value_range_t *vr0, value_range_t *vr1)
6438 if (vr0->type == VR_UNDEFINED)
6440 copy_value_range (vr0, vr1);
6444 if (vr1->type == VR_UNDEFINED)
6446 /* Nothing to do. VR0 already has the resulting range. */
6450 if (vr0->type == VR_VARYING)
6452 /* Nothing to do. VR0 already has the resulting range. */
6456 if (vr1->type == VR_VARYING)
6458 set_value_range_to_varying (vr0);
6462 if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
6467 /* Compute the convex hull of the ranges. The lower limit of
6468 the new range is the minimum of the two ranges. If they
6469 cannot be compared, then give up. */
6470 cmp = compare_values (vr0->min, vr1->min);
6471 if (cmp == 0 || cmp == 1)
6478 /* Similarly, the upper limit of the new range is the maximum
6479 of the two ranges. If they cannot be compared, then
6481 cmp = compare_values (vr0->max, vr1->max);
6482 if (cmp == 0 || cmp == -1)
6489 /* Check for useless ranges. */
/* A hull spanning the whole type (possibly via overflow
   infinities) carries no information.  */
6490 if (INTEGRAL_TYPE_P (TREE_TYPE (min))
6491 && ((vrp_val_is_min (min) || is_overflow_infinity (min))
6492 && (vrp_val_is_max (max) || is_overflow_infinity (max))))
6495 /* The resulting set of equivalences is the intersection of
6497 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6498 bitmap_and_into (vr0->equiv, vr1->equiv);
6499 else if (vr0->equiv && !vr1->equiv)
6500 bitmap_clear (vr0->equiv);
6502 set_value_range (vr0, vr0->type, min, max, vr0->equiv);
6504 else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
6506 /* Two anti-ranges meet only if their complements intersect.
6507 Only handle the case of identical ranges. */
6508 if (compare_values (vr0->min, vr1->min) == 0
6509 && compare_values (vr0->max, vr1->max) == 0
6510 && compare_values (vr0->min, vr0->max) == 0)
6512 /* The resulting set of equivalences is the intersection of
6514 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6515 bitmap_and_into (vr0->equiv, vr1->equiv);
6516 else if (vr0->equiv && !vr1->equiv)
6517 bitmap_clear (vr0->equiv);
6522 else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
6524 /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
6525 only handle the case where the ranges have an empty intersection.
6526 The result of the meet operation is the anti-range. */
6527 if (!symbolic_range_p (vr0)
6528 && !symbolic_range_p (vr1)
6529 && !value_ranges_intersect_p (vr0, vr1))
6531 /* Copy most of VR1 into VR0. Don't copy VR1's equivalence
6532 set. We need to compute the intersection of the two
6533 equivalence sets. */
6534 if (vr1->type == VR_ANTI_RANGE)
6535 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);
6537 /* The resulting set of equivalences is the intersection of
6539 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6540 bitmap_and_into (vr0->equiv, vr1->equiv);
6541 else if (vr0->equiv && !vr1->equiv)
6542 bitmap_clear (vr0->equiv);
6553 /* Failed to find an efficient meet. Before giving up and setting
6554 the result to VARYING, see if we can at least derive a useful
6555 anti-range. FIXME, all this nonsense about distinguishing
6556 anti-ranges from ranges is necessary because of the odd
6557 semantics of range_includes_zero_p and friends. */
/* If both inputs exclude zero, the meet still excludes zero.  */
6558 if (!symbolic_range_p (vr0)
6559 && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0))
6560 || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0)))
6561 && !symbolic_range_p (vr1)
6562 && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1))
6563 || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1))))
6565 set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));
6567 /* Since this meet operation did not result from the meeting of
6568 two equivalent names, VR0 cannot have any equivalences. */
6570 bitmap_clear (vr0->equiv);
6573 set_value_range_to_varying (vr0);
6577 /* Visit all arguments for PHI node PHI that flow through executable
6578 edges. If a valid value range can be derived from all the incoming
6579 value ranges, set a new range for the LHS of PHI. */
/* NOTE(review): some lines are elided in this excerpt (among them the
   edge counting, the constant-argument path, and goto targets).  */
6581 static enum ssa_prop_result
6582 vrp_visit_phi_node (gimple phi)
6585 tree lhs = PHI_RESULT (phi);
6586 value_range_t *lhs_vr = get_value_range (lhs);
6587 value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6588 int edges, old_edges;
6591 if (dump_file && (dump_flags & TDF_DETAILS))
6593 fprintf (dump_file, "\nVisiting PHI node: ");
6594 print_gimple_stmt (dump_file, phi, 0, dump_flags);
/* Meet the ranges of all arguments arriving over executable edges.  */
6598 for (i = 0; i < gimple_phi_num_args (phi); i++)
6600 edge e = gimple_phi_arg_edge (phi, i);
6602 if (dump_file && (dump_flags & TDF_DETAILS))
6605 "\n Argument #%d (%d -> %d %sexecutable)\n",
6606 (int) i, e->src->index, e->dest->index,
6607 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
6610 if (e->flags & EDGE_EXECUTABLE)
6612 tree arg = PHI_ARG_DEF (phi, i);
6613 value_range_t vr_arg;
6617 if (TREE_CODE (arg) == SSA_NAME)
6619 vr_arg = *(get_value_range (arg));
/* Constant argument: strip any overflow flag before using it
   as a singleton range.  */
6623 if (is_overflow_infinity (arg))
6625 arg = copy_node (arg);
6626 TREE_OVERFLOW (arg) = 0;
6629 vr_arg.type = VR_RANGE;
6632 vr_arg.equiv = NULL;
6635 if (dump_file && (dump_flags & TDF_DETAILS))
6637 fprintf (dump_file, "\t");
6638 print_generic_expr (dump_file, arg, dump_flags);
6639 fprintf (dump_file, "\n\tValue: ");
6640 dump_value_range (dump_file, &vr_arg);
6641 fprintf (dump_file, "\n");
6644 vrp_meet (&vr_result, &vr_arg);
/* VARYING is absorbing for the meet: stop early.  */
6646 if (vr_result.type == VR_VARYING)
6651 if (vr_result.type == VR_VARYING)
6654 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
6655 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
6657 /* To prevent infinite iterations in the algorithm, derive ranges
6658 when the new value is slightly bigger or smaller than the
6659 previous one. We don't do this if we have seen a new executable
6660 edge; this helps us avoid an overflow infinity for conditionals
6661 which are not in a loop. */
6663 && gimple_phi_num_args (phi) > 1
6664 && edges == old_edges
6666 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
6667 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
6669 /* For non VR_RANGE or for pointers fall back to varying if
6670 the range changed. */
6671 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
6672 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6673 && (cmp_min != 0 || cmp_max != 0))
6676 /* If the new minimum is smaller or larger than the previous
6677 one, go all the way to -INF. In the first case, to avoid
6678 iterating millions of times to reach -INF, and in the
6679 other case to avoid infinite bouncing between different
6681 if (cmp_min > 0 || cmp_min < 0)
6683 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
6684 || !vrp_var_may_overflow (lhs, phi))
6685 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
6686 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
6688 negative_overflow_infinity (TREE_TYPE (vr_result.min));
6691 /* Similarly, if the new maximum is smaller or larger than
6692 the previous one, go all the way to +INF. */
6693 if (cmp_max < 0 || cmp_max > 0)
6695 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
6696 || !vrp_var_may_overflow (lhs, phi))
6697 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
6698 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
6700 positive_overflow_infinity (TREE_TYPE (vr_result.max));
6703 /* If we dropped either bound to +-INF then if this is a loop
6704 PHI node SCEV may known more about its value-range. */
6705 if ((cmp_min > 0 || cmp_min < 0
6706 || cmp_max < 0 || cmp_max > 0)
6708 && (l = loop_containing_stmt (phi))
6709 && l->header == gimple_bb (phi))
6710 adjust_range_with_scev (&vr_result, l, phi, lhs);
6712 /* If we will end up with a (-INF, +INF) range, set it to
6713 VARYING. Same if the previous max value was invalid for
6714 the type and we end up with vr_result.min > vr_result.max. */
6715 if ((vrp_val_is_max (vr_result.max)
6716 && vrp_val_is_min (vr_result.min))
6717 || compare_values (vr_result.min,
6722 /* If the new range is different than the previous value, keep
6724 if (update_value_range (lhs, &vr_result))
6726 if (dump_file && (dump_flags & TDF_DETAILS))
6728 fprintf (dump_file, "Found new range for ");
6729 print_generic_expr (dump_file, lhs, 0);
6730 fprintf (dump_file, ": ");
6731 dump_value_range (dump_file, &vr_result);
6732 fprintf (dump_file, "\n\n");
6735 return SSA_PROP_INTERESTING;
6738 /* Nothing changed, don't add outgoing edges. */
6739 return SSA_PROP_NOT_INTERESTING;
6741 /* No match found. Set the LHS to VARYING. */
6743 set_value_range_to_varying (lhs_vr);
6744 return SSA_PROP_VARYING;
6747 /* Simplify boolean operations if the source is known
6748 to be already a boolean. */
/* Rewrites TRUTH_* operations into BIT_* equivalents (or identity/
   negation) when range information proves the operands are already
   0/1-valued.  NOTE(review): many lines are elided in this excerpt.  */
6750 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
6752 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6757 bool need_conversion;
6759 op0 = gimple_assign_rhs1 (stmt);
6760 if (TYPE_PRECISION (TREE_TYPE (op0)) != 1)
6762 if (TREE_CODE (op0) != SSA_NAME)
6764 vr = get_value_range (op0);
/* Prove 0 <= op0 <= 1 from OP0's range.  */
6766 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6767 if (!val || !integer_onep (val))
6770 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6771 if (!val || !integer_onep (val))
6775 if (rhs_code == TRUTH_NOT_EXPR)
/* !x on a boolean becomes x ^ 1.  */
6778 op1 = build_int_cst (TREE_TYPE (op0), 1);
6782 op1 = gimple_assign_rhs2 (stmt);
6784 /* Reduce number of cases to handle. */
6785 if (is_gimple_min_invariant (op1))
6787 /* Exclude anything that should have been already folded. */
6788 if (rhs_code != EQ_EXPR
6789 && rhs_code != NE_EXPR
6790 && rhs_code != TRUTH_XOR_EXPR)
6793 if (!integer_zerop (op1)
6794 && !integer_onep (op1)
6795 && !integer_all_onesp (op1))
6798 /* Limit the number of cases we have to consider. */
6799 if (rhs_code == EQ_EXPR)
/* x == c  ->  x ^ !c, so normalize EQ to XOR on negated constant.  */
6802 op1 = fold_unary (TRUTH_NOT_EXPR, TREE_TYPE (op1), op1);
6807 /* Punt on A == B as there is no BIT_XNOR_EXPR. */
6808 if (rhs_code == EQ_EXPR)
/* Same 0/1 proof for the second operand.  */
6811 if (TYPE_PRECISION (TREE_TYPE (op1)) != 1)
6813 vr = get_value_range (op1);
6814 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6815 if (!val || !integer_onep (val))
6818 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6819 if (!val || !integer_onep (val))
6825 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6827 location_t location;
6829 if (!gimple_has_location (stmt))
6830 location = input_location;
6832 location = gimple_location (stmt);
6834 if (rhs_code == TRUTH_AND_EXPR || rhs_code == TRUTH_OR_EXPR)
6835 warning_at (location, OPT_Wstrict_overflow,
6836 _("assuming signed overflow does not occur when "
6837 "simplifying && or || to & or |"));
6839 warning_at (location, OPT_Wstrict_overflow,
6840 _("assuming signed overflow does not occur when "
6841 "simplifying ==, != or ! to identity or ^"));
6845 !useless_type_conversion_p (TREE_TYPE (gimple_assign_lhs (stmt)),
6848 /* Make sure to not sign-extend -1 as a boolean value. */
6850 && !TYPE_UNSIGNED (TREE_TYPE (op0))
6851 && TYPE_PRECISION (TREE_TYPE (op0)) == 1)
/* Map each TRUTH_* code to its bitwise counterpart.  */
6856 case TRUTH_AND_EXPR:
6857 rhs_code = BIT_AND_EXPR;
6860 rhs_code = BIT_IOR_EXPR;
6862 case TRUTH_XOR_EXPR:
6864 if (integer_zerop (op1))
/* x ^ 0 is just x (possibly converted).  */
6866 gimple_assign_set_rhs_with_ops (gsi,
6867 need_conversion ? NOP_EXPR : SSA_NAME,
6869 update_stmt (gsi_stmt (*gsi));
6873 rhs_code = BIT_XOR_EXPR;
6879 if (need_conversion)
6882 gimple_assign_set_rhs_with_ops (gsi, rhs_code, op0, op1);
6883 update_stmt (gsi_stmt (*gsi));
6887 /* Simplify a division or modulo operator to a right shift or
6888 bitwise and if the first operand is unsigned or is greater
6889 than zero and the second operand is an exact power of two. */
/* NOTE(review): some lines are elided in this excerpt.  */
6892 simplify_div_or_mod_using_ranges (gimple stmt)
6894 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6896 tree op0 = gimple_assign_rhs1 (stmt);
6897 tree op1 = gimple_assign_rhs2 (stmt);
6898 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
/* Unsigned operands are trivially non-negative.  */
6900 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
6902 val = integer_one_node;
/* Otherwise ask the range whether op0 >= 0; SOP flags reliance on
   undefined signed overflow.  */
6908 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6912 && integer_onep (val)
6913 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6915 location_t location;
6917 if (!gimple_has_location (stmt))
6918 location = input_location;
6920 location = gimple_location (stmt);
6921 warning_at (location, OPT_Wstrict_overflow,
6922 "assuming signed overflow does not occur when "
6923 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
6927 if (val && integer_onep (val))
6931 if (rhs_code == TRUNC_DIV_EXPR)
/* x / 2^k  ->  x >> k.  */
6933 t = build_int_cst (integer_type_node, tree_log2 (op1));
6934 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
6935 gimple_assign_set_rhs1 (stmt, op0);
6936 gimple_assign_set_rhs2 (stmt, t);
/* x % 2^k  ->  x & (2^k - 1).  */
6940 t = build_int_cst (TREE_TYPE (op1), 1);
6941 t = int_const_binop (MINUS_EXPR, op1, t);
6942 t = fold_convert (TREE_TYPE (op0), t);
6944 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
6945 gimple_assign_set_rhs1 (stmt, op0);
6946 gimple_assign_set_rhs2 (stmt, t);
6956 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
6957 ABS_EXPR. If the operand is <= 0, then simplify the
6958 ABS_EXPR into a NEGATE_EXPR. */
/* NOTE(review): some lines are elided in this excerpt.  */
6961 simplify_abs_using_ranges (gimple stmt)
6964 tree op = gimple_assign_rhs1 (stmt);
6965 tree type = TREE_TYPE (op);
6966 value_range_t *vr = get_value_range (op);
6968 if (TYPE_UNSIGNED (type))
/* abs of an unsigned value is the value itself.  */
6970 val = integer_zero_node;
/* Determine the sign of OP from its range: first try op <= 0,
   then op >= 0.  */
6976 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
6980 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
/* Normalize so that VAL==1 means "negate" and VAL==0 means
   "identity" below.  */
6985 if (integer_zerop (val))
6986 val = integer_one_node;
6987 else if (integer_onep (val))
6988 val = integer_zero_node;
6993 && (integer_onep (val) || integer_zerop (val)))
6995 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6997 location_t location;
6999 if (!gimple_has_location (stmt))
7000 location = input_location;
7002 location = gimple_location (stmt);
7003 warning_at (location, OPT_Wstrict_overflow,
7004 "assuming signed overflow does not occur when "
7005 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
7008 gimple_assign_set_rhs1 (stmt, op);
7009 if (integer_onep (val))
7010 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
7012 gimple_assign_set_rhs_code (stmt, SSA_NAME);
7021 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
7022 If all the bits that are being cleared by & are already
7023 known to be zero from VR, or all the bits that are being
7024 set by | are already known to be one from VR, the bit
7025 operation is redundant. */
/* NOTE(review): some lines are elided in this excerpt (the op = ...
   assignments inside the switch among them).  */
7028 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
7030 tree op0 = gimple_assign_rhs1 (stmt);
7031 tree op1 = gimple_assign_rhs2 (stmt);
7032 tree op = NULL_TREE;
7033 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
7034 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
7035 double_int may_be_nonzero0, may_be_nonzero1;
7036 double_int must_be_nonzero0, must_be_nonzero1;
/* Build a range for each operand, from VRP info for SSA_NAMEs or a
   singleton range for constants.  */
7039 if (TREE_CODE (op0) == SSA_NAME)
7040 vr0 = *(get_value_range (op0));
7041 else if (is_gimple_min_invariant (op0))
7042 set_value_range_to_value (&vr0, op0, NULL);
7046 if (TREE_CODE (op1) == SSA_NAME)
7047 vr1 = *(get_value_range (op1));
7048 else if (is_gimple_min_invariant (op1))
7049 set_value_range_to_value (&vr1, op1, NULL);
/* Derive per-bit may/must-be-nonzero masks from the ranges.  */
7053 if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
7055 if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
7058 switch (gimple_assign_rhs_code (stmt))
/* For &: if every possibly-nonzero bit of one operand is known set
   in the other, the AND is the identity on that operand.  For |: the
   symmetric condition makes the IOR redundant.  */
7061 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7062 if (double_int_zero_p (mask))
7067 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7068 if (double_int_zero_p (mask))
7075 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7076 if (double_int_zero_p (mask))
7081 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7082 if (double_int_zero_p (mask))
7092 if (op == NULL_TREE)
/* Replace the bit operation with the surviving operand.  */
7095 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
7096 update_stmt (gsi_stmt (*gsi));
7100 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
7101 a known value range VR.
7103 If there is one and only one value which will satisfy the
7104 conditional, then return that value. Else return NULL. */
/* NOTE(review): some lines are elided in this excerpt (the max/min
   initializations from OP1 among them).  */
7107 test_for_singularity (enum tree_code cond_code, tree op0,
7108 tree op1, value_range_t *vr)
7113 /* Extract minimum/maximum values which satisfy the
7114 the conditional as it was written. */
7115 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
7117 /* This should not be negative infinity; there is no overflow
7119 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
/* For strict <, the largest satisfying value is OP1 - 1.  */
7122 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
7124 tree one = build_int_cst (TREE_TYPE (op0), 1);
7125 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
7127 TREE_NO_WARNING (max) = 1;
7130 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
7132 /* This should not be positive infinity; there is no overflow
7134 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
/* For strict >, the smallest satisfying value is OP1 + 1.  */
7137 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
7139 tree one = build_int_cst (TREE_TYPE (op0), 1);
7140 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
7142 TREE_NO_WARNING (min) = 1;
7146 /* Now refine the minimum and maximum values using any
7147 value range information we have for op0. */
7150 if (compare_values (vr->min, min) == 1)
7152 if (compare_values (vr->max, max) == -1)
7155 /* If the new min/max values have converged to a single value,
7156 then there is only one value which can satisfy the condition,
7157 return that value. */
7158 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
7164 /* Simplify a conditional using a relational operator to an equality
7165 test if the range information indicates only one value can satisfy
7166 the original conditional. */
/* NOTE(review): some lines are elided in this excerpt (dump-guard
   conditions and closing braces among them).  */
7169 simplify_cond_using_ranges (gimple stmt)
7171 tree op0 = gimple_cond_lhs (stmt);
7172 tree op1 = gimple_cond_rhs (stmt);
7173 enum tree_code cond_code = gimple_cond_code (stmt);
/* Only relational (non-equality) integral comparisons of an
   SSA_NAME against an invariant are candidates.  */
7175 if (cond_code != NE_EXPR
7176 && cond_code != EQ_EXPR
7177 && TREE_CODE (op0) == SSA_NAME
7178 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
7179 && is_gimple_min_invariant (op1))
7181 value_range_t *vr = get_value_range (op0);
7183 /* If we have range information for OP0, then we might be
7184 able to simplify this conditional. */
7185 if (vr->type == VR_RANGE)
7187 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
7193 fprintf (dump_file, "Simplified relational ");
7194 print_gimple_stmt (dump_file, stmt, 0, 0);
7195 fprintf (dump_file, " into ");
/* Exactly one value satisfies the condition: rewrite it as
   an equality against that value.  */
7198 gimple_cond_set_code (stmt, EQ_EXPR);
7199 gimple_cond_set_lhs (stmt, op0);
7200 gimple_cond_set_rhs (stmt, new_tree);
7206 print_gimple_stmt (dump_file, stmt, 0, 0);
7207 fprintf (dump_file, "\n");
7213 /* Try again after inverting the condition. We only deal
7214 with integral types here, so no need to worry about
7215 issues with inverting FP comparisons. */
7216 cond_code = invert_tree_comparison (cond_code, false);
7217 new_tree = test_for_singularity (cond_code, op0, op1, vr);
7223 fprintf (dump_file, "Simplified relational ");
7224 print_gimple_stmt (dump_file, stmt, 0, 0);
7225 fprintf (dump_file, " into ");
/* One value falsifies the condition: rewrite as NE against it.  */
7228 gimple_cond_set_code (stmt, NE_EXPR);
7229 gimple_cond_set_lhs (stmt, op0);
7230 gimple_cond_set_rhs (stmt, new_tree);
7236 print_gimple_stmt (dump_file, stmt, 0, 0);
7237 fprintf (dump_file, "\n");
7248 /* Simplify a switch statement using the value range of the switch
/* Prune unreachable case labels of a GIMPLE_SWITCH using the range of
   its index, queueing dead edges and a deferred label-vector update.
   NOTE(review): some lines are elided in this excerpt.  */
7252 simplify_switch_using_ranges (gimple stmt)
7254 tree op = gimple_switch_index (stmt);
7259 size_t i = 0, j = 0, n, n2;
7263 if (TREE_CODE (op) == SSA_NAME)
7265 vr = get_value_range (op);
7267 /* We can only handle integer ranges. */
7268 if (vr->type != VR_RANGE
7269 || symbolic_range_p (vr))
7272 /* Find case label for min/max of the value range. */
7273 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
7275 else if (TREE_CODE (op) == INTEGER_CST)
7277 take_default = !find_case_label_index (stmt, 1, op, &i);
7291 n = gimple_switch_num_labels (stmt);
7293 /* Bail out if this is just all edges taken. */
7299 /* Build a new vector of taken case labels. */
7300 vec2 = make_tree_vec (j - i + 1 + (int)take_default);
7303 /* Add the default edge, if necessary. */
7305 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
7307 for (; i <= j; ++i, ++n2)
7308 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
7310 /* Mark needed edges. */
7311 for (i = 0; i < n2; ++i)
7313 e = find_edge (gimple_bb (stmt),
7314 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
/* Tag kept edges via e->aux so the next loop can spot the rest.  */
7315 e->aux = (void *)-1;
7318 /* Queue not needed edges for later removal. */
7319 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
/* Elided: this guard actually keeps tagged edges; untagged ones
   fall through to the removal code below.  */
7321 if (e->aux == (void *)-1)
7327 if (dump_file && (dump_flags & TDF_DETAILS))
7329 fprintf (dump_file, "removing unreachable case label\n");
7331 VEC_safe_push (edge, heap, to_remove_edges, e);
7332 e->flags &= ~EDGE_EXECUTABLE;
7335 /* And queue an update for the stmt. */
7338 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
7342 /* Simplify an integral conversion from an SSA name in STMT. */
/* STMT converts MIDDLEOP to FINALTYPE, where MIDDLEOP is itself the
   result of a conversion from INNEROP (a chain inner -> middle -> final).
   Using the constant value range of INNEROP, simulate both conversion
   paths; if removing the middle conversion cannot change the result,
   rewrite STMT to convert directly from INNEROP.
   NOTE(review): sampled excerpt -- the early-return statements after the
   visible guard conditions are not shown.  */
7345 simplify_conversion_using_ranges (gimple stmt)
7347 tree innerop, middleop, finaltype;
7349 value_range_t *innervr;
7350 double_int innermin, innermax, middlemin, middlemax;
7352 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
7353 if (!INTEGRAL_TYPE_P (finaltype))
7355 middleop = gimple_assign_rhs1 (stmt);
7356 def_stmt = SSA_NAME_DEF_STMT (middleop);
/* The defining statement of the middle operand must itself be a
   conversion for a removable chain to exist.  */
7357 if (!is_gimple_assign (def_stmt)
7358 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
7360 innerop = gimple_assign_rhs1 (def_stmt);
7361 if (TREE_CODE (innerop) != SSA_NAME)
/* The simulation below requires constant range bounds.  */
7364 /* Get the value-range of the inner operand. */
7365 innervr = get_value_range (innerop);
7366 if (innervr->type != VR_RANGE
7367 || TREE_CODE (innervr->min) != INTEGER_CST
7368 || TREE_CODE (innervr->max) != INTEGER_CST)
7371 /* Simulate the conversion chain to check if the result is equal if
7372 the middle conversion is removed. */
7373 innermin = tree_to_double_int (innervr->min);
7374 innermax = tree_to_double_int (innervr->max);
/* Extend the inner bounds to the middle type's precision/signedness.  */
7375 middlemin = double_int_ext (innermin, TYPE_PRECISION (TREE_TYPE (middleop)),
7376 TYPE_UNSIGNED (TREE_TYPE (middleop)));
7377 middlemax = double_int_ext (innermax, TYPE_PRECISION (TREE_TYPE (middleop)),
7378 TYPE_UNSIGNED (TREE_TYPE (middleop)));
/* If extension swapped min/max, the middle value wraps -- give up.  */
7379 /* If the middle values do not represent a proper range fail. */
7380 if (double_int_cmp (middlemin, middlemax,
7381 TYPE_UNSIGNED (TREE_TYPE (middleop))) > 0)
/* Both bounds must land on the same final-type value whether they go
   through the middle type or come straight from the inner type.  */
7383 if (!double_int_equal_p (double_int_ext (middlemin,
7384 TYPE_PRECISION (finaltype),
7385 TYPE_UNSIGNED (finaltype)),
7386 double_int_ext (innermin,
7387 TYPE_PRECISION (finaltype),
7388 TYPE_UNSIGNED (finaltype)))
7389 || !double_int_equal_p (double_int_ext (middlemax,
7390 TYPE_PRECISION (finaltype),
7391 TYPE_UNSIGNED (finaltype)),
7392 double_int_ext (innermax,
7393 TYPE_PRECISION (finaltype),
7394 TYPE_UNSIGNED (finaltype))))
/* Safe: skip the middle conversion and convert INNEROP directly.  */
7397 gimple_assign_set_rhs1 (stmt, innerop);
7402 /* Simplify STMT using ranges if possible. */
/* Dispatcher: examine the statement at *GSI and route it to the
   specialized range-based simplifier for its opcode.  Returns whatever
   the chosen simplifier returns (true on simplification).
   NOTE(review): sampled excerpt -- the switch braces, `break`s and
   default case are not visible here.  */
7405 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
7407 gimple stmt = gsi_stmt (*gsi);
7408 if (is_gimple_assign (stmt))
7410 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7411 tree rhs1 = gimple_assign_rhs1 (stmt);
7417 case TRUTH_NOT_EXPR:
7418 case TRUTH_AND_EXPR:
7420 case TRUTH_XOR_EXPR:
7421 /* Transform EQ_EXPR, NE_EXPR, TRUTH_NOT_EXPR into BIT_XOR_EXPR
7422 or identity if the RHS is zero or one, and the LHS are known
7423 to be boolean values. Transform all TRUTH_*_EXPR into
7424 BIT_*_EXPR if both arguments are known to be boolean values. */
7425 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7426 return simplify_truth_ops_using_ranges (gsi, stmt);
7429 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
7430 and BIT_AND_EXPR respectively if the first operand is greater
7431 than zero and the second operand is an exact power of two. */
7432 case TRUNC_DIV_EXPR:
7433 case TRUNC_MOD_EXPR:
7434 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
7435 && integer_pow2p (gimple_assign_rhs2 (stmt)))
7436 return simplify_div_or_mod_using_ranges (stmt);
7439 /* Transform ABS (X) into X or -X as appropriate. */
7441 if (TREE_CODE (rhs1) == SSA_NAME
7442 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7443 return simplify_abs_using_ranges (stmt);
7448 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
7449 if all the bits being cleared are already cleared or
7450 all the bits being set are already set. */
7451 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7452 return simplify_bit_ops_using_ranges (gsi, stmt);
/* Integral conversions: try to remove a redundant middle conversion.  */
7456 if (TREE_CODE (rhs1) == SSA_NAME
7457 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7458 return simplify_conversion_using_ranges (stmt);
/* Non-assignments: conditionals and switches have their own handlers.  */
7465 else if (gimple_code (stmt) == GIMPLE_COND)
7466 return simplify_cond_using_ranges (stmt);
7467 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7468 return simplify_switch_using_ranges (stmt);
7473 /* If the statement pointed by SI has a predicate whose value can be
7474 computed using the value range information computed by VRP, compute
7475 its value and return true. Otherwise, return false. */
/* Handles two shapes: an assignment whose RHS is a comparison, and a
   GIMPLE_COND.  When vrp_evaluate_conditional yields a known value, the
   statement is rewritten in place: the assignment's RHS is replaced by
   the constant, or the condition is folded to constant true/false.
   NOTE(review): sampled excerpt -- the NULL-check on VAL and the final
   returns are not visible here.  */
7478 fold_predicate_in (gimple_stmt_iterator *si)
7480 bool assignment_p = false;
7482 gimple stmt = gsi_stmt (*si);
7484 if (is_gimple_assign (stmt)
7485 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
7487 assignment_p = true;
7488 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
7489 gimple_assign_rhs1 (stmt),
7490 gimple_assign_rhs2 (stmt),
7493 else if (gimple_code (stmt) == GIMPLE_COND)
7494 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
7495 gimple_cond_lhs (stmt),
7496 gimple_cond_rhs (stmt),
/* For assignments the constant must match the LHS type.  */
7504 val = fold_convert (gimple_expr_type (stmt), val);
7508 fprintf (dump_file, "Folding predicate ");
7509 print_gimple_expr (dump_file, stmt, 0, 0);
7510 fprintf (dump_file, " to ");
7511 print_generic_expr (dump_file, val, 0);
7512 fprintf (dump_file, "\n");
7515 if (is_gimple_assign (stmt))
7516 gimple_assign_set_rhs_from_tree (si, val);
/* Otherwise this must be a GIMPLE_COND; fold it to a constant branch.  */
7519 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
7520 if (integer_zerop (val))
7521 gimple_cond_make_false (stmt);
7522 else if (integer_onep (val))
7523 gimple_cond_make_true (stmt);
7534 /* Callback for substitute_and_fold folding the stmt at *SI. */
/* First try to fold a comparison predicate to a constant; otherwise fall
   back to the generic range-based simplifications.  NOTE(review):
   sampled excerpt -- the `return true;` taken when fold_predicate_in
   succeeds lies in a gap and is not visible here.  */
7537 vrp_fold_stmt (gimple_stmt_iterator *si)
7539 if (fold_predicate_in (si))
7542 return simplify_stmt_using_ranges (si);
7545 /* Stack of dest,src equivalency pairs that need to be restored after
7546 each attempt to thread a block's incoming edge to an outgoing edge.
7548 A NULL entry is used to mark the end of pairs which need to be
/* (restored together, i.e. NULL terminates one unwind group).  Allocated
   in identify_jump_threads and freed in finalize_jump_threads.  */
7550 static VEC(tree,heap) *stack;
7552 /* A trivial wrapper so that we can present the generic jump threading
7553 code with a simple API for simplifying statements. STMT is the
7554 statement we want to simplify, WITHIN_STMT provides the location
7555 for any overflow warnings. */
/* Returns the folded value of STMT's condition, or (per the
   vrp_evaluate_conditional contract) no simplification for anything that
   is not a GIMPLE_COND.  */
7558 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
7560 /* We only use VRP information to simplify conditionals. This is
7561 overly conservative, but it's unclear if doing more would be
7562 worth the compile time cost. */
7563 if (gimple_code (stmt) != GIMPLE_COND)
7566 return vrp_evaluate_conditional (gimple_cond_code (stmt),
7567 gimple_cond_lhs (stmt),
7568 gimple_cond_rhs (stmt), within_stmt);
7571 /* Blocks which have more than one predecessor and more than
7572 one successor present jump threading opportunities, i.e.,
7573 when the block is reached from a specific predecessor, we
7574 may be able to determine which of the outgoing edges will
7575 be traversed. When this optimization applies, we are able
7576 to avoid conditionals at runtime and we may expose secondary
7577 optimization opportunities.
7579 This routine is effectively a driver for the generic jump
7580 threading code. It basically just presents the generic code
7581 with edges that may be suitable for jump threading.
7583 Unlike DOM, we do not iterate VRP if jump threading was successful.
7584 While iterating may expose new opportunities for VRP, it is expected
7585 those opportunities would be very limited and the compile time cost
7586 to expose those opportunities would be significant.
7588 As jump threading opportunities are discovered, they are registered
7589 for later realization. */
7592 identify_jump_threads (void)
7599 /* Ugh. When substituting values earlier in this pass we can
7600 wipe the dominance information. So rebuild the dominator
7601 information as we need it within the jump threading code. */
7602 calculate_dominance_info (CDI_DOMINATORS);
7604 /* We do not allow VRP information to be used for jump threading
7605 across a back edge in the CFG. Otherwise it becomes too
7606 difficult to avoid eliminating loop exit tests. Of course
7607 EDGE_DFS_BACK is not accurate at this time so we have to
7609 mark_dfs_back_edges ();
7611 /* Do not thread across edges we are about to remove. Just marking
7612 them as EDGE_DFS_BACK will do. */
7613 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
7614 e->flags |= EDGE_DFS_BACK;
7616 /* Allocate our unwinder stack to unwind any temporary equivalences
7617 that might be recorded. */
7618 stack = VEC_alloc (tree, heap, 20);
/* The dummy conditional is reused for every candidate edge instead of
   building a fresh statement each time.  */
7620 /* To avoid lots of silly node creation, we create a single
7621 conditional and just modify it in-place when attempting to
7623 dummy = gimple_build_cond (EQ_EXPR,
7624 integer_zero_node, integer_zero_node,
7627 /* Walk through all the blocks finding those which present a
7628 potential jump threading opportunity. We could set this up
7629 as a dominator walker and record data during the walk, but
7630 I doubt it's worth the effort for the classes of jump
7631 threading opportunities we are trying to identify at this
7632 point in compilation. */
7637 /* If the generic jump threading code does not find this block
7638 interesting, then there is nothing to do. */
7639 if (! potentially_threadable_block (bb))
7642 /* We only care about blocks ending in a COND_EXPR. While there
7643 may be some value in handling SWITCH_EXPR here, I doubt it's
7644 terribly important. */
7645 last = gsi_stmt (gsi_last_bb (bb));
7647 /* We're basically looking for a switch or any kind of conditional with
7648 integral or pointer type arguments. Note the type of the second
7649 argument will be the same as the first argument, so no need to
7650 check it explicitly. */
7651 if (gimple_code (last) == GIMPLE_SWITCH
7652 || (gimple_code (last) == GIMPLE_COND
7653 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
7654 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
7655 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
7656 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
7657 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
7661 /* We've got a block with multiple predecessors and multiple
7662 successors which also ends in a suitable conditional or
7663 switch statement. For each predecessor, see if we can thread
7664 it to a specific successor. */
7665 FOR_EACH_EDGE (e, ei, bb->preds)
7667 /* Do not thread across back edges or abnormal edges
7669 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
/* The generic threader records opportunities; realization happens
   later in finalize_jump_threads.  */
7672 thread_across_edge (dummy, e, true, &stack,
7673 simplify_stmt_for_jump_threading);
7678 /* We do not actually update the CFG or SSA graphs at this point as
7679 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
7680 handle ASSERT_EXPRs gracefully. */
7683 /* We identified all the jump threading opportunities earlier, but could
7684 not transform the CFG at that time. This routine transforms the
7685 CFG and arranges for the dominator tree to be rebuilt if necessary.
7687 Note the SSA graph update will occur during the normal TODO
7688 processing by the pass manager. */
7690 finalize_jump_threads (void)
/* Realize the threads queued by identify_jump_threads, then release the
   equivalence-unwinding stack allocated there.  */
7692 thread_through_all_blocks (false);
7693 VEC_free (tree, heap, stack);
7697 /* Traverse all the blocks folding conditionals with known ranges. */
/* NOTE(review): the function signature lies in a gap of this sampled
   excerpt; from the body this is the VRP finalization routine: dump the
   computed ranges, substitute/fold using them, run array-bounds
   checking, record jump threads, then free the per-SSA-name range
   data.  */
7703 unsigned num = num_ssa_names;
7707 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
7708 dump_all_value_ranges (dump_file);
7709 fprintf (dump_file, "\n");
/* Fold statements using singleton ranges as constants; vrp_fold_stmt
   performs the per-statement simplification.  */
7712 substitute_and_fold (op_with_constant_singleton_value_range,
7713 vrp_fold_stmt, false);
7715 if (warn_array_bounds)
7716 check_all_array_refs ();
7718 /* We must identify jump threading opportunities before we release
7719 the datastructures built by VRP. */
7720 identify_jump_threads ();
7722 /* Free allocated memory. */
7723 for (i = 0; i < num; i++)
7726 BITMAP_FREE (vr_value[i]->equiv);
7731 free (vr_phi_edge_counts);
7733 /* So that we can distinguish between VRP data being available
7734 and not available. */
7736 vr_phi_edge_counts = NULL;
7740 /* Main entry point to VRP (Value Range Propagation). This pass is
7741 loosely based on J. R. C. Patterson, ``Accurate Static Branch
7742 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
7743 Programming Language Design and Implementation, pp. 67-78, 1995.
7744 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
7746 This is essentially an SSA-CCP pass modified to deal with ranges
7747 instead of constants.
7749 While propagating ranges, we may find that two or more SSA name
7750 have equivalent, though distinct ranges. For instance,
7753 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
7755 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
7759 In the code above, pointer p_5 has range [q_2, q_2], but from the
7760 code we can also determine that p_5 cannot be NULL and, if q_2 had
7761 a non-varying range, p_5's range should also be compatible with it.
7763 These equivalences are created by two expressions: ASSERT_EXPR and
7764 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
7765 result of another assertion, then we can use the fact that p_5 and
7766 p_4 are equivalent when evaluating p_5's range.
7768 Together with value ranges, we also propagate these equivalences
7769 between names so that we can take advantage of information from
7770 multiple ranges when doing final replacement. Note that this
7771 equivalency relation is transitive but not symmetric.
7773 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
7774 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
7775 in contexts where that assertion does not hold (e.g., in line 6).
7777 TODO, the main difference between this pass and Patterson's is that
7778 we do not propagate edge probabilities. We only compute whether
7779 edges can be taken or not. That is, instead of having a spectrum
7780 of jump probabilities between 0 and 1, we only deal with 0, 1 and
7781 DON'T KNOW. In the future, it may be worthwhile to propagate
7782 probabilities to aid branch prediction. */
/* NOTE(review): the function signature lies in a gap of this sampled
   excerpt; the body below is the pass driver: set up loops and
   assertions, run the propagator, finalize, thread jumps, then apply
   the deferred switch/edge updates queued during finalization.  */
7791 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
7792 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
7795 insert_range_assertions ();
7797 /* Estimate number of iterations - but do not use undefined behavior
7798 for this. We can't do this lazily as other functions may compute
7799 this using undefined behavior. */
7800 free_numbers_of_iterations_estimates ();
7801 estimate_numbers_of_iterations (false);
7803 to_remove_edges = VEC_alloc (edge, heap, 10);
7804 to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
7805 threadedge_initialize_values ();
/* Run the SSA propagation engine with the VRP visit callbacks.  */
7808 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
7811 free_numbers_of_iterations_estimates ();
7813 /* ASSERT_EXPRs must be removed before finalizing jump threads
7814 as finalizing jump threads calls the CFG cleanup code which
7815 does not properly handle ASSERT_EXPRs. */
7816 remove_range_assertions ();
7818 /* If we exposed any new variables, go ahead and put them into
7819 SSA form now, before we handle jump threading. This simplifies
7820 interactions between rewriting of _DECL nodes into SSA form
7821 and rewriting SSA_NAME nodes into SSA form after block
7822 duplication and CFG manipulation. */
7823 update_ssa (TODO_update_ssa);
7825 finalize_jump_threads ();
7827 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
7828 CFG in a broken state and requires a cfg_cleanup run. */
7829 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
7831 /* Update SWITCH_EXPR case label vector. */
7832 FOR_EACH_VEC_ELT (switch_update, to_update_switch_stmts, i, su)
7835 size_t n = TREE_VEC_LENGTH (su->vec);
7837 gimple_switch_set_num_labels (su->stmt, n);
7838 for (j = 0; j < n; j++)
7839 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
7840 /* As we may have replaced the default label with a regular one
7841 make sure to make it a real default label again. This ensures
7842 optimal expansion. */
7843 label = gimple_switch_default_label (su->stmt);
7844 CASE_LOW (label) = NULL_TREE;
7845 CASE_HIGH (label) = NULL_TREE;
/* Edge removal invalidated dominators; drop them so they get rebuilt.  */
7848 if (VEC_length (edge, to_remove_edges) > 0)
7849 free_dominance_info (CDI_DOMINATORS);
7851 VEC_free (edge, heap, to_remove_edges);
7852 VEC_free (switch_update, heap, to_update_switch_stmts);
7853 threadedge_finalize_values ();
7856 loop_optimizer_finalize ();
/* Pass gate: run VRP only when -ftree-vrp is enabled.  NOTE(review):
   the enclosing function signature (presumably gate_vrp, referenced by
   the pass descriptor below) lies in a gap of this sampled excerpt.  */
7863 return flag_tree_vrp != 0;
7866 struct gimple_opt_pass pass_vrp =
7871 gate_vrp, /* gate */
7872 execute_vrp, /* execute */
7875 0, /* static_pass_number */
7876 TV_TREE_VRP, /* tv_id */
7877 PROP_ssa, /* properties_required */
7878 0, /* properties_provided */
7879 0, /* properties_destroyed */
7880 0, /* todo_flags_start */
7885 | TODO_ggc_collect /* todo_flags_finish */