static tree fold_binary_op_with_conditional_arg (enum tree_code, tree,
tree, tree,
tree, tree, int);
-static bool fold_real_zero_addition_p (const_tree, const_tree, int);
static tree fold_mathfn_compare (enum built_in_function, enum tree_code,
tree, tree, tree);
static tree fold_inf_compare (enum tree_code, tree, tree, tree);
|| TREE_CODE (in) == FIXED_CST)
*litp = in;
else if (TREE_CODE (in) == code
- || (! FLOAT_TYPE_P (TREE_TYPE (in))
+ || ((! FLOAT_TYPE_P (TREE_TYPE (in)) || flag_associative_math)
&& ! SAT_FIXED_POINT_TYPE_P (TREE_TYPE (in))
/* We can associate addition and subtraction together (even
though the C standard doesn't say so) for integers because
t = force_fit_type_double (type, TREE_INT_CST_LOW (arg1),
TREE_INT_CST_HIGH (arg1),
/* Don't set the overflow when
- converting a pointer */
- !POINTER_TYPE_P (TREE_TYPE (arg1)),
+ converting from a pointer, */
+ !POINTER_TYPE_P (TREE_TYPE (arg1))
+				      /* or to a sizetype with the same
+					 signedness and unchanged precision.
+ ??? sizetype is always sign-extended,
+ but its signedness depends on the
+ frontend. Thus we see spurious overflows
+ here if we do not check this. */
+ && !((TYPE_PRECISION (TREE_TYPE (arg1))
+ == TYPE_PRECISION (type))
+ && (TYPE_UNSIGNED (TREE_TYPE (arg1))
+ == TYPE_UNSIGNED (type))
+ && ((TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (TREE_TYPE (arg1)))
+ || (TREE_CODE (type) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (type)))),
(TREE_INT_CST_HIGH (arg1) < 0
&& (TYPE_UNSIGNED (type)
< TYPE_UNSIGNED (TREE_TYPE (arg1))))
return (TREE_CODE (orig) == VECTOR_TYPE
&& tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
- default:
+ case REAL_TYPE:
+ case FIXED_POINT_TYPE:
+ case COMPLEX_TYPE:
+ case VECTOR_TYPE:
+ case VOID_TYPE:
return TREE_CODE (type) == TREE_CODE (orig);
+
+ default:
+ return false;
}
}
Note that all these transformations are correct if A is
NaN, since the two alternatives (A and -A) are also NaNs. */
- if ((FLOAT_TYPE_P (TREE_TYPE (arg01))
- ? real_zerop (arg01)
- : integer_zerop (arg01))
+ if (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))
+ && (FLOAT_TYPE_P (TREE_TYPE (arg01))
+ ? real_zerop (arg01)
+ : integer_zerop (arg01))
&& ((TREE_CODE (arg2) == NEGATE_EXPR
&& operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
/* In the case that A is of the form X-Y, '-A' (arg2) may
both transformations are correct when A is NaN: A != 0
is then true, and A == 0 is false. */
- if (integer_zerop (arg01) && integer_zerop (arg2))
+ if (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))
+ && integer_zerop (arg01) && integer_zerop (arg2))
{
if (comp_code == NE_EXPR)
return pedantic_non_lvalue (fold_convert (type, arg1));
a number and A is not. The conditions in the original
expressions will be false, so all four give B. The min()
and max() versions would give a NaN instead. */
- if (operand_equal_for_comparison_p (arg01, arg2, arg00)
+ if (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))
+ && operand_equal_for_comparison_p (arg01, arg2, arg00)
/* Avoid these transformations if the COND_EXPR may be used
as an lvalue in the C++ front-end. PR c++/19199. */
&& (in_gimple_form
}
break;
}
+ /* If the constant is negative, we cannot simplify this. */
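+      /* E.g. |X| * -2 must not become |X * -2|, which is |X| * 2
+	 instead.  */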
+ if (tree_int_cst_sgn (c) == -1)
+ break;
/* FALLTHROUGH */
case NEGATE_EXPR:
if ((t1 = extract_muldiv (op0, c, code, wide_type, strict_overflow_p))
X - 0 is not the same as X because 0 - 0 is -0. In other rounding
modes, X + 0 is not the same as X because -0 + 0 is 0. */
-static bool
+bool
fold_real_zero_addition_p (const_tree type, const_tree addend, int negate)
{
if (!real_zerop (addend))
if (TYPE_PRECISION (TREE_TYPE (arg0)) <= TYPE_PRECISION (shorter_type))
return NULL_TREE;
- arg1_unw = get_unwidened (arg1, shorter_type);
+ arg1_unw = get_unwidened (arg1, NULL_TREE);
/* If possible, express the comparison in the shorter mode. */
if ((code == EQ_EXPR || code == NE_EXPR
|| TYPE_UNSIGNED (TREE_TYPE (arg0)) == TYPE_UNSIGNED (shorter_type))
&& (TREE_TYPE (arg1_unw) == shorter_type
+ || (TYPE_PRECISION (shorter_type)
+ >= TYPE_PRECISION (TREE_TYPE (arg1_unw)))
|| (TREE_CODE (arg1_unw) == INTEGER_CST
&& (TREE_CODE (shorter_type) == INTEGER_TYPE
|| TREE_CODE (shorter_type) == BOOLEAN_TYPE)
switch (code)
{
+ case PAREN_EXPR:
+ /* Re-association barriers around constants and other re-association
+ barriers can be removed. */
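+      /* For example, a PAREN_EXPR around a constant folds to the
+	 constant itself, and PAREN_EXPR <PAREN_EXPR <X>> to
+	 PAREN_EXPR <X>.  */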
+ if (CONSTANT_CLASS_P (op0)
+ || TREE_CODE (op0) == PAREN_EXPR)
+ return fold_convert (type, op0);
+ return NULL_TREE;
+
case NOP_EXPR:
case FLOAT_EXPR:
case CONVERT_EXPR:
return op0;
if (TREE_CODE (op0) == VIEW_CONVERT_EXPR)
return fold_build1 (VIEW_CONVERT_EXPR, type, TREE_OPERAND (op0, 0));
+
+ /* For integral conversions with the same precision or pointer
+ conversions use a NOP_EXPR instead. */
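+      /* E.g. VIEW_CONVERT_EXPR <int> (unsigned int X) becomes
+	 (int) X.  */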
+ if ((INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (TREE_TYPE (op0))
+ && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (op0))
+ /* Do not muck with VIEW_CONVERT_EXPRs that convert from
+ a sub-type to its base type as generated by the Ada FE. */
+ && !TREE_TYPE (TREE_TYPE (op0)))
+ || (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (op0))))
+ return fold_convert (type, op0);
+
+ /* Strip inner integral conversions that do not change the precision. */
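+      /* E.g. VIEW_CONVERT_EXPR <float> ((unsigned int) X), with X of
+	 type int, becomes VIEW_CONVERT_EXPR <float> (X).  */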
+ if ((TREE_CODE (op0) == NOP_EXPR
+ || TREE_CODE (op0) == CONVERT_EXPR)
+ && INTEGRAL_TYPE_P (TREE_TYPE (op0))
+ && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (op0, 0)))
+ && (TYPE_PRECISION (TREE_TYPE (op0))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op0, 0)))))
+ return fold_build1 (VIEW_CONVERT_EXPR, type, TREE_OPERAND (op0, 0));
+
return fold_view_convert_expr (type, op0);
case NEGATE_EXPR:
if (TREE_CODE (arg0) == INTEGER_CST)
return fold_not_const (arg0, type);
else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
- return TREE_OPERAND (op0, 0);
+ return fold_convert (type, TREE_OPERAND (arg0, 0));
/* Convert ~ (-A) to A - 1. */
else if (INTEGRAL_TYPE_P (type) && TREE_CODE (arg0) == NEGATE_EXPR)
return fold_build2 (MINUS_EXPR, type,
is a rotate of A by B bits. */
{
enum tree_code code0, code1;
+ tree rtype;
code0 = TREE_CODE (arg0);
code1 = TREE_CODE (arg1);
if (((code0 == RSHIFT_EXPR && code1 == LSHIFT_EXPR)
|| (code1 == RSHIFT_EXPR && code0 == LSHIFT_EXPR))
&& operand_equal_p (TREE_OPERAND (arg0, 0),
TREE_OPERAND (arg1, 0), 0)
- && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
+ && (rtype = TREE_TYPE (TREE_OPERAND (arg0, 0)),
+ TYPE_UNSIGNED (rtype))
+ /* Only create rotates in complete modes. Other cases are not
+ expanded properly. */
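+	  /* E.g. a 24-bit precision type stored in a 32-bit mode must
+	     not be turned into a rotate.  */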
+ && TYPE_PRECISION (rtype) == GET_MODE_PRECISION (TYPE_MODE (rtype)))
{
tree tree01, tree11;
enum tree_code code01, code11;
&& TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- unsigned HOST_WIDE_INT hi1, lo1, hi2, lo2, mlo, mhi;
- int width = TYPE_PRECISION (type);
+ unsigned HOST_WIDE_INT hi1, lo1, hi2, lo2, hi3, lo3, mlo, mhi;
+ int width = TYPE_PRECISION (type), w;
hi1 = TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1));
lo1 = TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1));
hi2 = TREE_INT_CST_HIGH (arg1);
return fold_build2 (BIT_IOR_EXPR, type,
TREE_OPERAND (arg0, 0), arg1);
- /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
+ /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2,
+ unless (C1 & ~C2) | (C2 & C3) for some C3 is a mask of some
+ mode which allows further optimizations. */
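+      /* For instance (illustrative constants), (X & 0xf5) | 0x0f can
+	 use 0xff for C1 instead of 0xf0, since 0xff is a mode mask
+	 that may later fold to a zero-extension.  */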
hi1 &= mhi;
lo1 &= mlo;
- if ((hi1 & ~hi2) != hi1 || (lo1 & ~lo2) != lo1)
+ hi2 &= mhi;
+ lo2 &= mlo;
+ hi3 = hi1 & ~hi2;
+ lo3 = lo1 & ~lo2;
+ for (w = BITS_PER_UNIT;
+ w <= width && w <= HOST_BITS_PER_WIDE_INT;
+ w <<= 1)
+ {
+ unsigned HOST_WIDE_INT mask
+ = (unsigned HOST_WIDE_INT) -1 >> (HOST_BITS_PER_WIDE_INT - w);
+ if (((lo1 | lo2) & mask) == mask
+ && (lo1 & ~mask) == 0 && hi1 == 0)
+ {
+ hi3 = 0;
+ lo3 = mask;
+ break;
+ }
+ }
+ if (hi3 != hi1 || lo3 != lo1)
return fold_build2 (BIT_IOR_EXPR, type,
fold_build2 (BIT_AND_EXPR, type,
TREE_OPERAND (arg0, 0),
build_int_cst_wide (type,
- lo1 & ~lo2,
- hi1 & ~hi2)),
+ lo3, hi3)),
arg1);
}
return build_int_cst (type, residue & low);
}
+ /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
+ (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
+ if the new mask might be further optimized. */
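+      /* E.g. in ((X << 2) & 0xfc) the low two bits are known to be
+	 zero, so the mask can be widened to 0xff, a mode mask.  */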
+ if ((TREE_CODE (arg0) == LSHIFT_EXPR
+ || TREE_CODE (arg0) == RSHIFT_EXPR)
+ && host_integerp (TREE_OPERAND (arg0, 1), 1)
+ && host_integerp (arg1, TYPE_UNSIGNED (TREE_TYPE (arg1)))
+ && tree_low_cst (TREE_OPERAND (arg0, 1), 1)
+ < TYPE_PRECISION (TREE_TYPE (arg0))
+ && TYPE_PRECISION (TREE_TYPE (arg0)) <= HOST_BITS_PER_WIDE_INT
+ && tree_low_cst (TREE_OPERAND (arg0, 1), 1) > 0)
+ {
+ unsigned int shiftc = tree_low_cst (TREE_OPERAND (arg0, 1), 1);
+ unsigned HOST_WIDE_INT mask
+ = tree_low_cst (arg1, TYPE_UNSIGNED (TREE_TYPE (arg1)));
+ unsigned HOST_WIDE_INT newmask, zerobits = 0;
+ tree shift_type = TREE_TYPE (arg0);
+
+ if (TREE_CODE (arg0) == LSHIFT_EXPR)
+ zerobits = ((((unsigned HOST_WIDE_INT) 1) << shiftc) - 1);
+ else if (TREE_CODE (arg0) == RSHIFT_EXPR
+ && TYPE_PRECISION (TREE_TYPE (arg0))
+ == GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (arg0))))
+ {
+ unsigned int prec = TYPE_PRECISION (TREE_TYPE (arg0));
+ tree arg00 = TREE_OPERAND (arg0, 0);
+ /* See if more bits can be proven as zero because of
+ zero extension. */
+ if (TREE_CODE (arg00) == NOP_EXPR
+ && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg00, 0))))
+ {
+ tree inner_type = TREE_TYPE (TREE_OPERAND (arg00, 0));
+ if (TYPE_PRECISION (inner_type)
+ == GET_MODE_BITSIZE (TYPE_MODE (inner_type))
+ && TYPE_PRECISION (inner_type) < prec)
+ {
+ prec = TYPE_PRECISION (inner_type);
+ /* See if we can shorten the right shift. */
+ if (shiftc < prec)
+ shift_type = inner_type;
+ }
+ }
+ zerobits = ~(unsigned HOST_WIDE_INT) 0;
+ zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
+ zerobits <<= prec - shiftc;
+	      /* For an arithmetic shift, if the sign bit could be set,
+		 ZEROBITS may actually contain sign bits, so no
+		 transformation is possible unless MASK masks them all
+		 away.  In that case the shift needs to be converted
+		 into a logical shift.  */
+ if (!TYPE_UNSIGNED (TREE_TYPE (arg0))
+ && prec == TYPE_PRECISION (TREE_TYPE (arg0)))
+ {
+ if ((mask & zerobits) == 0)
+ shift_type = unsigned_type_for (TREE_TYPE (arg0));
+ else
+ zerobits = 0;
+ }
+ }
+
+ /* ((X << 16) & 0xff00) is (X, 0). */
+ if ((mask & zerobits) == mask)
+ return omit_one_operand (type, build_int_cst (type, 0), arg0);
+
+ newmask = mask | zerobits;
+ if (newmask != mask && (newmask & (newmask + 1)) == 0)
+ {
+ unsigned int prec;
+
+ /* Only do the transformation if NEWMASK is some integer
+ mode's mask. */
+ for (prec = BITS_PER_UNIT;
+ prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
+ if (newmask == (((unsigned HOST_WIDE_INT) 1) << prec) - 1)
+ break;
+ if (prec < HOST_BITS_PER_WIDE_INT
+ || newmask == ~(unsigned HOST_WIDE_INT) 0)
+ {
+ if (shift_type != TREE_TYPE (arg0))
+ {
+ tem = fold_build2 (TREE_CODE (arg0), shift_type,
+ fold_convert (shift_type,
+ TREE_OPERAND (arg0, 0)),
+ TREE_OPERAND (arg0, 1));
+ tem = fold_convert (type, tem);
+ }
+ else
+ tem = op0;
+ return fold_build2 (BIT_AND_EXPR, type, tem,
+ build_int_cst_type (TREE_TYPE (op1),
+ newmask));
+ }
+ }
+ }
+
goto associate;
case RDIV_EXPR:
if (code == LROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST)
{
tree tem = build_int_cst (TREE_TYPE (arg1),
- GET_MODE_BITSIZE (TYPE_MODE (type)));
+ TYPE_PRECISION (type));
tem = const_binop (MINUS_EXPR, tem, arg1, 0);
return fold_build2 (RROTATE_EXPR, type, op0, tem);
}
fold_build2 (code, type,
TREE_OPERAND (arg0, 1), arg1));
- /* Two consecutive rotates adding up to the width of the mode can
- be ignored. */
+ /* Two consecutive rotates adding up to the precision of the
+ type can be ignored. */
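+      /* For instance, (X r>> 5) r>> 27 in a 32-bit type is X.  */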
if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (arg0) == RROTATE_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
&& TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
&& ((TREE_INT_CST_LOW (arg1)
+ TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)))
- == (unsigned int) GET_MODE_BITSIZE (TYPE_MODE (type))))
+ == (unsigned int) TYPE_PRECISION (type)))
return TREE_OPERAND (arg0, 0);
+ /* Fold (X & C2) << C1 into (X << C1) & (C2 << C1)
+ (X & C2) >> C1 into (X >> C1) & (C2 >> C1)
+ if the latter can be further optimized. */
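+      /* E.g. (X & 0xff) << 8 becomes (X << 8) & 0xff00, where the new
+	 mask can combine with the shift's known zero low bits.  */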
+ if ((code == LSHIFT_EXPR || code == RSHIFT_EXPR)
+ && TREE_CODE (arg0) == BIT_AND_EXPR
+ && TREE_CODE (arg1) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
+ {
+ tree mask = fold_build2 (code, type,
+ fold_convert (type, TREE_OPERAND (arg0, 1)),
+ arg1);
+ tree shift = fold_build2 (code, type,
+ fold_convert (type, TREE_OPERAND (arg0, 0)),
+ arg1);
+ tem = fold_binary (BIT_AND_EXPR, type, shift, mask);
+ if (tem)
+ return tem;
+ }
+
return NULL_TREE;
case MIN_EXPR:
tree arg01 = TREE_OPERAND (arg0, 1);
if (TREE_CODE (arg00) == LSHIFT_EXPR
&& integer_onep (TREE_OPERAND (arg00, 0)))
- return
- fold_build2 (code, type,
- build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
- build2 (RSHIFT_EXPR, TREE_TYPE (arg00),
- arg01, TREE_OPERAND (arg00, 1)),
- fold_convert (TREE_TYPE (arg0),
- integer_one_node)),
- arg1);
- else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
- && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
- return
- fold_build2 (code, type,
- build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
- build2 (RSHIFT_EXPR, TREE_TYPE (arg01),
- arg00, TREE_OPERAND (arg01, 1)),
- fold_convert (TREE_TYPE (arg0),
- integer_one_node)),
- arg1);
+ {
+ tree tem = fold_build2 (RSHIFT_EXPR, TREE_TYPE (arg00),
+ arg01, TREE_OPERAND (arg00, 1));
+ tem = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0), tem,
+ build_int_cst (TREE_TYPE (arg0), 1));
+ return fold_build2 (code, type,
+ fold_convert (TREE_TYPE (arg1), tem), arg1);
+ }
+ else if (TREE_CODE (arg01) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (arg01, 0)))
+ {
+ tree tem = fold_build2 (RSHIFT_EXPR, TREE_TYPE (arg01),
+ arg00, TREE_OPERAND (arg01, 1));
+ tem = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0), tem,
+ build_int_cst (TREE_TYPE (arg0), 1));
+ return fold_build2 (code, type,
+ fold_convert (TREE_TYPE (arg1), tem), arg1);
+ }
}
/* If this is an NE or EQ comparison of zero against the result of a
}
}
-/* Return true if `t' is known to be non-negative. If the return
+/* Return true if an expression with code CODE and type TYPE is known
+   to be non-negative.  */
+
+static bool
+tree_simple_nonnegative_warnv_p (enum tree_code code, tree type)
+{
+  /* Truth values evaluate to 0 or 1, which is nonnegative unless we
+     have a signed:1 type (where the values are -1 and 0).  */
+  if ((TYPE_PRECISION (type) != 1 || TYPE_UNSIGNED (type))
+      && truth_value_p (code))
+    return true;
+ return false;
+}
+
+/* Return true if (CODE OP0) is known to be non-negative. If the return
value is based on the assumption that signed overflow is undefined,
set *STRICT_OVERFLOW_P to true; otherwise, don't change
*STRICT_OVERFLOW_P. */
-bool
-tree_expr_nonnegative_warnv_p (tree t, bool *strict_overflow_p)
+static bool
+tree_unary_nonnegative_warnv_p (enum tree_code code, tree type, tree op0,
+ bool *strict_overflow_p)
{
- if (t == error_mark_node)
- return false;
-
- if (TYPE_UNSIGNED (TREE_TYPE (t)))
+ if (TYPE_UNSIGNED (type))
return true;
- switch (TREE_CODE (t))
+ switch (code)
{
- case SSA_NAME:
- /* Query VRP to see if it has recorded any information about
- the range of this object. */
- return ssa_name_nonnegative_p (t);
-
case ABS_EXPR:
/* We can't return 1 if flag_wrapv is set because
ABS_EXPR<INT_MIN> = INT_MIN. */
- if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
+ if (!INTEGRAL_TYPE_P (type))
return true;
- if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t)))
+ if (TYPE_OVERFLOW_UNDEFINED (type))
{
*strict_overflow_p = true;
return true;
}
break;
- case INTEGER_CST:
- return tree_int_cst_sgn (t) >= 0;
+ case NON_LVALUE_EXPR:
+ case FLOAT_EXPR:
+ case FIX_TRUNC_EXPR:
+ return tree_expr_nonnegative_warnv_p (op0,
+ strict_overflow_p);
- case REAL_CST:
- return ! REAL_VALUE_NEGATIVE (TREE_REAL_CST (t));
+ case NOP_EXPR:
+ {
+ tree inner_type = TREE_TYPE (op0);
+ tree outer_type = type;
- case FIXED_CST:
- return ! FIXED_VALUE_NEGATIVE (TREE_FIXED_CST (t));
+ if (TREE_CODE (outer_type) == REAL_TYPE)
+ {
+ if (TREE_CODE (inner_type) == REAL_TYPE)
+ return tree_expr_nonnegative_warnv_p (op0,
+ strict_overflow_p);
+ if (TREE_CODE (inner_type) == INTEGER_TYPE)
+ {
+ if (TYPE_UNSIGNED (inner_type))
+ return true;
+ return tree_expr_nonnegative_warnv_p (op0,
+ strict_overflow_p);
+ }
+ }
+ else if (TREE_CODE (outer_type) == INTEGER_TYPE)
+ {
+ if (TREE_CODE (inner_type) == REAL_TYPE)
+ return tree_expr_nonnegative_warnv_p (op0,
+ strict_overflow_p);
+ if (TREE_CODE (inner_type) == INTEGER_TYPE)
+ return TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type)
+ && TYPE_UNSIGNED (inner_type);
+ }
+ }
+ break;
+
+ default:
+ return tree_simple_nonnegative_warnv_p (code, type);
+ }
+
+  /* We don't know the sign of the expression, so be conservative
+     and return false.  */
+ return false;
+}
+
+/* Return true if (CODE OP0 OP1) is known to be non-negative.  If the return
+ value is based on the assumption that signed overflow is undefined,
+ set *STRICT_OVERFLOW_P to true; otherwise, don't change
+ *STRICT_OVERFLOW_P. */
+
+static bool
+tree_binary_nonnegative_warnv_p (enum tree_code code, tree type, tree op0,
+ tree op1, bool *strict_overflow_p)
+{
+ if (TYPE_UNSIGNED (type))
+ return true;
+
+ switch (code)
+ {
case POINTER_PLUS_EXPR:
case PLUS_EXPR:
- if (FLOAT_TYPE_P (TREE_TYPE (t)))
- return (tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 0),
+ if (FLOAT_TYPE_P (type))
+ return (tree_expr_nonnegative_warnv_p (op0,
strict_overflow_p)
- && tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 1),
+ && tree_expr_nonnegative_warnv_p (op1,
strict_overflow_p));
/* zero_extend(x) + zero_extend(y) is non-negative if x and y are
both unsigned and at least 2 bits shorter than the result. */
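+      /* E.g. two 8-bit values zero-extended to 32 bits sum to at most
+	 255 + 255 = 510, which needs only 9 bits and thus cannot be
+	 negative in the wider type.  */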
- if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE
- && TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
- && TREE_CODE (TREE_OPERAND (t, 1)) == NOP_EXPR)
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && TREE_CODE (op0) == NOP_EXPR
+ && TREE_CODE (op1) == NOP_EXPR)
{
- tree inner1 = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0));
- tree inner2 = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0));
+ tree inner1 = TREE_TYPE (TREE_OPERAND (op0, 0));
+ tree inner2 = TREE_TYPE (TREE_OPERAND (op1, 0));
if (TREE_CODE (inner1) == INTEGER_TYPE && TYPE_UNSIGNED (inner1)
&& TREE_CODE (inner2) == INTEGER_TYPE && TYPE_UNSIGNED (inner2))
{
unsigned int prec = MAX (TYPE_PRECISION (inner1),
TYPE_PRECISION (inner2)) + 1;
- return prec < TYPE_PRECISION (TREE_TYPE (t));
+ return prec < TYPE_PRECISION (type);
}
}
break;
case MULT_EXPR:
- if (FLOAT_TYPE_P (TREE_TYPE (t)))
+ if (FLOAT_TYPE_P (type))
{
/* x * x for floating point x is always non-negative. */
- if (operand_equal_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1), 0))
+ if (operand_equal_p (op0, op1, 0))
return true;
- return (tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 0),
+ return (tree_expr_nonnegative_warnv_p (op0,
strict_overflow_p)
- && tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 1),
+ && tree_expr_nonnegative_warnv_p (op1,
strict_overflow_p));
}
/* zero_extend(x) * zero_extend(y) is non-negative if x and y are
both unsigned and their total bits is shorter than the result. */
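+      /* E.g. two 8-bit values zero-extended to 32 bits multiply to at
+	 most 255 * 255 = 65025, which fits in 16 bits and therefore
+	 stays non-negative.  */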
- if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE
- && TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
- && TREE_CODE (TREE_OPERAND (t, 1)) == NOP_EXPR)
+ if (TREE_CODE (type) == INTEGER_TYPE
+ && TREE_CODE (op0) == NOP_EXPR
+ && TREE_CODE (op1) == NOP_EXPR)
{
- tree inner1 = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0));
- tree inner2 = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0));
+ tree inner1 = TREE_TYPE (TREE_OPERAND (op0, 0));
+ tree inner2 = TREE_TYPE (TREE_OPERAND (op1, 0));
if (TREE_CODE (inner1) == INTEGER_TYPE && TYPE_UNSIGNED (inner1)
&& TREE_CODE (inner2) == INTEGER_TYPE && TYPE_UNSIGNED (inner2))
return TYPE_PRECISION (inner1) + TYPE_PRECISION (inner2)
- < TYPE_PRECISION (TREE_TYPE (t));
+ < TYPE_PRECISION (type);
}
return false;
case BIT_AND_EXPR:
case MAX_EXPR:
- return (tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 0),
+ return (tree_expr_nonnegative_warnv_p (op0,
strict_overflow_p)
- || tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 1),
+ || tree_expr_nonnegative_warnv_p (op1,
strict_overflow_p));
case BIT_IOR_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
- return (tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 0),
+ return (tree_expr_nonnegative_warnv_p (op0,
strict_overflow_p)
- && tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 1),
+ && tree_expr_nonnegative_warnv_p (op1,
strict_overflow_p));
case TRUNC_MOD_EXPR:
case CEIL_MOD_EXPR:
case FLOOR_MOD_EXPR:
case ROUND_MOD_EXPR:
- case SAVE_EXPR:
- case NON_LVALUE_EXPR:
- case FLOAT_EXPR:
- case FIX_TRUNC_EXPR:
- return tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 0),
+ return tree_expr_nonnegative_warnv_p (op0,
strict_overflow_p);
+ default:
+ return tree_simple_nonnegative_warnv_p (code, type);
+ }
- case COMPOUND_EXPR:
- case MODIFY_EXPR:
- case GIMPLE_MODIFY_STMT:
- return tree_expr_nonnegative_warnv_p (GENERIC_TREE_OPERAND (t, 1),
- strict_overflow_p);
+  /* We don't know the sign of the expression, so be conservative
+     and return false.  */
+ return false;
+}
- case BIND_EXPR:
- return tree_expr_nonnegative_warnv_p (expr_last (TREE_OPERAND (t, 1)),
- strict_overflow_p);
+
+/* Return true if T is known to be non-negative.  If the return
+ value is based on the assumption that signed overflow is undefined,
+ set *STRICT_OVERFLOW_P to true; otherwise, don't change
+ *STRICT_OVERFLOW_P. */
+
+static bool
+tree_single_nonnegative_warnv_p (tree t, bool *strict_overflow_p)
+{
+ if (TYPE_UNSIGNED (TREE_TYPE (t)))
+ return true;
+
+  switch (TREE_CODE (t))
+ {
+ case SSA_NAME:
+ /* Query VRP to see if it has recorded any information about
+ the range of this object. */
+ return ssa_name_nonnegative_p (t);
+
+ case INTEGER_CST:
+ return tree_int_cst_sgn (t) >= 0;
+
+ case REAL_CST:
+ return ! REAL_VALUE_NEGATIVE (TREE_REAL_CST (t));
+
+ case FIXED_CST:
+ return ! FIXED_VALUE_NEGATIVE (TREE_FIXED_CST (t));
case COND_EXPR:
return (tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 1),
strict_overflow_p)
&& tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 2),
strict_overflow_p));
+ default:
+ return tree_simple_nonnegative_warnv_p (TREE_CODE (t),
+ TREE_TYPE (t));
+ }
+ /* We don't know sign of `t', so be conservative and return false. */
+ return false;
+}
- case NOP_EXPR:
- {
- tree inner_type = TREE_TYPE (TREE_OPERAND (t, 0));
- tree outer_type = TREE_TYPE (t);
+/* Return true if T is known to be non-negative. If the return
+ value is based on the assumption that signed overflow is undefined,
+ set *STRICT_OVERFLOW_P to true; otherwise, don't change
+ *STRICT_OVERFLOW_P. */
- if (TREE_CODE (outer_type) == REAL_TYPE)
- {
- if (TREE_CODE (inner_type) == REAL_TYPE)
- return tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 0),
- strict_overflow_p);
- if (TREE_CODE (inner_type) == INTEGER_TYPE)
- {
- if (TYPE_UNSIGNED (inner_type))
- return true;
- return tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 0),
- strict_overflow_p);
- }
- }
- else if (TREE_CODE (outer_type) == INTEGER_TYPE)
- {
- if (TREE_CODE (inner_type) == REAL_TYPE)
- return tree_expr_nonnegative_warnv_p (TREE_OPERAND (t,0),
- strict_overflow_p);
- if (TREE_CODE (inner_type) == INTEGER_TYPE)
- return TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type)
- && TYPE_UNSIGNED (inner_type);
- }
- }
- break;
+static bool
+tree_invalid_nonnegative_warnv_p (tree t, bool *strict_overflow_p)
+{
+ if (TYPE_UNSIGNED (TREE_TYPE (t)))
+ return true;
+  switch (TREE_CODE (t))
+ {
case TARGET_EXPR:
{
tree temp = TARGET_EXPR_SLOT (t);
default:
break;
}
+ return tree_simple_nonnegative_warnv_p (TREE_CODE (t),
+ TREE_TYPE (t));
}
+ break;
- /* ... fall through ... */
+ case COMPOUND_EXPR:
+ case MODIFY_EXPR:
+ case GIMPLE_MODIFY_STMT:
+ return tree_expr_nonnegative_warnv_p (GENERIC_TREE_OPERAND (t, 1),
+ strict_overflow_p);
+ case BIND_EXPR:
+ return tree_expr_nonnegative_warnv_p (expr_last (TREE_OPERAND (t, 1)),
+ strict_overflow_p);
+ case SAVE_EXPR:
+ return tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 0),
+ strict_overflow_p);
default:
- {
- tree type = TREE_TYPE (t);
- if ((TYPE_PRECISION (type) != 1 || TYPE_UNSIGNED (type))
- && truth_value_p (TREE_CODE (t)))
- /* Truth values evaluate to 0 or 1, which is nonnegative unless we
- have a signed:1 type (where the value is -1 and 0). */
- return true;
- }
+ return tree_simple_nonnegative_warnv_p (TREE_CODE (t),
+ TREE_TYPE (t));
}
/* We don't know sign of `t', so be conservative and return false. */
return false;
}
+
+/* Return true if T is known to be non-negative.  If the return
+ value is based on the assumption that signed overflow is undefined,
+ set *STRICT_OVERFLOW_P to true; otherwise, don't change
+ *STRICT_OVERFLOW_P. */
+
+bool
+tree_expr_nonnegative_warnv_p (tree t, bool *strict_overflow_p)
+{
+ enum tree_code code;
+ if (t == error_mark_node)
+ return false;
+
+ code = TREE_CODE (t);
+ switch (TREE_CODE_CLASS (code))
+ {
+ case tcc_binary:
+ case tcc_comparison:
+ return tree_binary_nonnegative_warnv_p (TREE_CODE (t),
+ TREE_TYPE (t),
+ TREE_OPERAND (t, 0),
+ TREE_OPERAND (t, 1),
+ strict_overflow_p);
+
+ case tcc_unary:
+ return tree_unary_nonnegative_warnv_p (TREE_CODE (t),
+ TREE_TYPE (t),
+ TREE_OPERAND (t, 0),
+ strict_overflow_p);
+
+ case tcc_constant:
+ case tcc_declaration:
+ case tcc_reference:
+ return tree_single_nonnegative_warnv_p (t, strict_overflow_p);
+
+ default:
+ break;
+ }
+
+ switch (code)
+ {
+ case TRUTH_AND_EXPR:
+ case TRUTH_OR_EXPR:
+ case TRUTH_XOR_EXPR:
+ return tree_binary_nonnegative_warnv_p (TREE_CODE (t),
+ TREE_TYPE (t),
+ TREE_OPERAND (t, 0),
+ TREE_OPERAND (t, 1),
+ strict_overflow_p);
+ case TRUTH_NOT_EXPR:
+ return tree_unary_nonnegative_warnv_p (TREE_CODE (t),
+ TREE_TYPE (t),
+ TREE_OPERAND (t, 0),
+ strict_overflow_p);
+
+ case COND_EXPR:
+ case CONSTRUCTOR:
+ case OBJ_TYPE_REF:
+ case ASSERT_EXPR:
+ case ADDR_EXPR:
+ case WITH_SIZE_EXPR:
+ case EXC_PTR_EXPR:
+ case SSA_NAME:
+ case FILTER_EXPR:
+ return tree_single_nonnegative_warnv_p (t, strict_overflow_p);
+
+ default:
+ return tree_invalid_nonnegative_warnv_p (t, strict_overflow_p);
+ }
+}
+
/* Return true if `t' is known to be non-negative. Handle warnings
about undefined signed overflow. */
return ret;
}
-/* Return true when T is an address and is known to be nonzero.
+
+/* Return true when the unary expression (CODE OP0) is known to be nonzero.
For floating point we further ensure that T is not denormal.
Similar logic is present in nonzero_address in rtlanal.h.
is undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't
change *STRICT_OVERFLOW_P. */
-bool
-tree_expr_nonzero_warnv_p (tree t, bool *strict_overflow_p)
+static bool
+tree_unary_nonzero_warnv_p (enum tree_code code, tree type, tree op0,
+ bool *strict_overflow_p)
{
- tree type = TREE_TYPE (t);
- bool sub_strict_overflow_p;
+ switch (code)
+ {
+ case ABS_EXPR:
+ return tree_expr_nonzero_warnv_p (op0,
+ strict_overflow_p);
- /* Doing something useful for floating point would need more work. */
- if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
- return false;
+ case NOP_EXPR:
+ {
+ tree inner_type = TREE_TYPE (op0);
+ tree outer_type = type;
- switch (TREE_CODE (t))
- {
- case SSA_NAME:
- /* Query VRP to see if it has recorded any information about
- the range of this object. */
- return ssa_name_nonzero_p (t);
+ return (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
+ && tree_expr_nonzero_warnv_p (op0,
+ strict_overflow_p));
+ }
+ break;
- case ABS_EXPR:
- return tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
+ case NON_LVALUE_EXPR:
+ return tree_expr_nonzero_warnv_p (op0,
strict_overflow_p);
- case INTEGER_CST:
- return !integer_zerop (t);
+ default:
+ break;
+ }
+ return false;
+}
+
+/* Return true when the binary expression (CODE OP0 OP1) is known to be
+   nonzero.  Similar logic is present in nonzero_address in rtlanal.h.
+
+ If the return value is based on the assumption that signed overflow
+ is undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't
+ change *STRICT_OVERFLOW_P. */
+
+static bool
+tree_binary_nonzero_warnv_p (enum tree_code code,
+ tree type,
+ tree op0,
+ tree op1, bool *strict_overflow_p)
+{
+ bool sub_strict_overflow_p;
+ switch (code)
+ {
case POINTER_PLUS_EXPR:
case PLUS_EXPR:
if (TYPE_OVERFLOW_UNDEFINED (type))
/* With the presence of negative values it is hard
to say something. */
sub_strict_overflow_p = false;
- if (!tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 0),
+ if (!tree_expr_nonnegative_warnv_p (op0,
&sub_strict_overflow_p)
- || !tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 1),
+ || !tree_expr_nonnegative_warnv_p (op1,
&sub_strict_overflow_p))
return false;
/* One of operands must be positive and the other non-negative. */
/* We don't set *STRICT_OVERFLOW_P here: even if this value
overflows, on a twos-complement machine the sum of two
nonnegative numbers can never be zero. */
- return (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
+ return (tree_expr_nonzero_warnv_p (op0,
strict_overflow_p)
- || tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 1),
+ || tree_expr_nonzero_warnv_p (op1,
strict_overflow_p));
}
break;
case MULT_EXPR:
if (TYPE_OVERFLOW_UNDEFINED (type))
{
- if (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
+ if (tree_expr_nonzero_warnv_p (op0,
strict_overflow_p)
- && tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 1),
+ && tree_expr_nonzero_warnv_p (op1,
strict_overflow_p))
{
*strict_overflow_p = true;
}
break;
- case NOP_EXPR:
- {
- tree inner_type = TREE_TYPE (TREE_OPERAND (t, 0));
- tree outer_type = TREE_TYPE (t);
+ case MIN_EXPR:
+ sub_strict_overflow_p = false;
+ if (tree_expr_nonzero_warnv_p (op0,
+ &sub_strict_overflow_p)
+ && tree_expr_nonzero_warnv_p (op1,
+ &sub_strict_overflow_p))
+	{
+	  if (sub_strict_overflow_p)
+	    *strict_overflow_p = true;
+	  /* MIN of two nonzero operands is one of them, hence nonzero.  */
+	  return true;
+	}
+ break;
- return (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
- && tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
- strict_overflow_p));
- }
+ case MAX_EXPR:
+ sub_strict_overflow_p = false;
+ if (tree_expr_nonzero_warnv_p (op0,
+ &sub_strict_overflow_p))
+ {
+ if (sub_strict_overflow_p)
+ *strict_overflow_p = true;
+
+ /* When both operands are nonzero, then MAX must be too. */
+ if (tree_expr_nonzero_warnv_p (op1,
+ strict_overflow_p))
+ return true;
+
+ /* MAX where operand 0 is positive is positive. */
+ return tree_expr_nonnegative_warnv_p (op0,
+ strict_overflow_p);
+ }
+ /* MAX where operand 1 is positive is positive. */
+ else if (tree_expr_nonzero_warnv_p (op1,
+ &sub_strict_overflow_p)
+ && tree_expr_nonnegative_warnv_p (op1,
+ &sub_strict_overflow_p))
+ {
+ if (sub_strict_overflow_p)
+ *strict_overflow_p = true;
+ return true;
+ }
+ break;
+
+ case BIT_IOR_EXPR:
+ return (tree_expr_nonzero_warnv_p (op1,
+ strict_overflow_p)
+ || tree_expr_nonzero_warnv_p (op0,
+ strict_overflow_p));
+
+ default:
break;
+ }
+
+ return false;
+}
+
+/* Return true when T is an address and is known to be nonzero.
+ For floating point we further ensure that T is not denormal.
+ Similar logic is present in nonzero_address in rtlanal.h.
+
+ If the return value is based on the assumption that signed overflow
+ is undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't
+ change *STRICT_OVERFLOW_P. */
+
+static bool
+tree_single_nonzero_warnv_p (tree t, bool *strict_overflow_p)
+{
+ bool sub_strict_overflow_p;
+ switch (TREE_CODE (t))
+ {
+ case SSA_NAME:
+ /* Query VRP to see if it has recorded any information about
+ the range of this object. */
+ return ssa_name_nonzero_p (t);
- case ADDR_EXPR:
+ case INTEGER_CST:
+ return !integer_zerop (t);
+
+ case ADDR_EXPR:
{
tree base = get_base_address (TREE_OPERAND (t, 0));
}
break;
- case MIN_EXPR:
- sub_strict_overflow_p = false;
- if (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
- &sub_strict_overflow_p)
- && tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 1),
- &sub_strict_overflow_p))
- {
- if (sub_strict_overflow_p)
- *strict_overflow_p = true;
- }
+ default:
break;
+ }
+ return false;
+}
- case MAX_EXPR:
- sub_strict_overflow_p = false;
- if (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
- &sub_strict_overflow_p))
- {
- if (sub_strict_overflow_p)
- *strict_overflow_p = true;
+
+/* Return true when T is an address and is known to be nonzero.
+ For floating point we further ensure that T is not denormal.
+ Similar logic is present in nonzero_address in rtlanal.h.
- /* When both operands are nonzero, then MAX must be too. */
- if (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 1),
- strict_overflow_p))
- return true;
+ If the return value is based on the assumption that signed overflow
+ is undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't
+ change *STRICT_OVERFLOW_P. */
- /* MAX where operand 0 is positive is positive. */
- return tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 0),
+bool
+tree_expr_nonzero_warnv_p (tree t, bool *strict_overflow_p)
+{
+ tree type = TREE_TYPE (t);
+ enum tree_code code;
+
+ /* Doing something useful for floating point would need more work. */
+ if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
+ return false;
+
+ code = TREE_CODE (t);
+ switch (TREE_CODE_CLASS (code))
+ {
+ case tcc_unary:
+ return tree_unary_nonzero_warnv_p (code, type, TREE_OPERAND (t, 0),
+ strict_overflow_p);
+ case tcc_binary:
+ case tcc_comparison:
+ return tree_binary_nonzero_warnv_p (code, type,
+ TREE_OPERAND (t, 0),
+ TREE_OPERAND (t, 1),
strict_overflow_p);
- }
- /* MAX where operand 1 is positive is positive. */
- else if (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 1),
- &sub_strict_overflow_p)
- && tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 1),
- &sub_strict_overflow_p))
- {
- if (sub_strict_overflow_p)
- *strict_overflow_p = true;
- return true;
- }
+ case tcc_constant:
+ case tcc_declaration:
+ case tcc_reference:
+ return tree_single_nonzero_warnv_p (t, strict_overflow_p);
+
+ default:
break;
+ }
+
+ switch (code)
+ {
+ case TRUTH_NOT_EXPR:
+ return tree_unary_nonzero_warnv_p (code, type, TREE_OPERAND (t, 0),
+ strict_overflow_p);
+
+ case TRUTH_AND_EXPR:
+ case TRUTH_OR_EXPR:
+ case TRUTH_XOR_EXPR:
+ return tree_binary_nonzero_warnv_p (code, type,
+ TREE_OPERAND (t, 0),
+ TREE_OPERAND (t, 1),
+ strict_overflow_p);
+
+ case COND_EXPR:
+ case CONSTRUCTOR:
+ case OBJ_TYPE_REF:
+ case ASSERT_EXPR:
+ case ADDR_EXPR:
+ case WITH_SIZE_EXPR:
+ case EXC_PTR_EXPR:
+ case SSA_NAME:
+ case FILTER_EXPR:
+ return tree_single_nonzero_warnv_p (t, strict_overflow_p);
case COMPOUND_EXPR:
case MODIFY_EXPR:
strict_overflow_p);
case SAVE_EXPR:
- case NON_LVALUE_EXPR:
return tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
strict_overflow_p);
- case BIT_IOR_EXPR:
- return (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 1),
- strict_overflow_p)
- || tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
- strict_overflow_p));
-
case CALL_EXPR:
return alloca_call_p (t);