static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
enum machine_mode *, int *, int *,
tree *, tree *);
-static int all_ones_mask_p (tree, int);
-static tree sign_bit_p (tree, tree);
-static int simple_operand_p (tree);
+static int all_ones_mask_p (const_tree, int);
+static tree sign_bit_p (tree, const_tree);
+static int simple_operand_p (const_tree);
static tree range_binop (enum tree_code, tree, tree, int, tree, int);
static tree range_predecessor (tree);
static tree range_successor (tree);
static tree fold_binary_op_with_conditional_arg (enum tree_code, tree,
tree, tree,
tree, tree, int);
-static bool fold_real_zero_addition_p (tree, tree, int);
static tree fold_mathfn_compare (enum built_in_function, enum tree_code,
tree, tree, tree);
static tree fold_inf_compare (enum tree_code, tree, tree, tree);
static tree fold_div_compare (enum tree_code, tree, tree, tree);
-static bool reorder_operands_p (tree, tree);
+static bool reorder_operands_p (const_tree, const_tree);
static tree fold_negate_const (tree, tree);
static tree fold_not_const (tree, tree);
static tree fold_relational_const (enum tree_code, tree, tree, tree);
Otherwise returns NULL_TREE. */
static tree
-div_if_zero_remainder (enum tree_code code, tree arg1, tree arg2)
+div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2)
{
unsigned HOST_WIDE_INT int1l, int2l;
HOST_WIDE_INT int1h, int2h;
deferred code. */
void
-fold_undefer_overflow_warnings (bool issue, tree stmt, int code)
+fold_undefer_overflow_warnings (bool issue, const_tree stmt, int code)
{
const char *warnmsg;
location_t locus;
if (!issue || warnmsg == NULL)
return;
+ if (stmt != NULL_TREE && TREE_NO_WARNING (stmt))
+ return;
+
/* Use the smallest code level when deciding to issue the
warning. */
if (code == 0 || code > (int) fold_deferred_overflow_code)
|| TREE_CODE (in) == FIXED_CST)
*litp = in;
else if (TREE_CODE (in) == code
- || (! FLOAT_TYPE_P (TREE_TYPE (in))
+ || ((! FLOAT_TYPE_P (TREE_TYPE (in)) || flag_associative_math)
&& ! SAT_FIXED_POINT_TYPE_P (TREE_TYPE (in))
/* We can associate addition and subtraction together (even
though the C standard doesn't say so) for integers because
for use in int_const_binop, size_binop and size_diffop. */
static bool
-int_binop_types_match_p (enum tree_code code, tree type1, tree type2)
+int_binop_types_match_p (enum tree_code code, const_tree type1, const_tree type2)
{
if (TREE_CODE (type1) != INTEGER_TYPE && !POINTER_TYPE_P (type1))
return false;
INTEGER_CST to another integer type. */
static tree
-fold_convert_const_int_from_int (tree type, tree arg1)
+fold_convert_const_int_from_int (tree type, const_tree arg1)
{
tree t;
t = force_fit_type_double (type, TREE_INT_CST_LOW (arg1),
TREE_INT_CST_HIGH (arg1),
/* Don't set the overflow when
- converting a pointer */
- !POINTER_TYPE_P (TREE_TYPE (arg1)),
+ converting from a pointer, */
+ !POINTER_TYPE_P (TREE_TYPE (arg1))
+ /* or to a sizetype with same signedness
+ and the precision is unchanged.
+ ??? sizetype is always sign-extended,
+ but its signedness depends on the
+ frontend. Thus we see spurious overflows
+ here if we do not check this. */
+ && !((TYPE_PRECISION (TREE_TYPE (arg1))
+ == TYPE_PRECISION (type))
+ && (TYPE_UNSIGNED (TREE_TYPE (arg1))
+ == TYPE_UNSIGNED (type))
+ && ((TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (TREE_TYPE (arg1)))
+ || (TREE_CODE (type) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (type)))),
(TREE_INT_CST_HIGH (arg1) < 0
&& (TYPE_UNSIGNED (type)
< TYPE_UNSIGNED (TREE_TYPE (arg1))))
to an integer type. */
static tree
-fold_convert_const_int_from_real (enum tree_code code, tree type, tree arg1)
+fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg1)
{
int overflow = 0;
tree t;
FIXED_CST to an integer type. */
static tree
-fold_convert_const_int_from_fixed (tree type, tree arg1)
+fold_convert_const_int_from_fixed (tree type, const_tree arg1)
{
tree t;
double_int temp, temp_trunc;
to another floating point type. */
static tree
-fold_convert_const_real_from_real (tree type, tree arg1)
+fold_convert_const_real_from_real (tree type, const_tree arg1)
{
REAL_VALUE_TYPE value;
tree t;
to a floating point type. */
static tree
-fold_convert_const_real_from_fixed (tree type, tree arg1)
+fold_convert_const_real_from_fixed (tree type, const_tree arg1)
{
REAL_VALUE_TYPE value;
tree t;
to another fixed-point type. */
static tree
-fold_convert_const_fixed_from_fixed (tree type, tree arg1)
+fold_convert_const_fixed_from_fixed (tree type, const_tree arg1)
{
FIXED_VALUE_TYPE value;
tree t;
to a fixed-point type. */
static tree
-fold_convert_const_fixed_from_int (tree type, tree arg1)
+fold_convert_const_fixed_from_int (tree type, const_tree arg1)
{
FIXED_VALUE_TYPE value;
tree t;
to a fixed-point type. */
static tree
-fold_convert_const_fixed_from_real (tree type, tree arg1)
+fold_convert_const_fixed_from_real (tree type, const_tree arg1)
{
FIXED_VALUE_TYPE value;
tree t;
return (TREE_CODE (orig) == VECTOR_TYPE
&& tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
- default:
+ case REAL_TYPE:
+ case FIXED_POINT_TYPE:
+ case COMPLEX_TYPE:
+ case VECTOR_TYPE:
+ case VOID_TYPE:
return TREE_CODE (type) == TREE_CODE (orig);
+
+ default:
+ return false;
}
}
otherwise. */
static bool
-maybe_lvalue_p (tree x)
+maybe_lvalue_p (const_tree x)
{
/* We only need to wrap lvalue tree codes. */
switch (TREE_CODE (x))
{
tree t = fold_convert (type, result);
- /* If the resulting operand is an empty statement, just return the ommited
+ /* If the resulting operand is an empty statement, just return the omitted
statement casted to void. */
if (IS_EMPTY_STMT (t) && TREE_SIDE_EFFECTS (omitted))
return build1 (NOP_EXPR, void_type_node, fold_ignored_result (omitted));
{
tree t = fold_convert (type, result);
- /* If the resulting operand is an empty statement, just return the ommited
+ /* If the resulting operand is an empty statement, just return the omitted
statement casted to void. */
if (IS_EMPTY_STMT (t) && TREE_SIDE_EFFECTS (omitted))
return build1 (NOP_EXPR, void_type_node, fold_ignored_result (omitted));
bit positions. */
static int
-all_ones_mask_p (tree mask, int size)
+all_ones_mask_p (const_tree mask, int size)
{
tree type = TREE_TYPE (mask);
unsigned int precision = TYPE_PRECISION (type);
or NULL_TREE otherwise. */
static tree
-sign_bit_p (tree exp, tree val)
+sign_bit_p (tree exp, const_tree val)
{
unsigned HOST_WIDE_INT mask_lo, lo;
HOST_WIDE_INT mask_hi, hi;
to be evaluated unconditionally. */
static int
-simple_operand_p (tree exp)
+simple_operand_p (const_tree exp)
{
/* Strip any conversions that don't change the machine mode. */
STRIP_NOPS (exp);
Note that all these transformations are correct if A is
NaN, since the two alternatives (A and -A) are also NaNs. */
- if ((FLOAT_TYPE_P (TREE_TYPE (arg01))
- ? real_zerop (arg01)
- : integer_zerop (arg01))
+ if (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))
+ && (FLOAT_TYPE_P (TREE_TYPE (arg01))
+ ? real_zerop (arg01)
+ : integer_zerop (arg01))
&& ((TREE_CODE (arg2) == NEGATE_EXPR
&& operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
/* In the case that A is of the form X-Y, '-A' (arg2) may
both transformations are correct when A is NaN: A != 0
is then true, and A == 0 is false. */
- if (integer_zerop (arg01) && integer_zerop (arg2))
+ if (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))
+ && integer_zerop (arg01) && integer_zerop (arg2))
{
if (comp_code == NE_EXPR)
return pedantic_non_lvalue (fold_convert (type, arg1));
a number and A is not. The conditions in the original
expressions will be false, so all four give B. The min()
and max() versions would give a NaN instead. */
- if (operand_equal_for_comparison_p (arg01, arg2, arg00)
+ if (!HONOR_SIGNED_ZEROS (TYPE_MODE (type))
+ && operand_equal_for_comparison_p (arg01, arg2, arg00)
/* Avoid these transformations if the COND_EXPR may be used
as an lvalue in the C++ front-end. PR c++/19199. */
&& (in_gimple_form
then we cannot pass through this conversion. */
|| (code != MULT_EXPR
&& (TYPE_UNSIGNED (ctype)
- != TYPE_UNSIGNED (TREE_TYPE (op0))))))
+ != TYPE_UNSIGNED (TREE_TYPE (op0))))
+ /* ... or has undefined overflow while the converted-to
+ type has not, we cannot do the operation in the inner type
+ as that would introduce undefined overflow. */
+ || (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))
+ && !TYPE_OVERFLOW_UNDEFINED (type))))
break;
/* Pass the constant down and see if we can make a simplification. If
}
break;
}
+ /* If the constant is negative, we cannot simplify this. */
+ if (tree_int_cst_sgn (c) == -1)
+ break;
/* FALLTHROUGH */
case NEGATE_EXPR:
if ((t1 = extract_muldiv (op0, c, code, wide_type, strict_overflow_p))
&& ((code == MULT_EXPR && tcode == EXACT_DIV_EXPR)
|| (tcode == MULT_EXPR
&& code != TRUNC_MOD_EXPR && code != CEIL_MOD_EXPR
- && code != FLOOR_MOD_EXPR && code != ROUND_MOD_EXPR)))
+ && code != FLOOR_MOD_EXPR && code != ROUND_MOD_EXPR
+ && code != MULT_EXPR)))
{
if (integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
{
}
-/* Return true if expr looks like an ARRAY_REF and set base and
- offset to the appropriate trees. If there is no offset,
- offset is set to NULL_TREE. Base will be canonicalized to
- something you can get the element type from using
- TREE_TYPE (TREE_TYPE (base)). Offset will be the offset
- in bytes to the base in sizetype. */
-
-static bool
-extract_array_ref (tree expr, tree *base, tree *offset)
-{
- /* One canonical form is a PLUS_EXPR with the first
- argument being an ADDR_EXPR with a possible NOP_EXPR
- attached. */
- if (TREE_CODE (expr) == POINTER_PLUS_EXPR)
- {
- tree op0 = TREE_OPERAND (expr, 0);
- tree inner_base, dummy1;
- /* Strip NOP_EXPRs here because the C frontends and/or
- folders present us (int *)&x.a p+ 4 possibly. */
- STRIP_NOPS (op0);
- if (extract_array_ref (op0, &inner_base, &dummy1))
- {
- *base = inner_base;
- *offset = fold_convert (sizetype, TREE_OPERAND (expr, 1));
- if (dummy1 != NULL_TREE)
- *offset = fold_build2 (PLUS_EXPR, sizetype,
- dummy1, *offset);
- return true;
- }
- }
- /* Other canonical form is an ADDR_EXPR of an ARRAY_REF,
- which we transform into an ADDR_EXPR with appropriate
- offset. For other arguments to the ADDR_EXPR we assume
- zero offset and as such do not care about the ADDR_EXPR
- type and strip possible nops from it. */
- else if (TREE_CODE (expr) == ADDR_EXPR)
- {
- tree op0 = TREE_OPERAND (expr, 0);
- if (TREE_CODE (op0) == ARRAY_REF)
- {
- tree idx = TREE_OPERAND (op0, 1);
- *base = TREE_OPERAND (op0, 0);
- *offset = fold_build2 (MULT_EXPR, TREE_TYPE (idx), idx,
- array_ref_element_size (op0));
- *offset = fold_convert (sizetype, *offset);
- }
- else
- {
- /* Handle array-to-pointer decay as &a. */
- if (TREE_CODE (TREE_TYPE (op0)) == ARRAY_TYPE)
- *base = TREE_OPERAND (expr, 0);
- else
- *base = expr;
- *offset = NULL_TREE;
- }
- return true;
- }
- /* The next canonical form is a VAR_DECL with POINTER_TYPE. */
- else if (SSA_VAR_P (expr)
- && TREE_CODE (TREE_TYPE (expr)) == POINTER_TYPE)
- {
- *base = expr;
- *offset = NULL_TREE;
- return true;
- }
-
- return false;
-}
-
-
/* Transform `a + (b ? x : y)' into `b ? (a + x) : (a + y)'.
Transform, `a + (x < y)' into `(x < y) ? (a + 1) : (a + 0)'. Here
CODE corresponds to the `+', COND to the `(b ? x : y)' or `(x < y)'
X - 0 is not the same as X because 0 - 0 is -0. In other rounding
modes, X + 0 is not the same as X because -0 + 0 is 0. */
-static bool
-fold_real_zero_addition_p (tree type, tree addend, int negate)
+bool
+fold_real_zero_addition_p (const_tree type, const_tree addend, int negate)
{
if (!real_zerop (addend))
return false;
such that the evaluation of arg1 occurs before arg0. */
static bool
-reorder_operands_p (tree arg0, tree arg1)
+reorder_operands_p (const_tree arg0, const_tree arg1)
{
if (! flag_evaluation_order)
return true;
if (TYPE_PRECISION (TREE_TYPE (arg0)) <= TYPE_PRECISION (shorter_type))
return NULL_TREE;
- arg1_unw = get_unwidened (arg1, shorter_type);
+ arg1_unw = get_unwidened (arg1, NULL_TREE);
/* If possible, express the comparison in the shorter mode. */
if ((code == EQ_EXPR || code == NE_EXPR
|| TYPE_UNSIGNED (TREE_TYPE (arg0)) == TYPE_UNSIGNED (shorter_type))
&& (TREE_TYPE (arg1_unw) == shorter_type
+ || (TYPE_PRECISION (shorter_type)
+ >= TYPE_PRECISION (TREE_TYPE (arg1_unw)))
|| (TREE_CODE (arg1_unw) == INTEGER_CST
&& (TREE_CODE (shorter_type) == INTEGER_TYPE
|| TREE_CODE (shorter_type) == BOOLEAN_TYPE)
switch (code)
{
+ case PAREN_EXPR:
+ /* Re-association barriers around constants and other re-association
+ barriers can be removed. */
+ if (CONSTANT_CLASS_P (op0)
+ || TREE_CODE (op0) == PAREN_EXPR)
+ return fold_convert (type, op0);
+ return NULL_TREE;
+
case NOP_EXPR:
case FLOAT_EXPR:
case CONVERT_EXPR:
(for integers). Avoid this if the final type is a pointer
since then we sometimes need the inner conversion. Likewise if
the outer has a precision not equal to the size of its mode. */
- if ((((inter_int || inter_ptr) && (inside_int || inside_ptr))
+ if (((inter_int && inside_int)
|| (inter_float && inside_float)
|| (inter_vec && inside_vec))
&& inter_prec >= inside_prec
intermediate and final types differ, or
- the final type is a pointer type and the precisions of the
initial and intermediate types differ.
- - the final type is a pointer type and the initial type not
- the initial type is a pointer to an array and the final type
not. */
if (! inside_float && ! inter_float && ! final_float
&& ! (final_ptr && inside_prec != inter_prec)
&& ! (final_prec != GET_MODE_BITSIZE (TYPE_MODE (type))
&& TYPE_MODE (type) == TYPE_MODE (inter_type))
- && final_ptr == inside_ptr
- && ! (inside_ptr
+ && ! (inside_ptr && final_ptr
&& TREE_CODE (TREE_TYPE (inside_type)) == ARRAY_TYPE
&& TREE_CODE (TREE_TYPE (type)) != ARRAY_TYPE))
return fold_build1 (code, type, TREE_OPERAND (op0, 0));
return op0;
if (TREE_CODE (op0) == VIEW_CONVERT_EXPR)
return fold_build1 (VIEW_CONVERT_EXPR, type, TREE_OPERAND (op0, 0));
+
+ /* For integral conversions with the same precision or pointer
+ conversions use a NOP_EXPR instead. */
+ if ((INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (TREE_TYPE (op0))
+ && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (op0))
+ /* Do not muck with VIEW_CONVERT_EXPRs that convert from
+ a sub-type to its base type as generated by the Ada FE. */
+ && !TREE_TYPE (TREE_TYPE (op0)))
+ || (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (op0))))
+ return fold_convert (type, op0);
+
+ /* Strip inner integral conversions that do not change the precision. */
+ if ((TREE_CODE (op0) == NOP_EXPR
+ || TREE_CODE (op0) == CONVERT_EXPR)
+ && INTEGRAL_TYPE_P (TREE_TYPE (op0))
+ && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (op0, 0)))
+ && (TYPE_PRECISION (TREE_TYPE (op0))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op0, 0)))))
+ return fold_build1 (VIEW_CONVERT_EXPR, type, TREE_OPERAND (op0, 0));
+
return fold_view_convert_expr (type, op0);
case NEGATE_EXPR:
if (TREE_CODE (arg0) == INTEGER_CST)
return fold_not_const (arg0, type);
else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
- return TREE_OPERAND (arg0, 0);
+ return fold_convert (type, TREE_OPERAND (arg0, 0));
/* Convert ~ (-A) to A - 1. */
else if (INTEGRAL_TYPE_P (type) && TREE_CODE (arg0) == NEGATE_EXPR)
- return fold_build2 (MINUS_EXPR, type, TREE_OPERAND (arg0, 0),
+ return fold_build2 (MINUS_EXPR, type,
+ fold_convert (type, TREE_OPERAND (arg0, 0)),
build_int_cst (type, 1));
/* Convert ~ (A - 1) or ~ (A + -1) to -A. */
else if (INTEGRAL_TYPE_P (type)
&& integer_onep (TREE_OPERAND (arg0, 1)))
|| (TREE_CODE (arg0) == PLUS_EXPR
&& integer_all_onesp (TREE_OPERAND (arg0, 1)))))
- return fold_build1 (NEGATE_EXPR, type, TREE_OPERAND (arg0, 0));
+ return fold_build1 (NEGATE_EXPR, type,
+ fold_convert (type, TREE_OPERAND (arg0, 0)));
/* Convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
else if (TREE_CODE (arg0) == BIT_XOR_EXPR
&& (tem = fold_unary (BIT_NOT_EXPR, type,
TREE_OPERAND (arg0, 1)))))
return fold_build2 (BIT_XOR_EXPR, type,
fold_convert (type, TREE_OPERAND (arg0, 0)), tem);
+ /* Perform BIT_NOT_EXPR on each element individually. */
+ else if (TREE_CODE (arg0) == VECTOR_CST)
+ {
+ tree elements = TREE_VECTOR_CST_ELTS (arg0), elem, list = NULL_TREE;
+ int count = TYPE_VECTOR_SUBPARTS (type), i;
+
+ for (i = 0; i < count; i++)
+ {
+ if (elements)
+ {
+ elem = TREE_VALUE (elements);
+ elem = fold_unary (BIT_NOT_EXPR, TREE_TYPE (type), elem);
+ if (elem == NULL_TREE)
+ break;
+ elements = TREE_CHAIN (elements);
+ }
+ else
+ elem = build_int_cst (TREE_TYPE (type), -1);
+ list = tree_cons (NULL_TREE, elem, list);
+ }
+ if (i == count)
+ return build_vector (type, nreverse (list));
+ }
return NULL_TREE;
if (TREE_CODE (arg0) == CALL_EXPR)
{
tree fn = get_callee_fndecl (arg0);
- if (DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
+ if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
switch (DECL_FUNCTION_CODE (fn))
{
CASE_FLT_FN (BUILT_IN_CEXPI):
if (TREE_CODE (arg0) == CALL_EXPR)
{
tree fn = get_callee_fndecl (arg0);
- if (DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
+ if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
switch (DECL_FUNCTION_CODE (fn))
{
CASE_FLT_FN (BUILT_IN_CEXPI):
/* For comparisons of pointers we can decompose it to a compile time
comparison of the base objects and the offsets into the object.
- This requires at least one operand being an ADDR_EXPR to do more
- than the operand_equal_p test below. */
+ This requires at least one operand being an ADDR_EXPR or a
+ POINTER_PLUS_EXPR to do more than the operand_equal_p test below. */
if (POINTER_TYPE_P (TREE_TYPE (arg0))
&& (TREE_CODE (arg0) == ADDR_EXPR
- || TREE_CODE (arg1) == ADDR_EXPR))
+ || TREE_CODE (arg1) == ADDR_EXPR
+ || TREE_CODE (arg0) == POINTER_PLUS_EXPR
+ || TREE_CODE (arg1) == POINTER_PLUS_EXPR))
{
tree base0, base1, offset0 = NULL_TREE, offset1 = NULL_TREE;
HOST_WIDE_INT bitsize, bitpos0 = 0, bitpos1 = 0;
else
indirect_base0 = true;
}
+ else if (TREE_CODE (arg0) == POINTER_PLUS_EXPR)
+ {
+ base0 = TREE_OPERAND (arg0, 0);
+ offset0 = TREE_OPERAND (arg0, 1);
+ }
base1 = arg1;
if (TREE_CODE (arg1) == ADDR_EXPR)
else if (!indirect_base0)
base1 = NULL_TREE;
}
+ else if (TREE_CODE (arg1) == POINTER_PLUS_EXPR)
+ {
+ base1 = TREE_OPERAND (arg1, 0);
+ offset1 = TREE_OPERAND (arg1, 1);
+ }
else if (indirect_base0)
base1 = NULL_TREE;
}
}
- /* If this is a comparison of two exprs that look like an ARRAY_REF of the
- same object, then we can fold this to a comparison of the two offsets in
- signed size type. This is possible because pointer arithmetic is
- restricted to retain within an object and overflow on pointer differences
- is undefined as of 6.5.6/8 and /9 with respect to the signed ptrdiff_t.
-
- We check flag_wrapv directly because pointers types are unsigned,
- and therefore TYPE_OVERFLOW_WRAPS returns true for them. That is
- normally what we want to avoid certain odd overflow cases, but
- not here. */
- if (POINTER_TYPE_P (TREE_TYPE (arg0))
- && !flag_wrapv
- && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (arg0)))
- {
- tree base0, offset0, base1, offset1;
-
- if (extract_array_ref (arg0, &base0, &offset0)
- && extract_array_ref (arg1, &base1, &offset1)
- && operand_equal_p (base0, base1, 0))
- {
- tree signed_size_type_node;
- signed_size_type_node = signed_type_for (size_type_node);
-
- /* By converting to signed size type we cover middle-end pointer
- arithmetic which operates on unsigned pointer types of size
- type size and ARRAY_REF offsets which are properly sign or
- zero extended from their type in case it is narrower than
- size type. */
- if (offset0 == NULL_TREE)
- offset0 = build_int_cst (signed_size_type_node, 0);
- else
- offset0 = fold_convert (signed_size_type_node, offset0);
- if (offset1 == NULL_TREE)
- offset1 = build_int_cst (signed_size_type_node, 0);
- else
- offset1 = fold_convert (signed_size_type_node, offset1);
-
- return fold_build2 (code, type, offset0, offset1);
- }
- }
-
/* Transform comparisons of the form X +- C1 CMP Y +- C2 to
X CMP Y +- C2 +- C1 for signed X, Y. This is valid if
the resulting offset is smaller in absolute value than the
/* Likewise, we can simplify a comparison of a real constant with
a MINUS_EXPR whose first operand is also a real constant, i.e.
- (c1 - x) < c2 becomes x > c1-c2. */
- if (flag_unsafe_math_optimizations
+ (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
+ floating-point types only if -fassociative-math is set. */
+ if (flag_associative_math
&& TREE_CODE (arg1) == REAL_CST
&& TREE_CODE (arg0) == MINUS_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 0)) == REAL_CST
}
+/* Subroutine of fold_binary. If P is the value of EXPR, computes
+ power-of-two M and (arbitrary) N such that M divides (P-N). This condition
+ guarantees that P and N have the same least significant log2(M) bits.
+ N is not otherwise constrained. In particular, N is not normalized to
+ 0 <= N < M as is common. In general, the precise value of P is unknown.
+ M is chosen as large as possible such that constant N can be determined.
+
+ Returns M and sets *RESIDUE to N. */
+
+static unsigned HOST_WIDE_INT
+get_pointer_modulus_and_residue (tree expr, unsigned HOST_WIDE_INT *residue)
+{
+ enum tree_code code;
+
+ *residue = 0;
+
+ code = TREE_CODE (expr);
+ if (code == ADDR_EXPR)
+ {
+ expr = TREE_OPERAND (expr, 0);
+ if (handled_component_p (expr))
+ {
+ HOST_WIDE_INT bitsize, bitpos;
+ tree offset;
+ enum machine_mode mode;
+ int unsignedp, volatilep;
+
+ expr = get_inner_reference (expr, &bitsize, &bitpos, &offset,
+ &mode, &unsignedp, &volatilep, false);
+ *residue = bitpos / BITS_PER_UNIT;
+ if (offset)
+ {
+ if (TREE_CODE (offset) == INTEGER_CST)
+ *residue += TREE_INT_CST_LOW (offset);
+ else
+ /* We don't handle more complicated offset expressions. */
+ return 1;
+ }
+ }
+
+ if (DECL_P (expr))
+ return DECL_ALIGN_UNIT (expr);
+ }
+ else if (code == POINTER_PLUS_EXPR)
+ {
+ tree op0, op1;
+ unsigned HOST_WIDE_INT modulus;
+ enum tree_code inner_code;
+
+ op0 = TREE_OPERAND (expr, 0);
+ STRIP_NOPS (op0);
+ modulus = get_pointer_modulus_and_residue (op0, residue);
+
+ op1 = TREE_OPERAND (expr, 1);
+ STRIP_NOPS (op1);
+ inner_code = TREE_CODE (op1);
+ if (inner_code == INTEGER_CST)
+ {
+ *residue += TREE_INT_CST_LOW (op1);
+ return modulus;
+ }
+ else if (inner_code == MULT_EXPR)
+ {
+ op1 = TREE_OPERAND (op1, 1);
+ if (TREE_CODE (op1) == INTEGER_CST)
+ {
+ unsigned HOST_WIDE_INT align;
+
+ /* Compute the greatest power-of-2 divisor of op1. */
+ align = TREE_INT_CST_LOW (op1);
+ align &= -align;
+
+ /* If align is non-zero and less than *modulus, replace
+ *modulus with align. If align is 0, then either op1 is 0
+ or the greatest power-of-2 divisor of op1 doesn't fit in an
+ unsigned HOST_WIDE_INT. In either case, no additional
+ constraint is imposed. */
+ if (align)
+ modulus = MIN (modulus, align);
+
+ return modulus;
+ }
+ }
+ }
+
+ /* If we get here, we were unable to determine anything useful about the
+ expression. */
+ return 1;
+}
+
+
/* Fold a binary expression of code CODE and type TYPE with operands
OP0 and OP1. Return the folded expression if folding is
successful. Otherwise, return NULL_TREE. */
if (TREE_CODE (arg0) == COMPOUND_EXPR)
return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
fold_build2 (code, type,
- TREE_OPERAND (arg0, 1), op1));
+ fold_convert (TREE_TYPE (op0),
+ TREE_OPERAND (arg0, 1)),
+ op1));
if (TREE_CODE (arg1) == COMPOUND_EXPR
&& reorder_operands_p (arg0, TREE_OPERAND (arg1, 0)))
return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
- fold_build2 (code, type,
- op0, TREE_OPERAND (arg1, 1)));
+ fold_build2 (code, type, op0,
+ fold_convert (TREE_TYPE (op1),
+ TREE_OPERAND (arg1, 1))));
if (TREE_CODE (arg0) == COND_EXPR || COMPARISON_CLASS_P (arg0))
{
if (POINTER_TYPE_P (TREE_TYPE (arg1))
&& INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
return fold_build2 (POINTER_PLUS_EXPR, type,
- fold_convert (type, arg1), fold_convert (sizetype, arg0));
+ fold_convert (type, arg1),
+ fold_convert (sizetype, arg0));
/* (PTR +p B) +p A -> PTR +p (B + A) */
if (TREE_CODE (arg0) == POINTER_PLUS_EXPR)
tree inner;
tree arg01 = fold_convert (sizetype, TREE_OPERAND (arg0, 1));
tree arg00 = TREE_OPERAND (arg0, 0);
- inner = fold_build2 (PLUS_EXPR, sizetype, arg01, fold_convert (sizetype, arg1));
- return fold_build2 (POINTER_PLUS_EXPR, type, arg00, inner);
+ inner = fold_build2 (PLUS_EXPR, sizetype,
+ arg01, fold_convert (sizetype, arg1));
+ return fold_convert (type,
+ fold_build2 (POINTER_PLUS_EXPR,
+ TREE_TYPE (arg00), arg00, inner));
}
/* PTR_CST +p CST -> CST1 */
}
return NULL_TREE;
+
case PLUS_EXPR:
/* PTR + INT -> (INT)(PTR p+ INT) */
if (POINTER_TYPE_P (TREE_TYPE (arg0))
return omit_one_operand (type, t1, arg0);
}
}
+
+ /* X + (X / CST) * -CST is X % CST. */
+ if (TREE_CODE (arg1) == MULT_EXPR
+ && TREE_CODE (TREE_OPERAND (arg1, 0)) == TRUNC_DIV_EXPR
+ && operand_equal_p (arg0,
+ TREE_OPERAND (TREE_OPERAND (arg1, 0), 0), 0))
+ {
+ tree cst0 = TREE_OPERAND (TREE_OPERAND (arg1, 0), 1);
+ tree cst1 = TREE_OPERAND (arg1, 1);
+ tree sum = fold_binary (PLUS_EXPR, TREE_TYPE (cst1), cst1, cst0);
+ if (sum && integer_zerop (sum))
+ return fold_convert (type,
+ fold_build2 (TRUNC_MOD_EXPR,
+ TREE_TYPE (arg0), arg0, cst0));
+ }
}
/* Handle (A1 * C1) + (A2 * C2) with A1, A2 or C1, C2 being the
- same or one. Make sure type is not saturating. */
+ same or one. Make sure type is not saturating.
+ fold_plusminus_mult_expr will re-associate. */
if ((TREE_CODE (arg0) == MULT_EXPR
|| TREE_CODE (arg1) == MULT_EXPR)
&& !TYPE_SATURATING (type)
- && (!FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations))
+ && (!FLOAT_TYPE_P (type) || flag_associative_math))
{
tree tem = fold_plusminus_mult_expr (code, type, arg0, arg1);
if (tem)
return fold_build2 (MULT_EXPR, type, arg0,
build_real (type, dconst2));
- /* Convert a + (b*c + d*e) into (a + b*c) + d*e. */
- if (flag_unsafe_math_optimizations
+ /* Convert a + (b*c + d*e) into (a + b*c) + d*e.
+ We associate floats only if the user has specified
+ -fassociative-math. */
+ if (flag_associative_math
&& TREE_CODE (arg1) == PLUS_EXPR
&& TREE_CODE (arg0) != MULT_EXPR)
{
return fold_build2 (PLUS_EXPR, type, tree0, tree11);
}
}
- /* Convert (b*c + d*e) + a into b*c + (d*e +a). */
- if (flag_unsafe_math_optimizations
+ /* Convert (b*c + d*e) + a into b*c + (d*e + a).
+ We associate floats only if the user has specified
+ -fassociative-math. */
+ if (flag_associative_math
&& TREE_CODE (arg0) == PLUS_EXPR
&& TREE_CODE (arg1) != MULT_EXPR)
{
is a rotate of A by B bits. */
{
enum tree_code code0, code1;
+ tree rtype;
code0 = TREE_CODE (arg0);
code1 = TREE_CODE (arg1);
if (((code0 == RSHIFT_EXPR && code1 == LSHIFT_EXPR)
|| (code1 == RSHIFT_EXPR && code0 == LSHIFT_EXPR))
&& operand_equal_p (TREE_OPERAND (arg0, 0),
TREE_OPERAND (arg1, 0), 0)
- && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
+ && (rtype = TREE_TYPE (TREE_OPERAND (arg0, 0)),
+ TYPE_UNSIGNED (rtype))
+ /* Only create rotates in complete modes. Other cases are not
+ expanded properly. */
+ && TYPE_PRECISION (rtype) == GET_MODE_PRECISION (TYPE_MODE (rtype)))
{
tree tree01, tree11;
enum tree_code code01, code11;
/* In most languages, can't associate operations on floats through
parentheses. Rather than remember where the parentheses were, we
don't associate floats at all, unless the user has specified
- -funsafe-math-optimizations.
+ -fassociative-math.
And, we need to make sure type is not saturating. */
- if ((! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
+ if ((! FLOAT_TYPE_P (type) || flag_associative_math)
&& !TYPE_SATURATING (type))
{
tree var0, con0, lit0, minus_lit0;
}
/* A - (-B) -> A + B */
if (TREE_CODE (arg1) == NEGATE_EXPR)
- return fold_build2 (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0));
+ return fold_build2 (PLUS_EXPR, type, op0,
+ fold_convert (type, TREE_OPERAND (arg1, 0)));
/* (-A) - B -> (-B) - A where B is easily negated and we can swap. */
if (TREE_CODE (arg0) == NEGATE_EXPR
&& (FLOAT_TYPE_P (type)
|| INTEGRAL_TYPE_P (type))
&& negate_expr_p (arg1)
&& reorder_operands_p (arg0, arg1))
- return fold_build2 (MINUS_EXPR, type, negate_expr (arg1),
- TREE_OPERAND (arg0, 0));
+ return fold_build2 (MINUS_EXPR, type,
+ fold_convert (type, negate_expr (arg1)),
+ fold_convert (type, TREE_OPERAND (arg0, 0)));
/* Convert -A - 1 to ~A. */
if (INTEGRAL_TYPE_P (type)
&& TREE_CODE (arg0) == NEGATE_EXPR
&& integer_all_onesp (arg0))
return fold_build1 (BIT_NOT_EXPR, type, op1);
+
+ /* X - (X / CST) * CST is X % CST. */
+ if (INTEGRAL_TYPE_P (type)
+ && TREE_CODE (arg1) == MULT_EXPR
+ && TREE_CODE (TREE_OPERAND (arg1, 0)) == TRUNC_DIV_EXPR
+ && operand_equal_p (arg0,
+ TREE_OPERAND (TREE_OPERAND (arg1, 0), 0), 0)
+ && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg1, 0), 1),
+ TREE_OPERAND (arg1, 1), 0))
+ return fold_convert (type,
+ fold_build2 (TRUNC_MOD_EXPR, TREE_TYPE (arg0),
+ arg0, TREE_OPERAND (arg1, 1)));
+
if (! FLOAT_TYPE_P (type))
{
if (integer_zerop (arg0))
Also note that operand_equal_p is always false if an operand
is volatile. */
- if ((! FLOAT_TYPE_P (type)
- || (flag_unsafe_math_optimizations
- && !HONOR_NANS (TYPE_MODE (type))
- && !HONOR_INFINITIES (TYPE_MODE (type))))
+ if ((!FLOAT_TYPE_P (type) || !HONOR_NANS (TYPE_MODE (type)))
&& operand_equal_p (arg0, arg1, 0))
return fold_convert (type, integer_zero_node);
return tem;
/* Handle (A1 * C1) - (A2 * C2) with A1, A2 or C1, C2 being the
- same or one. Make sure type is not saturating. */
+ same or one. Make sure type is not saturating.
+ fold_plusminus_mult_expr will re-associate. */
if ((TREE_CODE (arg0) == MULT_EXPR
|| TREE_CODE (arg1) == MULT_EXPR)
&& !TYPE_SATURATING (type)
- && (!FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations))
+ && (!FLOAT_TYPE_P (type) || flag_associative_math))
{
tree tem = fold_plusminus_mult_expr (code, type, arg0, arg1);
if (tem)
return omit_one_operand (type, arg1, arg0);
if (integer_onep (arg1))
return non_lvalue (fold_convert (type, arg0));
- /* Transform x * -1 into -x. */
+ /* Transform x * -1 into -x. Make sure to do the negation
+ on the original operand with conversions not stripped
+ because we can only strip non-sign-changing conversions. */
if (integer_all_onesp (arg1))
- return fold_convert (type, negate_expr (arg0));
+ return fold_convert (type, negate_expr (op0));
/* Transform x * -C into -x * C if x is easily negatable. */
if (TREE_CODE (arg1) == INTEGER_CST
&& tree_int_cst_sgn (arg1) == -1
&& (tem = negate_expr (arg1)) != arg1
&& !TREE_OVERFLOW (tem))
return fold_build2 (MULT_EXPR, type,
- negate_expr (arg0), tem);
+ fold_convert (type, negate_expr (arg0)), tem);
/* (a * (1 << b)) is (a << b) */
if (TREE_CODE (arg1) == LSHIFT_EXPR
&& integer_onep (TREE_OPERAND (arg1, 0)))
- return fold_build2 (LSHIFT_EXPR, type, arg0,
+ return fold_build2 (LSHIFT_EXPR, type, op0,
TREE_OPERAND (arg1, 1));
if (TREE_CODE (arg0) == LSHIFT_EXPR
&& integer_onep (TREE_OPERAND (arg0, 0)))
- return fold_build2 (LSHIFT_EXPR, type, arg1,
+ return fold_build2 (LSHIFT_EXPR, type, op1,
TREE_OPERAND (arg0, 1));
strict_overflow_p = false;
if (TREE_CODE (arg1) == INTEGER_CST
- && 0 != (tem = extract_muldiv (op0,
- fold_convert (type, arg1),
- code, NULL_TREE,
+ && 0 != (tem = extract_muldiv (op0, arg1, code, NULL_TREE,
&strict_overflow_p)))
{
if (strict_overflow_p)
&& real_minus_onep (arg1))
return fold_convert (type, negate_expr (arg0));
- /* Convert (C1/X)*C2 into (C1*C2)/X. */
- if (flag_unsafe_math_optimizations
+ /* Convert (C1/X)*C2 into (C1*C2)/X. This transformation may change
+ the result for floating point types due to rounding so it is applied
+ only if -fassociative-math was specified. */
+ if (flag_associative_math
&& TREE_CODE (arg0) == RDIV_EXPR
&& TREE_CODE (arg1) == REAL_CST
&& TREE_CODE (TREE_OPERAND (arg0, 0)) == REAL_CST)
if (TREE_CODE (arg0) == BIT_NOT_EXPR
&& operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0))
{
- t1 = build_int_cst_type (type, -1);
+ t1 = fold_convert (type, integer_zero_node);
+ t1 = fold_unary (BIT_NOT_EXPR, type, t1);
return omit_one_operand (type, t1, arg1);
}
if (TREE_CODE (arg1) == BIT_NOT_EXPR
&& operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0))
{
- t1 = build_int_cst_type (type, -1);
+ t1 = fold_convert (type, integer_zero_node);
+ t1 = fold_unary (BIT_NOT_EXPR, type, t1);
return omit_one_operand (type, t1, arg0);
}
&& TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- unsigned HOST_WIDE_INT hi1, lo1, hi2, lo2, mlo, mhi;
- int width = TYPE_PRECISION (type);
+ unsigned HOST_WIDE_INT hi1, lo1, hi2, lo2, hi3, lo3, mlo, mhi;
+ int width = TYPE_PRECISION (type), w;
hi1 = TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1));
lo1 = TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1));
hi2 = TREE_INT_CST_HIGH (arg1);
return fold_build2 (BIT_IOR_EXPR, type,
TREE_OPERAND (arg0, 0), arg1);
- /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
+ /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2,
+ unless (C1 & ~C2) | (C2 & C3) for some C3 is a mask of some
+ mode which allows further optimizations. */
hi1 &= mhi;
lo1 &= mlo;
- if ((hi1 & ~hi2) != hi1 || (lo1 & ~lo2) != lo1)
+ hi2 &= mhi;
+ lo2 &= mlo;
+ hi3 = hi1 & ~hi2;
+ lo3 = lo1 & ~lo2;
+ for (w = BITS_PER_UNIT;
+ w <= width && w <= HOST_BITS_PER_WIDE_INT;
+ w <<= 1)
+ {
+ unsigned HOST_WIDE_INT mask
+ = (unsigned HOST_WIDE_INT) -1 >> (HOST_BITS_PER_WIDE_INT - w);
+ if (((lo1 | lo2) & mask) == mask
+ && (lo1 & ~mask) == 0 && hi1 == 0)
+ {
+ hi3 = 0;
+ lo3 = mask;
+ break;
+ }
+ }
+ if (hi3 != hi1 || lo3 != lo1)
return fold_build2 (BIT_IOR_EXPR, type,
fold_build2 (BIT_AND_EXPR, type,
TREE_OPERAND (arg0, 0),
build_int_cst_wide (type,
- lo1 & ~lo2,
- hi1 & ~hi2)),
+ lo3, hi3)),
arg1);
}
if (TREE_CODE (arg0) == BIT_NOT_EXPR
&& operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0))
{
- t1 = build_int_cst_type (type, -1);
+ t1 = fold_convert (type, integer_zero_node);
+ t1 = fold_unary (BIT_NOT_EXPR, type, t1);
return omit_one_operand (type, t1, arg1);
}
if (TREE_CODE (arg1) == BIT_NOT_EXPR
&& operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0))
{
- t1 = build_int_cst_type (type, -1);
+ t1 = fold_convert (type, integer_zero_node);
+ t1 = fold_unary (BIT_NOT_EXPR, type, t1);
return omit_one_operand (type, t1, arg0);
}
if (TREE_CODE (arg0) == BIT_IOR_EXPR
&& TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
- return fold_build2 (BIT_IOR_EXPR, type,
- fold_build2 (BIT_AND_EXPR, type,
- TREE_OPERAND (arg0, 0), arg1),
- fold_build2 (BIT_AND_EXPR, type,
- TREE_OPERAND (arg0, 1), arg1));
+ {
+ tree tmp1 = fold_convert (TREE_TYPE (arg0), arg1);
+ tree tmp2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
+ TREE_OPERAND (arg0, 0), tmp1);
+ tree tmp3 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
+ TREE_OPERAND (arg0, 1), tmp1);
+ return fold_convert (type,
+ fold_build2 (BIT_IOR_EXPR, TREE_TYPE (arg0),
+ tmp2, tmp3));
+ }
/* (X | Y) & Y is (X, Y). */
if (TREE_CODE (arg0) == BIT_IOR_EXPR
{
return fold_build1 (BIT_NOT_EXPR, type,
build2 (BIT_IOR_EXPR, type,
- TREE_OPERAND (arg0, 0),
- TREE_OPERAND (arg1, 0)));
+ fold_convert (type,
+ TREE_OPERAND (arg0, 0)),
+ fold_convert (type,
+ TREE_OPERAND (arg1, 0))));
+ }
+
+ /* If arg0 is derived from the address of an object or function, we may
+ be able to fold this expression using the object or function's
+ alignment. */
+ if (POINTER_TYPE_P (TREE_TYPE (arg0)) && host_integerp (arg1, 1))
+ {
+ unsigned HOST_WIDE_INT modulus, residue;
+ unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg1);
+
+ modulus = get_pointer_modulus_and_residue (arg0, &residue);
+
+ /* This works because modulus is a power of 2. If this weren't the
+ case, we'd have to replace it by its greatest power-of-2
+ divisor: modulus & -modulus. */
+ if (low < modulus)
+ return build_int_cst (type, residue & low);
+ }
+
+ /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
+ (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
+ if the new mask might be further optimized. */
+ if ((TREE_CODE (arg0) == LSHIFT_EXPR
+ || TREE_CODE (arg0) == RSHIFT_EXPR)
+ && host_integerp (TREE_OPERAND (arg0, 1), 1)
+ && host_integerp (arg1, TYPE_UNSIGNED (TREE_TYPE (arg1)))
+ && tree_low_cst (TREE_OPERAND (arg0, 1), 1)
+ < TYPE_PRECISION (TREE_TYPE (arg0))
+ && TYPE_PRECISION (TREE_TYPE (arg0)) <= HOST_BITS_PER_WIDE_INT
+ && tree_low_cst (TREE_OPERAND (arg0, 1), 1) > 0)
+ {
+ unsigned int shiftc = tree_low_cst (TREE_OPERAND (arg0, 1), 1);
+ unsigned HOST_WIDE_INT mask
+ = tree_low_cst (arg1, TYPE_UNSIGNED (TREE_TYPE (arg1)));
+ unsigned HOST_WIDE_INT newmask, zerobits = 0;
+ tree shift_type = TREE_TYPE (arg0);
+
+ if (TREE_CODE (arg0) == LSHIFT_EXPR)
+ zerobits = ((((unsigned HOST_WIDE_INT) 1) << shiftc) - 1);
+ else if (TREE_CODE (arg0) == RSHIFT_EXPR
+ && TYPE_PRECISION (TREE_TYPE (arg0))
+ == GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (arg0))))
+ {
+ unsigned int prec = TYPE_PRECISION (TREE_TYPE (arg0));
+ tree arg00 = TREE_OPERAND (arg0, 0);
+ /* See if more bits can be proven as zero because of
+ zero extension. */
+ if (TREE_CODE (arg00) == NOP_EXPR
+ && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg00, 0))))
+ {
+ tree inner_type = TREE_TYPE (TREE_OPERAND (arg00, 0));
+ if (TYPE_PRECISION (inner_type)
+ == GET_MODE_BITSIZE (TYPE_MODE (inner_type))
+ && TYPE_PRECISION (inner_type) < prec)
+ {
+ prec = TYPE_PRECISION (inner_type);
+ /* See if we can shorten the right shift. */
+ if (shiftc < prec)
+ shift_type = inner_type;
+ }
+ }
+ zerobits = ~(unsigned HOST_WIDE_INT) 0;
+ zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
+ zerobits <<= prec - shiftc;
+ /* For arithmetic shift if sign bit could be set, zerobits
+ can contain actually sign bits, so no transformation is
+ possible, unless MASK masks them all away. In that
+ case the shift needs to be converted into logical shift. */
+ if (!TYPE_UNSIGNED (TREE_TYPE (arg0))
+ && prec == TYPE_PRECISION (TREE_TYPE (arg0)))
+ {
+ if ((mask & zerobits) == 0)
+ shift_type = unsigned_type_for (TREE_TYPE (arg0));
+ else
+ zerobits = 0;
+ }
+ }
+
+ /* ((X << 16) & 0xff00) is (X, 0). */
+ if ((mask & zerobits) == mask)
+ return omit_one_operand (type, build_int_cst (type, 0), arg0);
+
+ newmask = mask | zerobits;
+ if (newmask != mask && (newmask & (newmask + 1)) == 0)
+ {
+ unsigned int prec;
+
+ /* Only do the transformation if NEWMASK is some integer
+ mode's mask. */
+ for (prec = BITS_PER_UNIT;
+ prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
+ if (newmask == (((unsigned HOST_WIDE_INT) 1) << prec) - 1)
+ break;
+ if (prec < HOST_BITS_PER_WIDE_INT
+ || newmask == ~(unsigned HOST_WIDE_INT) 0)
+ {
+ if (shift_type != TREE_TYPE (arg0))
+ {
+ tem = fold_build2 (TREE_CODE (arg0), shift_type,
+ fold_convert (shift_type,
+ TREE_OPERAND (arg0, 0)),
+ TREE_OPERAND (arg0, 1));
+ tem = fold_convert (type, tem);
+ }
+ else
+ tem = op0;
+ return fold_build2 (BIT_AND_EXPR, type, tem,
+ build_int_cst_type (TREE_TYPE (op1),
+ newmask));
+ }
+ }
}
goto associate;
/* If ARG1 is a constant, we can convert this to a multiply by the
reciprocal. This does not have the same rounding properties,
- so only do this if -funsafe-math-optimizations. We can actually
+ so only do this if -freciprocal-math. We can actually
always safely do it if ARG1 is a power of two, but it's hard to
tell if it is or not in a portable manner. */
if (TREE_CODE (arg1) == REAL_CST)
{
- if (flag_unsafe_math_optimizations
+ if (flag_reciprocal_math
&& 0 != (tem = const_binop (code, build_real (type, dconst1),
arg1, 0)))
return fold_build2 (MULT_EXPR, type, arg0, tem);
}
}
}
- /* Convert A/B/C to A/(B*C). */
- if (flag_unsafe_math_optimizations
+ /* Convert A/B/C to A/(B*C). */
+ if (flag_reciprocal_math
&& TREE_CODE (arg0) == RDIV_EXPR)
return fold_build2 (RDIV_EXPR, type, TREE_OPERAND (arg0, 0),
fold_build2 (MULT_EXPR, type,
TREE_OPERAND (arg0, 1), arg1));
/* Convert A/(B/C) to (A/B)*C. */
- if (flag_unsafe_math_optimizations
+ if (flag_reciprocal_math
&& TREE_CODE (arg1) == RDIV_EXPR)
return fold_build2 (MULT_EXPR, type,
fold_build2 (RDIV_EXPR, type, arg0,
TREE_OPERAND (arg1, 1));
/* Convert C1/(X*C2) into (C1/C2)/X. */
- if (flag_unsafe_math_optimizations
+ if (flag_reciprocal_math
&& TREE_CODE (arg1) == MULT_EXPR
&& TREE_CODE (arg0) == REAL_CST
&& TREE_CODE (TREE_OPERAND (arg1, 1)) == REAL_CST)
strict_overflow_p = false;
if (TREE_CODE (arg1) == LSHIFT_EXPR
&& (TYPE_UNSIGNED (type)
- || tree_expr_nonnegative_warnv_p (arg0, &strict_overflow_p)))
+ || tree_expr_nonnegative_warnv_p (op0, &strict_overflow_p)))
{
tree sval = TREE_OPERAND (arg1, 0);
if (integer_pow2p (sval) && tree_int_cst_sgn (sval) > 0)
fold_convert (type, arg0), sh_cnt);
}
}
+
+ /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
+ TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
+ if (INTEGRAL_TYPE_P (type)
+ && TYPE_UNSIGNED (type)
+ && code == FLOOR_DIV_EXPR)
+ return fold_build2 (TRUNC_DIV_EXPR, type, op0, op1);
+
/* Fall thru */
case ROUND_DIV_EXPR:
strict_overflow_p = false;
if ((code == TRUNC_MOD_EXPR || code == FLOOR_MOD_EXPR)
&& (TYPE_UNSIGNED (type)
- || tree_expr_nonnegative_warnv_p (arg0, &strict_overflow_p)))
+ || tree_expr_nonnegative_warnv_p (op0, &strict_overflow_p)))
{
tree c = arg1;
/* Also optimize A % (C << N) where C is a power of 2,
if (code == LROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST)
{
tree tem = build_int_cst (TREE_TYPE (arg1),
- GET_MODE_BITSIZE (TYPE_MODE (type)));
+ TYPE_PRECISION (type));
tem = const_binop (MINUS_EXPR, tem, arg1, 0);
- return fold_build2 (RROTATE_EXPR, type, arg0, tem);
+ return fold_build2 (RROTATE_EXPR, type, op0, tem);
}
/* If we have a rotate of a bit operation with the rotate count and
fold_build2 (code, type,
TREE_OPERAND (arg0, 1), arg1));
- /* Two consecutive rotates adding up to the width of the mode can
- be ignored. */
+ /* Two consecutive rotates adding up to the precision of the
+ type can be ignored. */
if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (arg0) == RROTATE_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
&& TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
&& ((TREE_INT_CST_LOW (arg1)
+ TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)))
- == (unsigned int) GET_MODE_BITSIZE (TYPE_MODE (type))))
+ == (unsigned int) TYPE_PRECISION (type)))
return TREE_OPERAND (arg0, 0);
+ /* Fold (X & C2) << C1 into (X << C1) & (C2 << C1)
+ (X & C2) >> C1 into (X >> C1) & (C2 >> C1)
+ if the latter can be further optimized. */
+ if ((code == LSHIFT_EXPR || code == RSHIFT_EXPR)
+ && TREE_CODE (arg0) == BIT_AND_EXPR
+ && TREE_CODE (arg1) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
+ {
+ tree mask = fold_build2 (code, type,
+ fold_convert (type, TREE_OPERAND (arg0, 1)),
+ arg1);
+ tree shift = fold_build2 (code, type,
+ fold_convert (type, TREE_OPERAND (arg0, 0)),
+ arg1);
+ tem = fold_binary (BIT_AND_EXPR, type, shift, mask);
+ if (tem)
+ return tem;
+ }
+
return NULL_TREE;
case MIN_EXPR:
/* bool_var != 1 becomes !bool_var. */
if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE && integer_onep (arg1)
&& code == NE_EXPR)
- return fold_build1 (TRUTH_NOT_EXPR, type, arg0);
+ return fold_build1 (TRUTH_NOT_EXPR, type, fold_convert (type, arg0));
/* bool_var == 0 becomes !bool_var. */
if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE && integer_zerop (arg1)
&& code == EQ_EXPR)
- return fold_build1 (TRUTH_NOT_EXPR, type, arg0);
+ return fold_build1 (TRUTH_NOT_EXPR, type, fold_convert (type, arg0));
/* If this is an equality comparison of the address of two non-weak,
unaliased symbols neither of which are extern (since we do not
tree arg01 = TREE_OPERAND (arg0, 1);
if (TREE_CODE (arg00) == LSHIFT_EXPR
&& integer_onep (TREE_OPERAND (arg00, 0)))
- return
- fold_build2 (code, type,
- build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
- build2 (RSHIFT_EXPR, TREE_TYPE (arg00),
- arg01, TREE_OPERAND (arg00, 1)),
- fold_convert (TREE_TYPE (arg0),
- integer_one_node)),
- arg1);
- else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
- && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
- return
- fold_build2 (code, type,
- build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
- build2 (RSHIFT_EXPR, TREE_TYPE (arg01),
- arg00, TREE_OPERAND (arg01, 1)),
- fold_convert (TREE_TYPE (arg0),
- integer_one_node)),
- arg1);
+ {
+ tree tem = fold_build2 (RSHIFT_EXPR, TREE_TYPE (arg00),
+ arg01, TREE_OPERAND (arg00, 1));
+ tem = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0), tem,
+ build_int_cst (TREE_TYPE (arg0), 1));
+ return fold_build2 (code, type,
+ fold_convert (TREE_TYPE (arg1), tem), arg1);
+ }
+ else if (TREE_CODE (arg01) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (arg01, 0)))
+ {
+ tree tem = fold_build2 (RSHIFT_EXPR, TREE_TYPE (arg01),
+ arg00, TREE_OPERAND (arg01, 1));
+ tem = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0), tem,
+ build_int_cst (TREE_TYPE (arg0), 1));
+ return fold_build2 (code, type,
+ fold_convert (TREE_TYPE (arg1), tem), arg1);
+ }
}
/* If this is an NE or EQ comparison of zero against the result of a
#ifdef ENABLE_FOLD_CHECKING
#undef fold
-static void fold_checksum_tree (tree, struct md5_ctx *, htab_t);
-static void fold_check_failed (tree, tree);
-void print_fold_checksum (tree);
+static void fold_checksum_tree (const_tree, struct md5_ctx *, htab_t);
+static void fold_check_failed (const_tree, const_tree);
+void print_fold_checksum (const_tree);
/* When --enable-checking=fold, compute a digest of expr before
and after actual fold call to see if fold did not accidentally
}
void
-print_fold_checksum (tree expr)
+print_fold_checksum (const_tree expr)
{
struct md5_ctx ctx;
unsigned char checksum[16], cnt;
}
static void
-fold_check_failed (tree expr ATTRIBUTE_UNUSED, tree ret ATTRIBUTE_UNUSED)
+fold_check_failed (const_tree expr ATTRIBUTE_UNUSED, const_tree ret ATTRIBUTE_UNUSED)
{
internal_error ("fold check: original tree changed by fold");
}
static void
-fold_checksum_tree (tree expr, struct md5_ctx *ctx, htab_t ht)
+fold_checksum_tree (const_tree expr, struct md5_ctx *ctx, htab_t ht)
{
- void **slot;
+ const void **slot;
enum tree_code code;
struct tree_function_decl buf;
int i, len;
&& sizeof (struct tree_type) <= sizeof (struct tree_function_decl));
if (expr == NULL)
return;
- slot = htab_find_slot (ht, expr, INSERT);
+ slot = (const void **) htab_find_slot (ht, expr, INSERT);
if (*slot != NULL)
return;
*slot = expr;
{
/* Allow DECL_ASSEMBLER_NAME to be modified. */
memcpy ((char *) &buf, expr, tree_size (expr));
+ SET_DECL_ASSEMBLER_NAME ((tree)&buf, NULL);
expr = (tree) &buf;
- SET_DECL_ASSEMBLER_NAME (expr, NULL);
}
else if (TREE_CODE_CLASS (code) == tcc_type
&& (TYPE_POINTER_TO (expr) || TYPE_REFERENCE_TO (expr)
|| TYPE_CONTAINS_PLACEHOLDER_INTERNAL (expr)))
{
/* Allow these fields to be modified. */
+ tree tmp;
memcpy ((char *) &buf, expr, tree_size (expr));
- expr = (tree) &buf;
- TYPE_CONTAINS_PLACEHOLDER_INTERNAL (expr) = 0;
- TYPE_POINTER_TO (expr) = NULL;
- TYPE_REFERENCE_TO (expr) = NULL;
- if (TYPE_CACHED_VALUES_P (expr))
+ expr = tmp = (tree) &buf;
+ TYPE_CONTAINS_PLACEHOLDER_INTERNAL (tmp) = 0;
+ TYPE_POINTER_TO (tmp) = NULL;
+ TYPE_REFERENCE_TO (tmp) = NULL;
+ if (TYPE_CACHED_VALUES_P (tmp))
{
- TYPE_CACHED_VALUES_P (expr) = 0;
- TYPE_CACHED_VALUES (expr) = NULL;
+ TYPE_CACHED_VALUES_P (tmp) = 0;
+ TYPE_CACHED_VALUES (tmp) = NULL;
}
}
md5_process_bytes (expr, tree_size (expr), ctx);
outputs differ. */
void
-debug_fold_checksum (tree t)
+debug_fold_checksum (const_tree t)
{
int i;
unsigned char checksum[16];
transformed version). */
int
-multiple_of_p (tree type, tree top, tree bottom)
+multiple_of_p (tree type, const_tree top, const_tree bottom)
{
if (operand_equal_p (top, bottom, 0))
return 1;
return ret;
}
-/* Return true when T is an address and is known to be nonzero.
+
+/* Return true when (CODE OP0) is an address and is known to be nonzero.
For floating point we further ensure that T is not denormal.
Similar logic is present in nonzero_address in rtlanal.h.
is undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't
change *STRICT_OVERFLOW_P. */
-bool
-tree_expr_nonzero_warnv_p (tree t, bool *strict_overflow_p)
+static bool
+tree_unary_nonzero_warnv_p (enum tree_code code, tree type, tree op0,
+ bool *strict_overflow_p)
{
- tree type = TREE_TYPE (t);
- bool sub_strict_overflow_p;
+ switch (code)
+ {
+ case ABS_EXPR:
+ return tree_expr_nonzero_warnv_p (op0,
+ strict_overflow_p);
- /* Doing something useful for floating point would need more work. */
- if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
- return false;
+ case NOP_EXPR:
+ {
+ tree inner_type = TREE_TYPE (op0);
+ tree outer_type = type;
- switch (TREE_CODE (t))
- {
- case SSA_NAME:
- /* Query VRP to see if it has recorded any information about
- the range of this object. */
- return ssa_name_nonzero_p (t);
+ return (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
+ && tree_expr_nonzero_warnv_p (op0,
+ strict_overflow_p));
+ }
+ break;
- case ABS_EXPR:
- return tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
+ case NON_LVALUE_EXPR:
+ return tree_expr_nonzero_warnv_p (op0,
strict_overflow_p);
- case INTEGER_CST:
- return !integer_zerop (t);
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/* Return true when (CODE OP0 OP1) is an address and is known to be nonzero.
+ For floating point we further ensure that T is not denormal.
+ Similar logic is present in nonzero_address in rtlanal.h.
+
+ If the return value is based on the assumption that signed overflow
+ is undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't
+ change *STRICT_OVERFLOW_P. */
+static bool
+tree_binary_nonzero_warnv_p (enum tree_code code,
+ tree type,
+ tree op0,
+ tree op1, bool *strict_overflow_p)
+{
+ bool sub_strict_overflow_p;
+ switch (code)
+ {
case POINTER_PLUS_EXPR:
case PLUS_EXPR:
if (TYPE_OVERFLOW_UNDEFINED (type))
/* With the presence of negative values it is hard
to say something. */
sub_strict_overflow_p = false;
- if (!tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 0),
+ if (!tree_expr_nonnegative_warnv_p (op0,
&sub_strict_overflow_p)
- || !tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 1),
+ || !tree_expr_nonnegative_warnv_p (op1,
&sub_strict_overflow_p))
return false;
/* One of operands must be positive and the other non-negative. */
/* We don't set *STRICT_OVERFLOW_P here: even if this value
overflows, on a twos-complement machine the sum of two
nonnegative numbers can never be zero. */
- return (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
+ return (tree_expr_nonzero_warnv_p (op0,
strict_overflow_p)
- || tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 1),
+ || tree_expr_nonzero_warnv_p (op1,
strict_overflow_p));
}
break;
case MULT_EXPR:
if (TYPE_OVERFLOW_UNDEFINED (type))
{
- if (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
+ if (tree_expr_nonzero_warnv_p (op0,
strict_overflow_p)
- && tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 1),
+ && tree_expr_nonzero_warnv_p (op1,
strict_overflow_p))
{
*strict_overflow_p = true;
}
break;
- case NOP_EXPR:
- {
- tree inner_type = TREE_TYPE (TREE_OPERAND (t, 0));
- tree outer_type = TREE_TYPE (t);
+ case MIN_EXPR:
+ sub_strict_overflow_p = false;
+ if (tree_expr_nonzero_warnv_p (op0,
+ &sub_strict_overflow_p)
+ && tree_expr_nonzero_warnv_p (op1,
+ &sub_strict_overflow_p))
+ {
+ if (sub_strict_overflow_p)
+ *strict_overflow_p = true;
+ }
+ break;
- return (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
- && tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
- strict_overflow_p));
- }
+ case MAX_EXPR:
+ sub_strict_overflow_p = false;
+ if (tree_expr_nonzero_warnv_p (op0,
+ &sub_strict_overflow_p))
+ {
+ if (sub_strict_overflow_p)
+ *strict_overflow_p = true;
+
+ /* When both operands are nonzero, then MAX must be too. */
+ if (tree_expr_nonzero_warnv_p (op1,
+ strict_overflow_p))
+ return true;
+
+ /* MAX where operand 0 is positive is positive. */
+ return tree_expr_nonnegative_warnv_p (op0,
+ strict_overflow_p);
+ }
+ /* MAX where operand 1 is positive is positive. */
+ else if (tree_expr_nonzero_warnv_p (op1,
+ &sub_strict_overflow_p)
+ && tree_expr_nonnegative_warnv_p (op1,
+ &sub_strict_overflow_p))
+ {
+ if (sub_strict_overflow_p)
+ *strict_overflow_p = true;
+ return true;
+ }
break;
- case ADDR_EXPR:
+ case BIT_IOR_EXPR:
+ return (tree_expr_nonzero_warnv_p (op1,
+ strict_overflow_p)
+ || tree_expr_nonzero_warnv_p (op0,
+ strict_overflow_p));
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/* Return true when T is an address and is known to be nonzero.
+ For floating point we further ensure that T is not denormal.
+ Similar logic is present in nonzero_address in rtlanal.h.
+
+ If the return value is based on the assumption that signed overflow
+ is undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't
+ change *STRICT_OVERFLOW_P. */
+
+static bool
+tree_single_nonzero_warnv_p (tree t, bool *strict_overflow_p)
+{
+ bool sub_strict_overflow_p;
+ switch (TREE_CODE (t))
+ {
+ case SSA_NAME:
+ /* Query VRP to see if it has recorded any information about
+ the range of this object. */
+ return ssa_name_nonzero_p (t);
+
+ case INTEGER_CST:
+ return !integer_zerop (t);
+
+ case ADDR_EXPR:
{
tree base = get_base_address (TREE_OPERAND (t, 0));
}
break;
- case MIN_EXPR:
- sub_strict_overflow_p = false;
- if (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
- &sub_strict_overflow_p)
- && tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 1),
- &sub_strict_overflow_p))
- {
- if (sub_strict_overflow_p)
- *strict_overflow_p = true;
- }
+ default:
break;
+ }
+ return false;
+}
- case MAX_EXPR:
- sub_strict_overflow_p = false;
- if (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
- &sub_strict_overflow_p))
- {
- if (sub_strict_overflow_p)
- *strict_overflow_p = true;
+/* Return true when T is an address and is known to be nonzero.
+ For floating point we further ensure that T is not denormal.
+ Similar logic is present in nonzero_address in rtlanal.h.
- /* When both operands are nonzero, then MAX must be too. */
- if (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 1),
- strict_overflow_p))
- return true;
+ If the return value is based on the assumption that signed overflow
+ is undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't
+ change *STRICT_OVERFLOW_P. */
- /* MAX where operand 0 is positive is positive. */
- return tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 0),
+bool
+tree_expr_nonzero_warnv_p (tree t, bool *strict_overflow_p)
+{
+ tree type = TREE_TYPE (t);
+ enum tree_code code;
+
+ /* Doing something useful for floating point would need more work. */
+ if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
+ return false;
+
+ code = TREE_CODE (t);
+ switch (TREE_CODE_CLASS (code))
+ {
+ case tcc_unary:
+ return tree_unary_nonzero_warnv_p (code, type, TREE_OPERAND (t, 0),
+ strict_overflow_p);
+ case tcc_binary:
+ case tcc_comparison:
+ return tree_binary_nonzero_warnv_p (code, type,
+ TREE_OPERAND (t, 0),
+ TREE_OPERAND (t, 1),
strict_overflow_p);
- }
- /* MAX where operand 1 is positive is positive. */
- else if (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 1),
- &sub_strict_overflow_p)
- && tree_expr_nonnegative_warnv_p (TREE_OPERAND (t, 1),
- &sub_strict_overflow_p))
- {
- if (sub_strict_overflow_p)
- *strict_overflow_p = true;
- return true;
- }
+ case tcc_constant:
+ case tcc_declaration:
+ case tcc_reference:
+ return tree_single_nonzero_warnv_p (t, strict_overflow_p);
+
+ default:
break;
+ }
+
+ switch (code)
+ {
+ case TRUTH_NOT_EXPR:
+ return tree_unary_nonzero_warnv_p (code, type, TREE_OPERAND (t, 0),
+ strict_overflow_p);
+
+ case TRUTH_AND_EXPR:
+ case TRUTH_OR_EXPR:
+ case TRUTH_XOR_EXPR:
+ return tree_binary_nonzero_warnv_p (code, type,
+ TREE_OPERAND (t, 0),
+ TREE_OPERAND (t, 1),
+ strict_overflow_p);
+
+ case COND_EXPR:
+ case CONSTRUCTOR:
+ case OBJ_TYPE_REF:
+ case ASSERT_EXPR:
+ case ADDR_EXPR:
+ case WITH_SIZE_EXPR:
+ case EXC_PTR_EXPR:
+ case SSA_NAME:
+ case FILTER_EXPR:
+ return tree_single_nonzero_warnv_p (t, strict_overflow_p);
case COMPOUND_EXPR:
case MODIFY_EXPR:
strict_overflow_p);
case SAVE_EXPR:
- case NON_LVALUE_EXPR:
return tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
strict_overflow_p);
- case BIT_IOR_EXPR:
- return (tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 1),
- strict_overflow_p)
- || tree_expr_nonzero_warnv_p (TREE_OPERAND (t, 0),
- strict_overflow_p));
-
case CALL_EXPR:
return alloca_call_p (t);