if (!issue || warnmsg == NULL)
return;
+ if (stmt != NULL_TREE && TREE_NO_WARNING (stmt))
+ return;
+
/* Use the smallest code level when deciding to issue the
warning. */
if (code == 0 || code > (int) fold_deferred_overflow_code)
then we cannot pass through this conversion. */
|| (code != MULT_EXPR
&& (TYPE_UNSIGNED (ctype)
- != TYPE_UNSIGNED (TREE_TYPE (op0))))))
+ != TYPE_UNSIGNED (TREE_TYPE (op0))))
+ /* ... or has undefined overflow while the type it is converted
+ to does not, we cannot do the operation in the inner type as
+ that would introduce undefined overflow. */
+ || (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0))
+ && !TYPE_OVERFLOW_UNDEFINED (type))))
break;
/* Pass the constant down and see if we can make a simplification. If
}
break;
}
+ /* If the constant is negative, we cannot simplify this. */
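+ /* E.g. |X| * -2 must not become |X * -2|, which is 2 * |X|. */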
+ if (tree_int_cst_sgn (c) == -1)
+ break;
/* FALLTHROUGH */
case NEGATE_EXPR:
if ((t1 = extract_muldiv (op0, c, code, wide_type, strict_overflow_p))
}
-/* Return true if expr looks like an ARRAY_REF and set base and
- offset to the appropriate trees. If there is no offset,
- offset is set to NULL_TREE. Base will be canonicalized to
- something you can get the element type from using
- TREE_TYPE (TREE_TYPE (base)). Offset will be the offset
- in bytes to the base in sizetype. */
-
-static bool
-extract_array_ref (tree expr, tree *base, tree *offset)
-{
- /* One canonical form is a PLUS_EXPR with the first
- argument being an ADDR_EXPR with a possible NOP_EXPR
- attached. */
- if (TREE_CODE (expr) == POINTER_PLUS_EXPR)
- {
- tree op0 = TREE_OPERAND (expr, 0);
- tree inner_base, dummy1;
- /* Strip NOP_EXPRs here because the C frontends and/or
- folders present us (int *)&x.a p+ 4 possibly. */
- STRIP_NOPS (op0);
- if (extract_array_ref (op0, &inner_base, &dummy1))
- {
- *base = inner_base;
- *offset = fold_convert (sizetype, TREE_OPERAND (expr, 1));
- if (dummy1 != NULL_TREE)
- *offset = fold_build2 (PLUS_EXPR, sizetype,
- dummy1, *offset);
- return true;
- }
- }
- /* Other canonical form is an ADDR_EXPR of an ARRAY_REF,
- which we transform into an ADDR_EXPR with appropriate
- offset. For other arguments to the ADDR_EXPR we assume
- zero offset and as such do not care about the ADDR_EXPR
- type and strip possible nops from it. */
- else if (TREE_CODE (expr) == ADDR_EXPR)
- {
- tree op0 = TREE_OPERAND (expr, 0);
- if (TREE_CODE (op0) == ARRAY_REF)
- {
- tree idx = TREE_OPERAND (op0, 1);
- *base = TREE_OPERAND (op0, 0);
- *offset = fold_build2 (MULT_EXPR, TREE_TYPE (idx), idx,
- array_ref_element_size (op0));
- *offset = fold_convert (sizetype, *offset);
- }
- else
- {
- /* Handle array-to-pointer decay as &a. */
- if (TREE_CODE (TREE_TYPE (op0)) == ARRAY_TYPE)
- *base = TREE_OPERAND (expr, 0);
- else
- *base = expr;
- *offset = NULL_TREE;
- }
- return true;
- }
- /* The next canonical form is a VAR_DECL with POINTER_TYPE. */
- else if (SSA_VAR_P (expr)
- && TREE_CODE (TREE_TYPE (expr)) == POINTER_TYPE)
- {
- *base = expr;
- *offset = NULL_TREE;
- return true;
- }
-
- return false;
-}
-
-
/* Transform `a + (b ? x : y)' into `b ? (a + x) : (a + y)'.
Transform, `a + (x < y)' into `(x < y) ? (a + 1) : (a + 0)'. Here
CODE corresponds to the `+', COND to the `(b ? x : y)' or `(x < y)'
(for integers). Avoid this if the final type is a pointer
since then we sometimes need the inner conversion. Likewise if
the outer has a precision not equal to the size of its mode. */
- if ((((inter_int || inter_ptr) && (inside_int || inside_ptr))
+ if (((inter_int && inside_int)
|| (inter_float && inside_float)
|| (inter_vec && inside_vec))
&& inter_prec >= inside_prec
intermediate and final types differ, or
- the final type is a pointer type and the precisions of the
initial and intermediate types differ.
- - the final type is a pointer type and the initial type not
- the initial type is a pointer to an array and the final type
not. */
if (! inside_float && ! inter_float && ! final_float
&& ! (final_ptr && inside_prec != inter_prec)
&& ! (final_prec != GET_MODE_BITSIZE (TYPE_MODE (type))
&& TYPE_MODE (type) == TYPE_MODE (inter_type))
- && final_ptr == inside_ptr
- && ! (inside_ptr
+ && ! (inside_ptr && final_ptr
&& TREE_CODE (TREE_TYPE (inside_type)) == ARRAY_TYPE
&& TREE_CODE (TREE_TYPE (type)) != ARRAY_TYPE))
return fold_build1 (code, type, TREE_OPERAND (op0, 0));
if (TREE_CODE (arg0) == INTEGER_CST)
return fold_not_const (arg0, type);
else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
- return TREE_OPERAND (arg0, 0);
+ return fold_convert (type, TREE_OPERAND (arg0, 0));
/* Convert ~ (-A) to A - 1. */
else if (INTEGRAL_TYPE_P (type) && TREE_CODE (arg0) == NEGATE_EXPR)
- return fold_build2 (MINUS_EXPR, type, TREE_OPERAND (arg0, 0),
+ return fold_build2 (MINUS_EXPR, type,
+ fold_convert (type, TREE_OPERAND (arg0, 0)),
build_int_cst (type, 1));
/* Convert ~ (A - 1) or ~ (A + -1) to -A. */
else if (INTEGRAL_TYPE_P (type)
&& ((TREE_CODE (arg0) == MINUS_EXPR
&& integer_onep (TREE_OPERAND (arg0, 1)))
|| (TREE_CODE (arg0) == PLUS_EXPR
&& integer_all_onesp (TREE_OPERAND (arg0, 1)))))
- return fold_build1 (NEGATE_EXPR, type, TREE_OPERAND (arg0, 0));
+ return fold_build1 (NEGATE_EXPR, type,
+ fold_convert (type, TREE_OPERAND (arg0, 0)));
/* Convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
else if (TREE_CODE (arg0) == BIT_XOR_EXPR
&& (tem = fold_unary (BIT_NOT_EXPR, type,
/* For comparisons of pointers we can decompose it to a compile time
comparison of the base objects and the offsets into the object.
- This requires at least one operand being an ADDR_EXPR to do more
- than the operand_equal_p test below. */
+ This requires at least one operand being an ADDR_EXPR or a
+ POINTER_PLUS_EXPR to do more than the operand_equal_p test below. */
if (POINTER_TYPE_P (TREE_TYPE (arg0))
&& (TREE_CODE (arg0) == ADDR_EXPR
- || TREE_CODE (arg1) == ADDR_EXPR))
+ || TREE_CODE (arg1) == ADDR_EXPR
+ || TREE_CODE (arg0) == POINTER_PLUS_EXPR
+ || TREE_CODE (arg1) == POINTER_PLUS_EXPR))
{
tree base0, base1, offset0 = NULL_TREE, offset1 = NULL_TREE;
HOST_WIDE_INT bitsize, bitpos0 = 0, bitpos1 = 0;
else
indirect_base0 = true;
}
+ else if (TREE_CODE (arg0) == POINTER_PLUS_EXPR)
+ {
+ base0 = TREE_OPERAND (arg0, 0);
+ offset0 = TREE_OPERAND (arg0, 1);
+ }
base1 = arg1;
if (TREE_CODE (arg1) == ADDR_EXPR)
else if (!indirect_base0)
base1 = NULL_TREE;
}
+ else if (TREE_CODE (arg1) == POINTER_PLUS_EXPR)
+ {
+ base1 = TREE_OPERAND (arg1, 0);
+ offset1 = TREE_OPERAND (arg1, 1);
+ }
else if (indirect_base0)
base1 = NULL_TREE;
}
}
- /* If this is a comparison of two exprs that look like an ARRAY_REF of the
- same object, then we can fold this to a comparison of the two offsets in
- signed size type. This is possible because pointer arithmetic is
- restricted to retain within an object and overflow on pointer differences
- is undefined as of 6.5.6/8 and /9 with respect to the signed ptrdiff_t.
-
- We check flag_wrapv directly because pointers types are unsigned,
- and therefore TYPE_OVERFLOW_WRAPS returns true for them. That is
- normally what we want to avoid certain odd overflow cases, but
- not here. */
- if (POINTER_TYPE_P (TREE_TYPE (arg0))
- && !flag_wrapv
- && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (arg0)))
- {
- tree base0, offset0, base1, offset1;
-
- if (extract_array_ref (arg0, &base0, &offset0)
- && extract_array_ref (arg1, &base1, &offset1)
- && operand_equal_p (base0, base1, 0))
- {
- tree signed_size_type_node;
- signed_size_type_node = signed_type_for (size_type_node);
-
- /* By converting to signed size type we cover middle-end pointer
- arithmetic which operates on unsigned pointer types of size
- type size and ARRAY_REF offsets which are properly sign or
- zero extended from their type in case it is narrower than
- size type. */
- if (offset0 == NULL_TREE)
- offset0 = build_int_cst (signed_size_type_node, 0);
- else
- offset0 = fold_convert (signed_size_type_node, offset0);
- if (offset1 == NULL_TREE)
- offset1 = build_int_cst (signed_size_type_node, 0);
- else
- offset1 = fold_convert (signed_size_type_node, offset1);
-
- return fold_build2 (code, type, offset0, offset1);
- }
- }
-
/* Transform comparisons of the form X +- C1 CMP Y +- C2 to
X CMP Y +- C2 +- C1 for signed X, Y. This is valid if
the resulting offset is smaller in absolute value than the
}
+/* Subroutine of fold_binary. If P is the value of EXPR, computes
+ power-of-two M and (arbitrary) N such that M divides (P-N). This condition
+ guarantees that P and N have the same least significant log2(M) bits.
+ N is not otherwise constrained. In particular, N is not normalized to
+ 0 <= N < M as is common. In general, the precise value of P is unknown.
+ M is chosen as large as possible such that constant N can be determined.
+
+ Returns M and sets *RESIDUE to N. */
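+
+/* For example (an illustration, not part of the original comment):
+ for EXPR == &a[2] with an 8-byte-aligned "char a[8]", the result
+ is M == 8 and *RESIDUE == 2, since &a[2] is always 2 bytes past a
+ multiple of 8. */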
+
+static unsigned HOST_WIDE_INT
+get_pointer_modulus_and_residue (tree expr, unsigned HOST_WIDE_INT *residue)
+{
+ enum tree_code code;
+
+ *residue = 0;
+
+ code = TREE_CODE (expr);
+ if (code == ADDR_EXPR)
+ {
+ expr = TREE_OPERAND (expr, 0);
+ if (handled_component_p (expr))
+ {
+ HOST_WIDE_INT bitsize, bitpos;
+ tree offset;
+ enum machine_mode mode;
+ int unsignedp, volatilep;
+
+ expr = get_inner_reference (expr, &bitsize, &bitpos, &offset,
+ &mode, &unsignedp, &volatilep, false);
+ *residue = bitpos / BITS_PER_UNIT;
+ if (offset)
+ {
+ if (TREE_CODE (offset) == INTEGER_CST)
+ *residue += TREE_INT_CST_LOW (offset);
+ else
+ /* We don't handle more complicated offset expressions. */
+ return 1;
+ }
+ }
+
+ if (DECL_P (expr))
+ return DECL_ALIGN_UNIT (expr);
+ }
+ else if (code == POINTER_PLUS_EXPR)
+ {
+ tree op0, op1;
+ unsigned HOST_WIDE_INT modulus;
+ enum tree_code inner_code;
+
+ op0 = TREE_OPERAND (expr, 0);
+ STRIP_NOPS (op0);
+ modulus = get_pointer_modulus_and_residue (op0, residue);
+
+ op1 = TREE_OPERAND (expr, 1);
+ STRIP_NOPS (op1);
+ inner_code = TREE_CODE (op1);
+ if (inner_code == INTEGER_CST)
+ {
+ *residue += TREE_INT_CST_LOW (op1);
+ return modulus;
+ }
+ else if (inner_code == MULT_EXPR)
+ {
+ op1 = TREE_OPERAND (op1, 1);
+ if (TREE_CODE (op1) == INTEGER_CST)
+ {
+ unsigned HOST_WIDE_INT align;
+
+ /* Compute the greatest power-of-2 divisor of op1. */
+ align = TREE_INT_CST_LOW (op1);
+ align &= -align;
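+ /* E.g. op1 == 24 (binary 11000): 24 & -24 == 8, the greatest
+ power of 2 dividing 24. */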
+
+ /* If align is non-zero and less than modulus, replace
+ modulus with align. If align is 0, then either op1 is 0
+ or the greatest power-of-2 divisor of op1 doesn't fit in an
+ unsigned HOST_WIDE_INT. In either case, no additional
+ constraint is imposed. */
+ if (align)
+ modulus = MIN (modulus, align);
+
+ return modulus;
+ }
+ }
+ }
+
+ /* If we get here, we were unable to determine anything useful about the
+ expression. */
+ return 1;
+}
+
+
/* Fold a binary expression of code CODE and type TYPE with operands
OP0 and OP1. Return the folded expression if folding is
successful. Otherwise, return NULL_TREE. */
if (TREE_CODE (arg0) == COMPOUND_EXPR)
return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
fold_build2 (code, type,
- TREE_OPERAND (arg0, 1), op1));
+ fold_convert (TREE_TYPE (op0),
+ TREE_OPERAND (arg0, 1)),
+ op1));
if (TREE_CODE (arg1) == COMPOUND_EXPR
&& reorder_operands_p (arg0, TREE_OPERAND (arg1, 0)))
return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
- fold_build2 (code, type,
- op0, TREE_OPERAND (arg1, 1)));
+ fold_build2 (code, type, op0,
+ fold_convert (TREE_TYPE (op1),
+ TREE_OPERAND (arg1, 1))));
if (TREE_CODE (arg0) == COND_EXPR || COMPARISON_CLASS_P (arg0))
{
return omit_one_operand (type, t1, arg0);
}
}
+
+ /* X + (X / CST) * -CST is X % CST. */
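+ /* E.g. X == 7, CST == 3: 7 + (7 / 3) * -3 == 7 + 2 * -3 == 1 == 7 % 3. */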
+ if (TREE_CODE (arg1) == MULT_EXPR
+ && TREE_CODE (TREE_OPERAND (arg1, 0)) == TRUNC_DIV_EXPR
+ && operand_equal_p (arg0,
+ TREE_OPERAND (TREE_OPERAND (arg1, 0), 0), 0))
+ {
+ tree cst0 = TREE_OPERAND (TREE_OPERAND (arg1, 0), 1);
+ tree cst1 = TREE_OPERAND (arg1, 1);
+ tree sum = fold_binary (PLUS_EXPR, TREE_TYPE (cst1), cst1, cst0);
+ if (sum && integer_zerop (sum))
+ return fold_convert (type,
+ fold_build2 (TRUNC_MOD_EXPR,
+ TREE_TYPE (arg0), arg0, cst0));
+ }
}
/* Handle (A1 * C1) + (A2 * C2) with A1, A2 or C1, C2 being the
}
/* A - (-B) -> A + B */
if (TREE_CODE (arg1) == NEGATE_EXPR)
- return fold_build2 (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0));
+ return fold_build2 (PLUS_EXPR, type, op0,
+ fold_convert (type, TREE_OPERAND (arg1, 0)));
/* (-A) - B -> (-B) - A where B is easily negated and we can swap. */
if (TREE_CODE (arg0) == NEGATE_EXPR
&& (FLOAT_TYPE_P (type)
|| INTEGRAL_TYPE_P (type))
&& negate_expr_p (arg1)
&& reorder_operands_p (arg0, arg1))
- return fold_build2 (MINUS_EXPR, type, negate_expr (arg1),
- TREE_OPERAND (arg0, 0));
+ return fold_build2 (MINUS_EXPR, type,
+ fold_convert (type, negate_expr (arg1)),
+ fold_convert (type, TREE_OPERAND (arg0, 0)));
/* Convert -1 - A to ~A. */
if (INTEGRAL_TYPE_P (type)
&& integer_all_onesp (arg0))
return fold_build1 (BIT_NOT_EXPR, type, op1);
+
+ /* X - (X / CST) * CST is X % CST. */
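+ /* E.g. X == 7, CST == 3: 7 - (7 / 3) * 3 == 7 - 6 == 1 == 7 % 3. */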
+ if (INTEGRAL_TYPE_P (type)
+ && TREE_CODE (arg1) == MULT_EXPR
+ && TREE_CODE (TREE_OPERAND (arg1, 0)) == TRUNC_DIV_EXPR
+ && operand_equal_p (arg0,
+ TREE_OPERAND (TREE_OPERAND (arg1, 0), 0), 0)
+ && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg1, 0), 1),
+ TREE_OPERAND (arg1, 1), 0))
+ return fold_convert (type,
+ fold_build2 (TRUNC_MOD_EXPR, TREE_TYPE (arg0),
+ arg0, TREE_OPERAND (arg1, 1)));
+
if (! FLOAT_TYPE_P (type))
{
if (integer_zerop (arg0))
&& (tem = negate_expr (arg1)) != arg1
&& !TREE_OVERFLOW (tem))
return fold_build2 (MULT_EXPR, type,
- negate_expr (arg0), tem);
+ fold_convert (type, negate_expr (arg0)), tem);
/* (a * (1 << b)) is (a << b) */
if (TREE_CODE (arg1) == LSHIFT_EXPR
&& integer_onep (TREE_OPERAND (arg1, 0)))
- return fold_build2 (LSHIFT_EXPR, type, arg0,
+ return fold_build2 (LSHIFT_EXPR, type, op0,
TREE_OPERAND (arg1, 1));
if (TREE_CODE (arg0) == LSHIFT_EXPR
&& integer_onep (TREE_OPERAND (arg0, 0)))
- return fold_build2 (LSHIFT_EXPR, type, arg1,
+ return fold_build2 (LSHIFT_EXPR, type, op1,
TREE_OPERAND (arg0, 1));
strict_overflow_p = false;
if (TREE_CODE (arg1) == INTEGER_CST
- && 0 != (tem = extract_muldiv (op0,
- fold_convert (type, arg1),
- code, NULL_TREE,
+ && 0 != (tem = extract_muldiv (op0, arg1, code, NULL_TREE,
&strict_overflow_p)))
{
if (strict_overflow_p)
&& TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- unsigned HOST_WIDE_INT hi1, lo1, hi2, lo2, mlo, mhi;
- int width = TYPE_PRECISION (type);
+ unsigned HOST_WIDE_INT hi1, lo1, hi2, lo2, hi3, lo3, mlo, mhi;
+ int width = TYPE_PRECISION (type), w;
hi1 = TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1));
lo1 = TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1));
hi2 = TREE_INT_CST_HIGH (arg1);
return fold_build2 (BIT_IOR_EXPR, type,
TREE_OPERAND (arg0, 0), arg1);
- /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
+ /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2,
+ unless (C1 & ~C2) | (C2 & C3) for some C3 is a mask of some
+ mode which allows further optimizations. */
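+ /* E.g. for (X & 0xF4) | 0x0F, plain C1 & ~C2 gives 0xF0, but
+ choosing C3 == 0x0F yields 0xF0 | 0x0F == 0xFF, a QImode mask. */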
hi1 &= mhi;
lo1 &= mlo;
- if ((hi1 & ~hi2) != hi1 || (lo1 & ~lo2) != lo1)
+ hi2 &= mhi;
+ lo2 &= mlo;
+ hi3 = hi1 & ~hi2;
+ lo3 = lo1 & ~lo2;
+ for (w = BITS_PER_UNIT;
+ w <= width && w <= HOST_BITS_PER_WIDE_INT;
+ w <<= 1)
+ {
+ unsigned HOST_WIDE_INT mask
+ = (unsigned HOST_WIDE_INT) -1 >> (HOST_BITS_PER_WIDE_INT - w);
+ if (((lo1 | lo2) & mask) == mask
+ && (lo1 & ~mask) == 0 && hi1 == 0)
+ {
+ hi3 = 0;
+ lo3 = mask;
+ break;
+ }
+ }
+ if (hi3 != hi1 || lo3 != lo1)
return fold_build2 (BIT_IOR_EXPR, type,
fold_build2 (BIT_AND_EXPR, type,
TREE_OPERAND (arg0, 0),
build_int_cst_wide (type,
- lo1 & ~lo2,
- hi1 & ~hi2)),
+ lo3, hi3)),
arg1);
}
if (TREE_CODE (arg0) == BIT_IOR_EXPR
&& TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
- return fold_build2 (BIT_IOR_EXPR, type,
- fold_build2 (BIT_AND_EXPR, type,
- TREE_OPERAND (arg0, 0), arg1),
- fold_build2 (BIT_AND_EXPR, type,
- TREE_OPERAND (arg0, 1), arg1));
+ {
+ tree tmp1 = fold_convert (TREE_TYPE (arg0), arg1);
+ tree tmp2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
+ TREE_OPERAND (arg0, 0), tmp1);
+ tree tmp3 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
+ TREE_OPERAND (arg0, 1), tmp1);
+ return fold_convert (type,
+ fold_build2 (BIT_IOR_EXPR, TREE_TYPE (arg0),
+ tmp2, tmp3));
+ }
/* (X | Y) & Y is (X, Y). */
if (TREE_CODE (arg0) == BIT_IOR_EXPR
{
return fold_build1 (BIT_NOT_EXPR, type,
build2 (BIT_IOR_EXPR, type,
- TREE_OPERAND (arg0, 0),
- TREE_OPERAND (arg1, 0)));
+ fold_convert (type,
+ TREE_OPERAND (arg0, 0)),
+ fold_convert (type,
+ TREE_OPERAND (arg1, 0))));
+ }
+
+ /* If arg0 is derived from the address of an object or function, we may
+ be able to fold this expression using the object or function's
+ alignment. */
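+ /* E.g. (illustrative) for a 16-byte-aligned "char a[32]", folding
+ the tree for "&a[4] & 7" finds modulus == 16 and residue == 4, so
+ the result is the constant 4. */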
+ if (POINTER_TYPE_P (TREE_TYPE (arg0)) && host_integerp (arg1, 1))
+ {
+ unsigned HOST_WIDE_INT modulus, residue;
+ unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg1);
+
+ modulus = get_pointer_modulus_and_residue (arg0, &residue);
+
+ /* This works because modulus is a power of 2. If this weren't the
+ case, we'd have to replace it by its greatest power-of-2
+ divisor: modulus & -modulus. */
+ if (low < modulus)
+ return build_int_cst (type, residue & low);
+ }
+
+ /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
+ (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
+ if the new mask might be further optimized. */
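+ /* E.g. "(X << 2) & 0xFC" becomes "(X << 2) & 0xFF", since the low
+ two bits are zero after the shift and 0xFF is a QImode mask. */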
+ if ((TREE_CODE (arg0) == LSHIFT_EXPR
+ || TREE_CODE (arg0) == RSHIFT_EXPR)
+ && host_integerp (TREE_OPERAND (arg0, 1), 1)
+ && host_integerp (arg1, TYPE_UNSIGNED (TREE_TYPE (arg1)))
+ && tree_low_cst (TREE_OPERAND (arg0, 1), 1)
+ < TYPE_PRECISION (TREE_TYPE (arg0))
+ && TYPE_PRECISION (TREE_TYPE (arg0)) <= HOST_BITS_PER_WIDE_INT
+ && tree_low_cst (TREE_OPERAND (arg0, 1), 1) > 0)
+ {
+ unsigned int shiftc = tree_low_cst (TREE_OPERAND (arg0, 1), 1);
+ unsigned HOST_WIDE_INT mask
+ = tree_low_cst (arg1, TYPE_UNSIGNED (TREE_TYPE (arg1)));
+ unsigned HOST_WIDE_INT newmask, zerobits = 0;
+ tree shift_type = TREE_TYPE (arg0);
+
+ if (TREE_CODE (arg0) == LSHIFT_EXPR)
+ zerobits = ((((unsigned HOST_WIDE_INT) 1) << shiftc) - 1);
+ else if (TREE_CODE (arg0) == RSHIFT_EXPR
+ && TYPE_PRECISION (TREE_TYPE (arg0))
+ == GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (arg0))))
+ {
+ unsigned int prec = TYPE_PRECISION (TREE_TYPE (arg0));
+ tree arg00 = TREE_OPERAND (arg0, 0);
+ /* See if more bits can be proven as zero because of
+ zero extension. */
+ if (TREE_CODE (arg00) == NOP_EXPR
+ && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg00, 0))))
+ {
+ tree inner_type = TREE_TYPE (TREE_OPERAND (arg00, 0));
+ if (TYPE_PRECISION (inner_type)
+ == GET_MODE_BITSIZE (TYPE_MODE (inner_type))
+ && TYPE_PRECISION (inner_type) < prec)
+ {
+ prec = TYPE_PRECISION (inner_type);
+ /* See if we can shorten the right shift. */
+ if (shiftc < prec)
+ shift_type = inner_type;
+ }
+ }
+ zerobits = ~(unsigned HOST_WIDE_INT) 0;
+ zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
+ zerobits <<= prec - shiftc;
+ /* For an arithmetic shift, if the sign bit could be set, zerobits
+ can actually contain sign bits, so no transformation is
+ possible unless MASK masks them all away. In that case
+ the shift needs to be converted into a logical shift. */
+ if (!TYPE_UNSIGNED (TREE_TYPE (arg0))
+ && prec == TYPE_PRECISION (TREE_TYPE (arg0)))
+ {
+ if ((mask & zerobits) == 0)
+ shift_type = unsigned_type_for (TREE_TYPE (arg0));
+ else
+ zerobits = 0;
+ }
+ }
+
+ /* ((X << 16) & 0xff00) is (X, 0). */
+ if ((mask & zerobits) == mask)
+ return omit_one_operand (type, build_int_cst (type, 0), arg0);
+
+ newmask = mask | zerobits;
+ if (newmask != mask && (newmask & (newmask + 1)) == 0)
+ {
+ unsigned int prec;
+
+ /* Only do the transformation if NEWMASK is some integer
+ mode's mask. */
+ for (prec = BITS_PER_UNIT;
+ prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
+ if (newmask == (((unsigned HOST_WIDE_INT) 1) << prec) - 1)
+ break;
+ if (prec < HOST_BITS_PER_WIDE_INT
+ || newmask == ~(unsigned HOST_WIDE_INT) 0)
+ {
+ if (shift_type != TREE_TYPE (arg0))
+ {
+ tem = fold_build2 (TREE_CODE (arg0), shift_type,
+ fold_convert (shift_type,
+ TREE_OPERAND (arg0, 0)),
+ TREE_OPERAND (arg0, 1));
+ tem = fold_convert (type, tem);
+ }
+ else
+ tem = op0;
+ return fold_build2 (BIT_AND_EXPR, type, tem,
+ build_int_cst_type (TREE_TYPE (op1),
+ newmask));
+ }
+ }
}
goto associate;
strict_overflow_p = false;
if (TREE_CODE (arg1) == LSHIFT_EXPR
&& (TYPE_UNSIGNED (type)
- || tree_expr_nonnegative_warnv_p (arg0, &strict_overflow_p)))
+ || tree_expr_nonnegative_warnv_p (op0, &strict_overflow_p)))
{
tree sval = TREE_OPERAND (arg1, 0);
if (integer_pow2p (sval) && tree_int_cst_sgn (sval) > 0)
fold_convert (type, arg0), sh_cnt);
}
}
+
+ /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
+ TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
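+ /* (Unsigned operands are non-negative, so the exact quotient is
+ non-negative and truncating towards zero equals flooring.) */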
+ if (INTEGRAL_TYPE_P (type)
+ && TYPE_UNSIGNED (type)
+ && code == FLOOR_DIV_EXPR)
+ return fold_build2 (TRUNC_DIV_EXPR, type, op0, op1);
+
/* Fall thru */
case ROUND_DIV_EXPR:
strict_overflow_p = false;
if ((code == TRUNC_MOD_EXPR || code == FLOOR_MOD_EXPR)
&& (TYPE_UNSIGNED (type)
- || tree_expr_nonnegative_warnv_p (arg0, &strict_overflow_p)))
+ || tree_expr_nonnegative_warnv_p (op0, &strict_overflow_p)))
{
tree c = arg1;
/* Also optimize A % (C << N) where C is a power of 2,
tree tem = build_int_cst (TREE_TYPE (arg1),
GET_MODE_BITSIZE (TYPE_MODE (type)));
tem = const_binop (MINUS_EXPR, tem, arg1, 0);
- return fold_build2 (RROTATE_EXPR, type, arg0, tem);
+ return fold_build2 (RROTATE_EXPR, type, op0, tem);
}
/* If we have a rotate of a bit operation with the rotate count and
== (unsigned int) GET_MODE_BITSIZE (TYPE_MODE (type))))
return TREE_OPERAND (arg0, 0);
+ /* Fold (X & C2) << C1 into (X << C1) & (C2 << C1)
+ (X & C2) >> C1 into (X >> C1) & (C2 >> C1)
+ if the latter can be further optimized. */
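+ /* E.g. "(X & 0xFF) << 8" becomes "(X << 8) & 0xFF00", which the
+ (X << C1) & C2 rule above can then widen to the HImode mask
+ 0xFFFF. */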
+ if ((code == LSHIFT_EXPR || code == RSHIFT_EXPR)
+ && TREE_CODE (arg0) == BIT_AND_EXPR
+ && TREE_CODE (arg1) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
+ {
+ tree mask = fold_build2 (code, type,
+ fold_convert (type, TREE_OPERAND (arg0, 1)),
+ arg1);
+ tree shift = fold_build2 (code, type,
+ fold_convert (type, TREE_OPERAND (arg0, 0)),
+ arg1);
+ tem = fold_binary (BIT_AND_EXPR, type, shift, mask);
+ if (tem)
+ return tem;
+ }
+
return NULL_TREE;
case MIN_EXPR:
/* bool_var != 1 becomes !bool_var. */
if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE && integer_onep (arg1)
&& code == NE_EXPR)
- return fold_build1 (TRUTH_NOT_EXPR, type, arg0);
+ return fold_build1 (TRUTH_NOT_EXPR, type, fold_convert (type, arg0));
/* bool_var == 0 becomes !bool_var. */
if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE && integer_zerop (arg1)
&& code == EQ_EXPR)
- return fold_build1 (TRUTH_NOT_EXPR, type, arg0);
+ return fold_build1 (TRUTH_NOT_EXPR, type, fold_convert (type, arg0));
/* If this is an equality comparison of the address of two non-weak,
unaliased symbols neither of which are extern (since we do not
tree arg01 = TREE_OPERAND (arg0, 1);
if (TREE_CODE (arg00) == LSHIFT_EXPR
&& integer_onep (TREE_OPERAND (arg00, 0)))
- return
- fold_build2 (code, type,
- build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
- build2 (RSHIFT_EXPR, TREE_TYPE (arg00),
- arg01, TREE_OPERAND (arg00, 1)),
- fold_convert (TREE_TYPE (arg0),
- integer_one_node)),
- arg1);
- else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
- && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
- return
- fold_build2 (code, type,
- build2 (BIT_AND_EXPR, TREE_TYPE (arg0),
- build2 (RSHIFT_EXPR, TREE_TYPE (arg01),
- arg00, TREE_OPERAND (arg01, 1)),
- fold_convert (TREE_TYPE (arg0),
- integer_one_node)),
- arg1);
+ {
+ tree tem = fold_build2 (RSHIFT_EXPR, TREE_TYPE (arg00),
+ arg01, TREE_OPERAND (arg00, 1));
+ tem = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0), tem,
+ build_int_cst (TREE_TYPE (arg0), 1));
+ return fold_build2 (code, type,
+ fold_convert (TREE_TYPE (arg1), tem), arg1);
+ }
+ else if (TREE_CODE (arg01) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (arg01, 0)))
+ {
+ tree tem = fold_build2 (RSHIFT_EXPR, TREE_TYPE (arg01),
+ arg00, TREE_OPERAND (arg01, 1));
+ tem = fold_build2 (BIT_AND_EXPR, TREE_TYPE (arg0), tem,
+ build_int_cst (TREE_TYPE (arg0), 1));
+ return fold_build2 (code, type,
+ fold_convert (TREE_TYPE (arg1), tem), arg1);
+ }
}
/* If this is an NE or EQ comparison of zero against the result of a