/* Fold a constant sub-tree into a single node for C-compiler
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
- Free Software Foundation, Inc.
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
+ 2012 Free Software Foundation, Inc.
This file is part of GCC.
and actually traps on some architectures. But if overflow is
undefined, we can negate, because - (INT_MIN / 1) is an
overflow. */
- if (INTEGRAL_TYPE_P (TREE_TYPE (t))
- && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t)))
- break;
- return negate_expr_p (TREE_OPERAND (t, 1))
- || negate_expr_p (TREE_OPERAND (t, 0));
+ if (INTEGRAL_TYPE_P (TREE_TYPE (t)))
+ {
+ if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t)))
+ break;
+ /* If overflow is undefined then we have to be careful because
+ we ask whether it's ok to associate the negate with the
+ division, which is not ok for example for
+ -((a - b) / c) where (-(a - b)) / c may invoke undefined
+ overflow because of negating INT_MIN. So do not use
+ negate_expr_p here but open-code the two important cases. */
+ if (TREE_CODE (TREE_OPERAND (t, 0)) == NEGATE_EXPR
+ || (TREE_CODE (TREE_OPERAND (t, 0)) == INTEGER_CST
+ && may_negate_without_overflow_p (TREE_OPERAND (t, 0))))
+ return true;
+ }
+ else if (negate_expr_p (TREE_OPERAND (t, 0)))
+ return true;
+ return negate_expr_p (TREE_OPERAND (t, 1));
case NOP_EXPR:
/* Negate -((double)float) as (double)(-float). */
return fold_build2_loc (loc, TREE_CODE (t), type,
TREE_OPERAND (t, 0), negate_expr (tem));
}
+ /* If overflow is undefined then we have to be careful because
+ we ask whether it's ok to associate the negate with the
+ division, which is not ok for example for
+ -((a - b) / c) where (-(a - b)) / c may invoke undefined
+ overflow because of negating INT_MIN. So do not use
+ negate_expr_p here but open-code the two important cases. */
tem = TREE_OPERAND (t, 0);
- if (negate_expr_p (tem))
- {
- if (INTEGRAL_TYPE_P (type)
- && (TREE_CODE (tem) != INTEGER_CST
- || tree_int_cst_equal (tem, TYPE_MIN_VALUE (type))))
- fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
- return fold_build2_loc (loc, TREE_CODE (t), type,
- negate_expr (tem), TREE_OPERAND (t, 1));
- }
+ if ((INTEGRAL_TYPE_P (type)
+ && (TREE_CODE (tem) == NEGATE_EXPR
+ || (TREE_CODE (tem) == INTEGER_CST
+ && may_negate_without_overflow_p (tem))))
+ || !INTEGRAL_TYPE_P (type))
+ return fold_build2_loc (loc, TREE_CODE (t), type,
+ negate_expr (tem), TREE_OPERAND (t, 1));
}
break;
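For illustration (my example, not part of the patch; assumes 32-bit int): the rewrite these hunks guard against would introduce an overflow the original expression does not have.

  #include <limits.h>
  #include <stdio.h>

  int
  main (void)
  {
    int a = INT_MIN, b = 0, c = 2;
    /* The original is well defined: INT_MIN / 2 == -1073741824,
       and its negation fits in int.  */
    int ok = -((a - b) / c);
    /* Associating the negate with the dividend instead, as
       (-(a - b)) / c, would compute -INT_MIN first, which
       overflows and is undefined for signed int.  */
    printf ("%d\n", ok);
    return 0;
  }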
switch (code)
{
case TRUTH_NOT_EXPR:
+ /* We can only do something if the range is testing for zero. */
+ if (low == NULL_TREE || high == NULL_TREE
+ || ! integer_zerop (low) || ! integer_zerop (high))
+ return NULL_TREE;
*p_in_p = ! in_p;
return arg0;
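For intuition (illustration only, not from the patch): a range test on !x is only convertible when the range is exactly [0,0], since (!x) == 0 holds precisely when x != 0, which is what returning arg0 with in_p inverted expresses.

  #include <assert.h>

  int
  main (void)
  {
    int x;
    for (x = -2; x <= 2; x++)
      /* (!x) in [0,0]  <=>  x not in [0,0].  */
      assert (((!x) == 0) == (x != 0));
    return 0;
  }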
case NEGATE_EXPR:
+ /* If flag_wrapv and ARG0_TYPE is signed, make sure
+ low and high are non-NULL, then normalize will DTRT. */
+ if (!TYPE_UNSIGNED (arg0_type)
+ && !TYPE_OVERFLOW_UNDEFINED (arg0_type))
+ {
+ if (low == NULL_TREE)
+ low = TYPE_MIN_VALUE (arg0_type);
+ if (high == NULL_TREE)
+ high = TYPE_MAX_VALUE (arg0_type);
+ }
+
/* (-x) IN [a,b] -> x in [-b, -a] */
n_low = range_binop (MINUS_EXPR, exp_type,
build_int_cst (exp_type, 0),
int in0_p, in1_p, in_p;
tree low0, low1, low, high0, high1, high;
bool strict_overflow_p = false;
- tree lhs = make_range (op0, &in0_p, &low0, &high0, &strict_overflow_p);
- tree rhs = make_range (op1, &in1_p, &low1, &high1, &strict_overflow_p);
- tree tem;
+ tree tem, lhs, rhs;
const char * const warnmsg = G_("assuming signed overflow does not occur "
"when simplifying range test");
+ if (!INTEGRAL_TYPE_P (type))
+ return 0;
+
+ lhs = make_range (op0, &in0_p, &low0, &high0, &strict_overflow_p);
+ rhs = make_range (op1, &in1_p, &low1, &high1, &strict_overflow_p);
+
/* If this is an OR operation, invert both sides; we will invert
again at the end. */
if (or_op)
break;
/* FALLTHROUGH */
case NEGATE_EXPR:
+ /* For division and modulus, type can't be unsigned, as e.g.
+ (-(x / 2U)) / 2U isn't equal to -((x / 2U) / 2U) for x >= 2.
+ For signed types, even with wrapping overflow, this is fine. */
+ if (code != MULT_EXPR && TYPE_UNSIGNED (type))
+ break;
if ((t1 = extract_muldiv (op0, c, code, wide_type, strict_overflow_p))
!= 0)
return fold_build1 (tcode, ctype, fold_convert (ctype, t1));
}
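A standalone check of the inequality cited in the comment (my example; assumes 32-bit unsigned int):

  #include <stdio.h>

  int
  main (void)
  {
    unsigned int x = 4;
    unsigned int lhs = (-(x / 2U)) / 2U;   /* (0u - 2) / 2 == 2147483647.  */
    unsigned int rhs = -((x / 2U) / 2U);   /* 0u - 1 == 4294967295.  */
    /* The two differ, so distributing the negate over an unsigned
       division is not valid.  */
    printf ("%u %u\n", lhs, rhs);
    return 0;
  }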
/* This transformation is only worthwhile if we don't have to wrap ARG
- in a SAVE_EXPR and the operation can be simplified on at least one
- of the branches once its pushed inside the COND_EXPR. */
+ in a SAVE_EXPR and the operation can be simplified without recursing
+ on at least one of the branches once it's pushed inside the COND_EXPR. */
if (!TREE_CONSTANT (arg)
&& (TREE_SIDE_EFFECTS (arg)
+ || TREE_CODE (arg) == COND_EXPR || TREE_CODE (arg) == VEC_COND_EXPR
|| TREE_CONSTANT (true_value) || TREE_CONSTANT (false_value)))
return NULL_TREE;
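In source terms, the guarded transformation distributes a binary operation into the arms of a conditional; a hypothetical example of when it pays off:

  #include <stdio.h>

  static int
  f (int cond, int b, int c)
  {
    /* Pushing '+ c' inside gives cond ? (0 + c) : (b + c); the
       constant arm folds to just c without further recursion.  A
       COND_EXPR (or VEC_COND_EXPR) operand is now rejected up front
       so the push-down cannot recurse into nested conditionals.  */
    return (cond ? 0 : b) + c;
  }

  int
  main (void)
  {
    printf ("%d %d\n", f (1, 5, 7), f (0, 5, 7));   /* 7 12 */
    return 0;
  }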
&& TREE_TYPE (TREE_OPERAND (arg1, 0)) == inner_type))
return NULL_TREE;
- if ((TYPE_UNSIGNED (inner_type) != TYPE_UNSIGNED (outer_type)
- || POINTER_TYPE_P (inner_type) != POINTER_TYPE_P (outer_type))
+ if (TYPE_UNSIGNED (inner_type) != TYPE_UNSIGNED (outer_type)
&& code != NE_EXPR
&& code != EQ_EXPR)
return NULL_TREE;
+ if (POINTER_TYPE_P (inner_type) != POINTER_TYPE_P (outer_type))
+ return NULL_TREE;
+
if (TREE_CODE (arg1) == INTEGER_CST)
arg1 = force_fit_type_double (inner_type, tree_to_double_int (arg1),
0, TREE_OVERFLOW (arg1));
s = integer_one_node;
}
- for (;; ref = TREE_OPERAND (ref, 0))
+ /* Handle &x.array the same as we would handle &x.array[0]. */
+ if (TREE_CODE (ref) == COMPONENT_REF
+ && TREE_CODE (TREE_TYPE (ref)) == ARRAY_TYPE)
{
- if (TREE_CODE (ref) == ARRAY_REF)
+ tree domain;
+
+ /* Remember if this was a multi-dimensional array. */
+ if (TREE_CODE (TREE_OPERAND (ref, 0)) == ARRAY_REF)
+ mdim = true;
+
+ domain = TYPE_DOMAIN (TREE_TYPE (ref));
+ if (! domain)
+ goto cont;
+ itype = TREE_TYPE (domain);
+
+ step = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ref)));
+ if (TREE_CODE (step) != INTEGER_CST)
+ goto cont;
+
+ if (s)
{
- tree domain;
+ if (! tree_int_cst_equal (step, s))
+ goto cont;
+ }
+ else
+ {
+ /* Check whether delta is a multiple of step. */
+ tree tmp = div_if_zero_remainder (EXACT_DIV_EXPR, op1, step);
+ if (! tmp)
+ goto cont;
+ delta = tmp;
+ }
- /* Remember if this was a multi-dimensional array. */
- if (TREE_CODE (TREE_OPERAND (ref, 0)) == ARRAY_REF)
- mdim = true;
+ /* Only fold here if we can verify we do not overflow one
+ dimension of a multi-dimensional array. */
+ if (mdim)
+ {
+ tree tmp;
- domain = TYPE_DOMAIN (TREE_TYPE (TREE_OPERAND (ref, 0)));
- if (! domain)
- continue;
- itype = TREE_TYPE (domain);
+ if (!TYPE_MIN_VALUE (domain)
+ || !TYPE_MAX_VALUE (domain)
+ || TREE_CODE (TYPE_MAX_VALUE (domain)) != INTEGER_CST)
+ goto cont;
- step = array_ref_element_size (ref);
- if (TREE_CODE (step) != INTEGER_CST)
- continue;
+ tmp = fold_binary_loc (loc, PLUS_EXPR, itype,
+ fold_convert_loc (loc, itype,
+ TYPE_MIN_VALUE (domain)),
+ fold_convert_loc (loc, itype, delta));
+ if (TREE_CODE (tmp) != INTEGER_CST
+ || tree_int_cst_lt (TYPE_MAX_VALUE (domain), tmp))
+ goto cont;
+ }
- if (s)
- {
- if (! tree_int_cst_equal (step, s))
- continue;
- }
- else
- {
- /* Try if delta is a multiple of step. */
- tree tmp = div_if_zero_remainder (EXACT_DIV_EXPR, op1, step);
- if (! tmp)
- continue;
- delta = tmp;
- }
+ /* We found a suitable component reference. */
- /* Only fold here if we can verify we do not overflow one
- dimension of a multi-dimensional array. */
- if (mdim)
- {
- tree tmp;
+ pref = TREE_OPERAND (addr, 0);
+ ret = copy_node (pref);
+ SET_EXPR_LOCATION (ret, loc);
- if (TREE_CODE (TREE_OPERAND (ref, 1)) != INTEGER_CST
- || !TYPE_MAX_VALUE (domain)
- || TREE_CODE (TYPE_MAX_VALUE (domain)) != INTEGER_CST)
- continue;
+ ret = build4_loc (loc, ARRAY_REF, TREE_TYPE (TREE_TYPE (ref)), ret,
+ fold_build2_loc
+ (loc, PLUS_EXPR, itype,
+ fold_convert_loc (loc, itype,
+ TYPE_MIN_VALUE
+ (TYPE_DOMAIN (TREE_TYPE (ref)))),
+ fold_convert_loc (loc, itype, delta)),
+ NULL_TREE, NULL_TREE);
+ return build_fold_addr_expr_loc (loc, ret);
+ }
- tmp = fold_binary_loc (loc, PLUS_EXPR, itype,
- fold_convert_loc (loc, itype,
- TREE_OPERAND (ref, 1)),
- fold_convert_loc (loc, itype, delta));
- if (!tmp
- || TREE_CODE (tmp) != INTEGER_CST
- || tree_int_cst_lt (TYPE_MAX_VALUE (domain), tmp))
- continue;
- }
+cont:
- break;
- }
- else if (TREE_CODE (ref) == COMPONENT_REF
- && TREE_CODE (TREE_TYPE (ref)) == ARRAY_TYPE)
+ for (;; ref = TREE_OPERAND (ref, 0))
+ {
+ if (TREE_CODE (ref) == ARRAY_REF)
{
tree domain;
if (TREE_CODE (TREE_OPERAND (ref, 0)) == ARRAY_REF)
mdim = true;
- domain = TYPE_DOMAIN (TREE_TYPE (ref));
+ domain = TYPE_DOMAIN (TREE_TYPE (TREE_OPERAND (ref, 0)));
if (! domain)
continue;
itype = TREE_TYPE (domain);
- step = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ref)));
+ step = array_ref_element_size (ref);
if (TREE_CODE (step) != INTEGER_CST)
continue;
{
tree tmp;
- if (!TYPE_MIN_VALUE (domain)
+ if (TREE_CODE (TREE_OPERAND (ref, 1)) != INTEGER_CST
|| !TYPE_MAX_VALUE (domain)
|| TREE_CODE (TYPE_MAX_VALUE (domain)) != INTEGER_CST)
continue;
tmp = fold_binary_loc (loc, PLUS_EXPR, itype,
fold_convert_loc (loc, itype,
- TYPE_MIN_VALUE (domain)),
+ TREE_OPERAND (ref, 1)),
fold_convert_loc (loc, itype, delta));
- if (TREE_CODE (tmp) != INTEGER_CST
+ if (!tmp
+ || TREE_CODE (tmp) != INTEGER_CST
|| tree_int_cst_lt (TYPE_MAX_VALUE (domain), tmp))
continue;
}
pos = TREE_OPERAND (pos, 0);
}
- if (TREE_CODE (ref) == ARRAY_REF)
- {
- TREE_OPERAND (pos, 1)
- = fold_build2_loc (loc, PLUS_EXPR, itype,
- fold_convert_loc (loc, itype, TREE_OPERAND (pos, 1)),
- fold_convert_loc (loc, itype, delta));
- return fold_build1_loc (loc, ADDR_EXPR, TREE_TYPE (addr), ret);
- }
- else if (TREE_CODE (ref) == COMPONENT_REF)
- {
- gcc_assert (ret == pos);
- ret = build4_loc (loc, ARRAY_REF, TREE_TYPE (TREE_TYPE (ref)), ret,
- fold_build2_loc
- (loc, PLUS_EXPR, itype,
- fold_convert_loc (loc, itype,
- TYPE_MIN_VALUE
- (TYPE_DOMAIN (TREE_TYPE (ref)))),
- fold_convert_loc (loc, itype, delta)),
- NULL_TREE, NULL_TREE);
- return build_fold_addr_expr_loc (loc, ret);
- }
- else
- gcc_unreachable ();
+ TREE_OPERAND (pos, 1)
+ = fold_build2_loc (loc, PLUS_EXPR, itype,
+ fold_convert_loc (loc, itype, TREE_OPERAND (pos, 1)),
+ fold_convert_loc (loc, itype, delta));
+ return fold_build1_loc (loc, ADDR_EXPR, TREE_TYPE (addr), ret);
}
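A source-level view of the new &x.array handling (my example): pointer arithmetic on the array as a whole now folds the same way as on its first element.

  #include <stdio.h>

  struct s { int arr[10]; };
  static struct s x;

  int
  main (void)
  {
    /* &x.arr is treated like &x.arr[0], so adding three elements'
       worth of bytes folds to &x.arr[3].  */
    int *p = (int *) ((char *) &x.arr + 3 * sizeof (int));
    printf ("%d\n", (int) (p - &x.arr[0]));   /* 3 */
    return 0;
  }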
indirect_base0 = true;
}
offset0 = TREE_OPERAND (arg0, 1);
+ if (host_integerp (offset0, 0))
+ {
+ HOST_WIDE_INT off = size_low_cst (offset0);
+ if ((HOST_WIDE_INT) (((unsigned HOST_WIDE_INT) off)
+ * BITS_PER_UNIT)
+ / BITS_PER_UNIT == (HOST_WIDE_INT) off)
+ {
+ bitpos0 = off * BITS_PER_UNIT;
+ offset0 = NULL_TREE;
+ }
+ }
}
base1 = arg1;
indirect_base1 = true;
}
offset1 = TREE_OPERAND (arg1, 1);
+ if (host_integerp (offset1, 0))
+ {
+ HOST_WIDE_INT off = size_low_cst (offset1);
+ if ((HOST_WIDE_INT) (((unsigned HOST_WIDE_INT) off)
+ * BITS_PER_UNIT)
+ / BITS_PER_UNIT == (HOST_WIDE_INT) off)
+ {
+ bitpos1 = off * BITS_PER_UNIT;
+ offset1 = NULL_TREE;
+ }
+ }
}
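A minimal sketch of the overflow guard applied to both offsets (mine; assumes BITS_PER_UNIT == 8 and a 64-bit HOST_WIDE_INT):

  #include <stdio.h>

  #define BITS_PER_UNIT 8
  #define HOST_WIDE_INT long long

  static int
  byte_offset_fits_in_bits (HOST_WIDE_INT off, HOST_WIDE_INT *bitpos)
  {
    /* Multiply in unsigned arithmetic, where overflow is well
       defined, and verify the product divides back to OFF.  */
    if ((HOST_WIDE_INT) (((unsigned HOST_WIDE_INT) off) * BITS_PER_UNIT)
        / BITS_PER_UNIT == off)
      {
        *bitpos = off * BITS_PER_UNIT;
        return 1;
      }
    return 0;
  }

  int
  main (void)
  {
    HOST_WIDE_INT bits;
    printf ("%d\n", byte_offset_fits_in_bits (1LL << 61, &bits));  /* 0 */
    printf ("%d\n", byte_offset_fits_in_bits (42, &bits));         /* 1 */
    return 0;
  }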
/* A local variable can never be pointed to by
}
}
+/* Try to fold a pointer difference of type TYPE between two address expressions of
+ array references AREF0 and AREF1 using location LOC. Return a
+ simplified expression for the difference or NULL_TREE. */
+
+static tree
+fold_addr_of_array_ref_difference (location_t loc, tree type,
+ tree aref0, tree aref1)
+{
+ tree base0 = TREE_OPERAND (aref0, 0);
+ tree base1 = TREE_OPERAND (aref1, 0);
+ tree base_offset = build_int_cst (type, 0);
+
+ /* If the bases are array references as well, recurse. If the bases
+ are pointer indirections compute the difference of the pointers.
+ If the bases are equal, we are set. */
+ if ((TREE_CODE (base0) == ARRAY_REF
+ && TREE_CODE (base1) == ARRAY_REF
+ && (base_offset
+ = fold_addr_of_array_ref_difference (loc, type, base0, base1)))
+ || (INDIRECT_REF_P (base0)
+ && INDIRECT_REF_P (base1)
+ && (base_offset = fold_binary_loc (loc, MINUS_EXPR, type,
+ TREE_OPERAND (base0, 0),
+ TREE_OPERAND (base1, 0))))
+ || operand_equal_p (base0, base1, 0))
+ {
+ tree op0 = fold_convert_loc (loc, type, TREE_OPERAND (aref0, 1));
+ tree op1 = fold_convert_loc (loc, type, TREE_OPERAND (aref1, 1));
+ tree esz = fold_convert_loc (loc, type, array_ref_element_size (aref0));
+ tree diff = build2 (MINUS_EXPR, type, op0, op1);
+ return fold_build2_loc (loc, PLUS_EXPR, type,
+ base_offset,
+ fold_build2_loc (loc, MULT_EXPR, type,
+ diff, esz));
+ }
+ return NULL_TREE;
+}
+
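The effect of the new helper at the source level (my example): differences of addresses of array elements, including multi-dimensional ones, now fold to a constant expression in the indices.

  #include <stdio.h>

  static int a[8][8];

  static long
  f (int i, int j, int k, int l)
  {
    /* The helper recurses through the outer ARRAY_REFs, so this
       folds to (i - j) * 8 + (k - l).  */
    return &a[i][k] - &a[j][l];
  }

  int
  main (void)
  {
    printf ("%ld\n", f (3, 1, 2, 0));   /* 18 */
    return 0;
  }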
/* Fold a binary expression of code CODE and type TYPE with operands
OP0 and OP1. LOC is the location of the resulting expression.
Return the folded expression if folding is successful. Otherwise,
}
}
- /* Handle (A1 * C1) + (A2 * C2) with A1, A2 or C1, C2 being the
- same or one. Make sure type is not saturating.
- fold_plusminus_mult_expr will re-associate. */
+ /* Handle (A1 * C1) + (A2 * C2) with A1, A2 or C1, C2 being the same or
+ one. Make sure the type is not saturating and has the signedness of
+ the stripped operands, as fold_plusminus_mult_expr will re-associate.
+ ??? The latter condition should use TYPE_OVERFLOW_* flags instead. */
if ((TREE_CODE (arg0) == MULT_EXPR
|| TREE_CODE (arg1) == MULT_EXPR)
&& !TYPE_SATURATING (type)
+ && TYPE_UNSIGNED (type) == TYPE_UNSIGNED (TREE_TYPE (arg0))
+ && TYPE_UNSIGNED (type) == TYPE_UNSIGNED (TREE_TYPE (arg1))
&& (!FLOAT_TYPE_P (type) || flag_associative_math))
{
tree tem = fold_plusminus_mult_expr (loc, code, type, arg0, arg1);
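What fold_plusminus_mult_expr does, for reference (my example):

  #include <stdio.h>

  static int
  f (int a, int b)
  {
    /* fold_plusminus_mult_expr re-associates this to (a + b) * 4.
       The added check ensures TYPE has the same signedness as the
       stripped operands, so re-association cannot change overflow
       semantics.  */
    return a * 4 + b * 4;
  }

  int
  main (void)
  {
    printf ("%d\n", f (2, 3));   /* 20 */
    return 0;
  }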
&& TREE_CODE (arg1) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (arg1, 0)) == ARRAY_REF)
{
- tree aref0 = TREE_OPERAND (arg0, 0);
- tree aref1 = TREE_OPERAND (arg1, 0);
- if (operand_equal_p (TREE_OPERAND (aref0, 0),
- TREE_OPERAND (aref1, 0), 0))
- {
- tree op0 = fold_convert_loc (loc, type, TREE_OPERAND (aref0, 1));
- tree op1 = fold_convert_loc (loc, type, TREE_OPERAND (aref1, 1));
- tree esz = array_ref_element_size (aref0);
- tree diff = build2 (MINUS_EXPR, type, op0, op1);
- return fold_build2_loc (loc, MULT_EXPR, type, diff,
- fold_convert_loc (loc, type, esz));
-
- }
+ tree tem = fold_addr_of_array_ref_difference (loc, type,
+ TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0));
+ if (tem)
+ return tem;
}
if (FLOAT_TYPE_P (type)
&& (tem = distribute_real_division (loc, code, type, arg0, arg1)))
return tem;
- /* Handle (A1 * C1) - (A2 * C2) with A1, A2 or C1, C2 being the
- same or one. Make sure type is not saturating.
- fold_plusminus_mult_expr will re-associate. */
+ /* Handle (A1 * C1) - (A2 * C2) with A1, A2 or C1, C2 being the same or
+ one. Make sure the type is not saturating and has the signedness of
+ the stripped operands, as fold_plusminus_mult_expr will re-associate.
+ ??? The latter condition should use TYPE_OVERFLOW_* flags instead. */
if ((TREE_CODE (arg0) == MULT_EXPR
|| TREE_CODE (arg1) == MULT_EXPR)
&& !TYPE_SATURATING (type)
+ && TYPE_UNSIGNED (type) == TYPE_UNSIGNED (TREE_TYPE (arg0))
+ && TYPE_UNSIGNED (type) == TYPE_UNSIGNED (TREE_TYPE (arg1))
&& (!FLOAT_TYPE_P (type) || flag_associative_math))
{
tree tem = fold_plusminus_mult_expr (loc, code, type, arg0, arg1);
&& TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- unsigned HOST_WIDE_INT hi1, lo1, hi2, lo2, hi3, lo3, mlo, mhi;
+ double_int c1, c2, c3, msk;
int width = TYPE_PRECISION (type), w;
- hi1 = TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1));
- lo1 = TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1));
- hi2 = TREE_INT_CST_HIGH (arg1);
- lo2 = TREE_INT_CST_LOW (arg1);
+ c1 = tree_to_double_int (TREE_OPERAND (arg0, 1));
+ c2 = tree_to_double_int (arg1);
/* If (C1&C2) == C1, then (X&C1)|C2 becomes (X,C2). */
- if ((hi1 & hi2) == hi1 && (lo1 & lo2) == lo1)
+ if (double_int_equal_p (double_int_and (c1, c2), c1))
return omit_one_operand_loc (loc, type, arg1,
- TREE_OPERAND (arg0, 0));
+ TREE_OPERAND (arg0, 0));
- if (width > HOST_BITS_PER_WIDE_INT)
- {
- mhi = (unsigned HOST_WIDE_INT) -1
- >> (2 * HOST_BITS_PER_WIDE_INT - width);
- mlo = -1;
- }
- else
- {
- mhi = 0;
- mlo = (unsigned HOST_WIDE_INT) -1
- >> (HOST_BITS_PER_WIDE_INT - width);
- }
+ msk = double_int_mask (width);
/* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
- if ((~(hi1 | hi2) & mhi) == 0 && (~(lo1 | lo2) & mlo) == 0)
+ if (double_int_zero_p (double_int_and_not (msk,
+ double_int_ior (c1, c2))))
return fold_build2_loc (loc, BIT_IOR_EXPR, type,
- TREE_OPERAND (arg0, 0), arg1);
+ TREE_OPERAND (arg0, 0), arg1);
/* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2,
unless (C1 & ~C2) | (C2 & C3) for some C3 is a mask of some
mode which allows further optimizations. */
- hi1 &= mhi;
- lo1 &= mlo;
- hi2 &= mhi;
- lo2 &= mlo;
- hi3 = hi1 & ~hi2;
- lo3 = lo1 & ~lo2;
+ c1 = double_int_and (c1, msk);
+ c2 = double_int_and (c2, msk);
+ c3 = double_int_and_not (c1, c2);
for (w = BITS_PER_UNIT;
w <= width && w <= HOST_BITS_PER_WIDE_INT;
w <<= 1)
{
unsigned HOST_WIDE_INT mask
= (unsigned HOST_WIDE_INT) -1 >> (HOST_BITS_PER_WIDE_INT - w);
- if (((lo1 | lo2) & mask) == mask
- && (lo1 & ~mask) == 0 && hi1 == 0)
+ if (((c1.low | c2.low) & mask) == mask
+ && (c1.low & ~mask) == 0 && c1.high == 0)
{
- hi3 = 0;
- lo3 = mask;
+ c3 = uhwi_to_double_int (mask);
break;
}
}
- if (hi3 != hi1 || lo3 != lo1)
+ if (!double_int_equal_p (c3, c1))
return fold_build2_loc (loc, BIT_IOR_EXPR, type,
- fold_build2_loc (loc, BIT_AND_EXPR, type,
- TREE_OPERAND (arg0, 0),
- build_int_cst_wide (type,
- lo3, hi3)),
- arg1);
+ fold_build2_loc (loc, BIT_AND_EXPR, type,
+ TREE_OPERAND (arg0, 0),
+ double_int_to_tree (type,
+ c3)),
+ arg1);
}
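The three identities now computed with double_int, verified exhaustively for 8-bit values (illustration only):

  #include <assert.h>

  int
  main (void)
  {
    unsigned int x;
    for (x = 0; x < 256; x++)
      {
        /* (C1 & C2) == C1:  (X & C1) | C2 == C2.  */
        assert (((x & 0x0f) | 0xff) == 0xff);
        /* (C1 | C2) == ~0 within the width:  (X & C1) | C2 == X | C2.  */
        assert (((x & 0xf3) | 0x0f) == (x | 0x0f));
        /* Otherwise C1 shrinks to C1 & ~C2:
           (X & 0x3c) | 0x0f == (X & 0x30) | 0x0f.  */
        assert (((x & 0x3c) | 0x0f) == ((x & 0x30) | 0x0f));
      }
    return 0;
  }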
/* (X & Y) | Y is (X, Y). */
if (TREE_CODE (arg0) == BIT_XOR_EXPR
&& operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
return fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 0),
- build_int_cst (TREE_TYPE (arg0), 0));
+ build_zero_cst (TREE_TYPE (arg0)));
/* Likewise (X ^ Y) == X becomes Y == 0. X has no side-effects. */
if (TREE_CODE (arg0) == BIT_XOR_EXPR
&& operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0)
&& reorder_operands_p (TREE_OPERAND (arg0, 1), arg1))
return fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 1),
- build_int_cst (TREE_TYPE (arg0), 0));
+ build_zero_cst (TREE_TYPE (arg0)));
/* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
if (TREE_CODE (arg0) == BIT_XOR_EXPR
BIT_XOR_EXPR, itype,
arg00, arg10),
arg01),
- build_int_cst (itype, 0));
+ build_zero_cst (itype));
if (operand_equal_p (arg01, arg10, 0))
return fold_build2_loc (loc, code, type,
BIT_XOR_EXPR, itype,
arg00, arg11),
arg01),
- build_int_cst (itype, 0));
+ build_zero_cst (itype));
if (operand_equal_p (arg00, arg11, 0))
return fold_build2_loc (loc, code, type,
BIT_XOR_EXPR, itype,
arg01, arg10),
arg00),
- build_int_cst (itype, 0));
+ build_zero_cst (itype));
if (operand_equal_p (arg00, arg10, 0))
return fold_build2_loc (loc, code, type,
BIT_XOR_EXPR, itype,
arg01, arg11),
arg00),
- build_int_cst (itype, 0));
+ build_zero_cst (itype));
}
if (TREE_CODE (arg0) == BIT_XOR_EXPR
TREE_OPERAND (arg1, 1)),
build_int_cst (TREE_TYPE (arg0), 0));
+ /* Similarly for X < (cast) (1 << Y). But the cast can't be narrowing,
+ otherwise Y might be >= # of bits in X's type and thus e.g.
+ (unsigned char) (1 << Y) for Y == 15 might be 0.
+ If the cast is widening, then 1 << Y should have unsigned type,
+ otherwise if Y is the number of bits in the signed shift type minus 1,
+ we can't optimize this. E.g. (unsigned long long) (1 << Y) for Y
+ == 31 might be 0xffffffff80000000. */
if ((code == LT_EXPR || code == GE_EXPR)
&& TYPE_UNSIGNED (TREE_TYPE (arg0))
&& CONVERT_EXPR_P (arg1)
&& TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
+ && (TYPE_PRECISION (TREE_TYPE (arg1))
+ >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0))))
+ && (TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0)))
+ || (TYPE_PRECISION (TREE_TYPE (arg1))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)))))
&& integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
{
tem = build2 (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
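Both hazards from the comment, made concrete (my example; assumes 32-bit int and GCC's wrapping behaviour for the ISO-C-undefined 1 << 31):

  #include <stdio.h>

  int
  main (void)
  {
    volatile int y = 15;
    /* Narrowing cast: the set bit is truncated away.  */
    printf ("%u\n", (unsigned) (unsigned char) (1 << y));   /* 0 */
    y = 31;
    /* Widening cast of a signed shift: 1 << 31 yields INT_MIN here,
       which sign-extends to 0xffffffff80000000.  */
    printf ("%llx\n", (unsigned long long) (1 << y));
    return 0;
  }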
/* An ASSERT_EXPR should never be passed to fold_binary. */
gcc_unreachable ();
- case VEC_EXTRACT_EVEN_EXPR:
- case VEC_EXTRACT_ODD_EXPR:
- if ((TREE_CODE (arg0) == VECTOR_CST
- || TREE_CODE (arg0) == CONSTRUCTOR)
- && (TREE_CODE (arg1) == VECTOR_CST
- || TREE_CODE (arg1) == CONSTRUCTOR))
- {
- unsigned int nelts = TYPE_VECTOR_SUBPARTS (type), i;
- unsigned char *sel = XALLOCAVEC (unsigned char, nelts);
-
- for (i = 0; i < nelts; i++)
- switch (code)
- {
- case VEC_EXTRACT_EVEN_EXPR:
- sel[i] = i * 2;
- break;
- case VEC_EXTRACT_ODD_EXPR:
- sel[i] = i * 2 + 1;
- break;
- default:
- gcc_unreachable ();
- }
-
- return fold_vec_perm (type, arg0, arg1, sel);
- }
- return NULL_TREE;
-
case VEC_PACK_TRUNC_EXPR:
case VEC_PACK_FIX_TRUNC_EXPR:
{
&& integer_zerop (op2)
&& (tem = sign_bit_p (TREE_OPERAND (arg0, 0), arg1)))
{
+ /* sign_bit_p looks through both zero and sign extensions,
+ but for this optimization only sign extensions are
+ usable. */
+ tree tem2 = TREE_OPERAND (arg0, 0);
+ while (tem != tem2)
+ {
+ if (TREE_CODE (tem2) != NOP_EXPR
+ || TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (tem2, 0))))
+ {
+ tem = NULL_TREE;
+ break;
+ }
+ tem2 = TREE_OPERAND (tem2, 0);
+ }
/* sign_bit_p only checks ARG1 bits within A's precision.
If <sign bit of A> has wider type than A, bits outside
of A's precision in <sign bit of A> need to be checked.
If they are all 0, this optimization needs to be done
in unsigned A's type, if they are all 1 in signed A's type,
otherwise this can't be done. */
- if (TYPE_PRECISION (TREE_TYPE (tem))
- < TYPE_PRECISION (TREE_TYPE (arg1))
+ if (tem
+ && TYPE_PRECISION (TREE_TYPE (tem))
+ < TYPE_PRECISION (TREE_TYPE (arg1))
&& TYPE_PRECISION (TREE_TYPE (tem))
< TYPE_PRECISION (type))
{