/* Fold a constant sub-tree into a single node for C-compiler
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
This file is part of GCC.
@@ This would also make life easier when this technology is used
@@ for cross-compilers. */
-/* The entry points in this file are fold, size_int_wide, size_binop
- and force_fit_type_double.
+/* The entry points in this file are fold, size_int_wide and size_binop.
fold takes a tree as argument and returns a simplified tree.
size_int takes an integer value, and creates a tree constant
with type from `sizetype'.
- force_fit_type_double takes a constant, an overflowable flag and a
- prior overflow indicator. It forces the value to fit the type and
- sets TREE_OVERFLOW.
-
Note: Since the folders get called on non-gimple code as well as
gimple code, we need to handle GIMPLE tuples as well as their
corresponding tree equivalents. */
#include "tm.h"
#include "flags.h"
#include "tree.h"
-#include "real.h"
-#include "fixed-value.h"
+#include "realmpfr.h"
#include "rtl.h"
#include "expr.h"
#include "tm_p.h"
#include "target.h"
-#include "toplev.h"
+#include "diagnostic-core.h"
#include "intl.h"
#include "ggc.h"
#include "hashtab.h"
#include "langhooks.h"
#include "md5.h"
#include "gimple.h"
+#include "tree-flow.h"
/* Nonzero if we are folding constants inside an initializer; zero
otherwise. */
COMPCODE_TRUE = 15
};
-static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
-static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
static bool negate_mathfn_p (enum built_in_function);
static bool negate_expr_p (tree);
static tree negate_expr (tree);
static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int);
static tree associate_trees (location_t, tree, tree, enum tree_code, tree);
-static tree const_binop (enum tree_code, tree, tree, int);
+static tree const_binop (enum tree_code, tree, tree);
static enum comparison_code comparison_to_compcode (enum tree_code);
static enum tree_code compcode_to_comparison (enum comparison_code);
static int operand_equal_for_comparison_p (tree, tree, tree);
static tree fold_div_compare (location_t, enum tree_code, tree, tree, tree);
static bool reorder_operands_p (const_tree, const_tree);
static tree fold_negate_const (tree, tree);
-static tree fold_not_const (tree, tree);
+static tree fold_not_const (const_tree, tree);
static tree fold_relational_const (enum tree_code, tree, tree, tree);
static tree fold_convert_const (enum tree_code, tree, tree);
+/* Return EXPR_LOCATION of T if it is not UNKNOWN_LOCATION.
+ Otherwise, return LOC. */
-/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
- overflow. Suppose A, B and SUM have the same respective signs as A1, B1,
- and SUM1. Then this yields nonzero if overflow occurred during the
- addition.
-
- Overflow occurs if A and B have the same sign, but A and SUM differ in
- sign. Use `^' to test whether signs differ, and `< 0' to isolate the
- sign. */
-#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
-\f
-/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
- We do that by representing the two-word integer in 4 words, with only
- HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
- number. The value of the word is LOWPART + HIGHPART * BASE. */
-
-#define LOWPART(x) \
- ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
-#define HIGHPART(x) \
- ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
-#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
-
-/* Unpack a two-word integer into 4 words.
- LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
- WORDS points to the array of HOST_WIDE_INTs. */
-
-static void
-encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
+static location_t
+expr_location_or (tree t, location_t loc)
{
- words[0] = LOWPART (low);
- words[1] = HIGHPART (low);
- words[2] = LOWPART (hi);
- words[3] = HIGHPART (hi);
-}
-
-/* Pack an array of 4 words into a two-word integer.
- WORDS points to the array of words.
- The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
-
-static void
-decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
- HOST_WIDE_INT *hi)
-{
- *low = words[0] + words[1] * BASE;
- *hi = words[2] + words[3] * BASE;
-}
-\f
-/* Force the double-word integer L1, H1 to be within the range of the
- integer type TYPE. Stores the properly truncated and sign-extended
- double-word integer in *LV, *HV. Returns true if the operation
- overflows, that is, argument and result are different. */
-
-int
-fit_double_type (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
- unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, const_tree type)
-{
- unsigned HOST_WIDE_INT low0 = l1;
- HOST_WIDE_INT high0 = h1;
- unsigned int prec;
- int sign_extended_type;
-
- if (POINTER_TYPE_P (type)
- || TREE_CODE (type) == OFFSET_TYPE)
- prec = POINTER_SIZE;
- else
- prec = TYPE_PRECISION (type);
-
- /* Size types *are* sign extended. */
- sign_extended_type = (!TYPE_UNSIGNED (type)
- || (TREE_CODE (type) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (type)));
-
- /* First clear all bits that are beyond the type's precision. */
- if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
- ;
- else if (prec > HOST_BITS_PER_WIDE_INT)
- h1 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
- else
- {
- h1 = 0;
- if (prec < HOST_BITS_PER_WIDE_INT)
- l1 &= ~((HOST_WIDE_INT) (-1) << prec);
- }
-
- /* Then do sign extension if necessary. */
- if (!sign_extended_type)
- /* No sign extension */;
- else if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
- /* Correct width already. */;
- else if (prec > HOST_BITS_PER_WIDE_INT)
- {
- /* Sign extend top half? */
- if (h1 & ((unsigned HOST_WIDE_INT)1
- << (prec - HOST_BITS_PER_WIDE_INT - 1)))
- h1 |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
- }
- else if (prec == HOST_BITS_PER_WIDE_INT)
- {
- if ((HOST_WIDE_INT)l1 < 0)
- h1 = -1;
- }
- else
- {
- /* Sign extend bottom half? */
- if (l1 & ((unsigned HOST_WIDE_INT)1 << (prec - 1)))
- {
- h1 = -1;
- l1 |= (HOST_WIDE_INT)(-1) << prec;
- }
- }
-
- *lv = l1;
- *hv = h1;
-
- /* If the value didn't fit, signal overflow. */
- return l1 != low0 || h1 != high0;
+ location_t tloc = EXPR_LOCATION (t);
+ return tloc != UNKNOWN_LOCATION ? tloc : loc;
}
-/* We force the double-int HIGH:LOW to the range of the type TYPE by
- sign or zero extending it.
- OVERFLOWABLE indicates if we are interested
- in overflow of the value, when >0 we are only interested in signed
- overflow, for <0 we are interested in any overflow. OVERFLOWED
- indicates whether overflow has already occurred. CONST_OVERFLOWED
- indicates whether constant overflow has already occurred. We force
- T's value to be within range of T's type (by setting to 0 or 1 all
- the bits outside the type's range). We set TREE_OVERFLOWED if,
- OVERFLOWED is nonzero,
- or OVERFLOWABLE is >0 and signed overflow occurs
- or OVERFLOWABLE is <0 and any overflow occurs
- We return a new tree node for the extended double-int. The node
- is shared if no overflow flags are set. */
+/* Similar to protected_set_expr_location, but never modify x in place,
+ if location can and needs to be set, unshare it. */
-tree
-force_fit_type_double (tree type, unsigned HOST_WIDE_INT low,
- HOST_WIDE_INT high, int overflowable,
- bool overflowed)
+static inline tree
+protected_set_expr_location_unshare (tree x, location_t loc)
{
- int sign_extended_type;
- bool overflow;
-
- /* Size types *are* sign extended. */
- sign_extended_type = (!TYPE_UNSIGNED (type)
- || (TREE_CODE (type) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (type)));
-
- overflow = fit_double_type (low, high, &low, &high, type);
-
- /* If we need to set overflow flags, return a new unshared node. */
- if (overflowed || overflow)
- {
- if (overflowed
- || overflowable < 0
- || (overflowable > 0 && sign_extended_type))
- {
- tree t = make_node (INTEGER_CST);
- TREE_INT_CST_LOW (t) = low;
- TREE_INT_CST_HIGH (t) = high;
- TREE_TYPE (t) = type;
- TREE_OVERFLOW (t) = 1;
- return t;
- }
- }
-
- /* Else build a shared node. */
- return build_int_cst_wide (type, low, high);
-}
-\f
-/* Add two doubleword integers with doubleword result.
- Return nonzero if the operation overflows according to UNSIGNED_P.
- Each argument is given as two `HOST_WIDE_INT' pieces.
- One argument is L1 and H1; the other, L2 and H2.
- The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
-
-int
-add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
- unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
- unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
- bool unsigned_p)
-{
- unsigned HOST_WIDE_INT l;
- HOST_WIDE_INT h;
-
- l = l1 + l2;
- h = h1 + h2 + (l < l1);
-
- *lv = l;
- *hv = h;
-
- if (unsigned_p)
- return (unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1;
- else
- return OVERFLOW_SUM_SIGN (h1, h2, h);
-}
-
-/* Negate a doubleword integer with doubleword result.
- Return nonzero if the operation overflows, assuming it's signed.
- The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
- The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
-
-int
-neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
- unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
-{
- if (l1 == 0)
- {
- *lv = 0;
- *hv = - h1;
- return (*hv & h1) < 0;
- }
- else
- {
- *lv = -l1;
- *hv = ~h1;
- return 0;
- }
-}
-\f
-/* Multiply two doubleword integers with doubleword result.
- Return nonzero if the operation overflows according to UNSIGNED_P.
- Each argument is given as two `HOST_WIDE_INT' pieces.
- One argument is L1 and H1; the other, L2 and H2.
- The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
-
-int
-mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
- unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
- unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
- bool unsigned_p)
-{
- HOST_WIDE_INT arg1[4];
- HOST_WIDE_INT arg2[4];
- HOST_WIDE_INT prod[4 * 2];
- unsigned HOST_WIDE_INT carry;
- int i, j, k;
- unsigned HOST_WIDE_INT toplow, neglow;
- HOST_WIDE_INT tophigh, neghigh;
-
- encode (arg1, l1, h1);
- encode (arg2, l2, h2);
-
- memset (prod, 0, sizeof prod);
-
- for (i = 0; i < 4; i++)
- {
- carry = 0;
- for (j = 0; j < 4; j++)
- {
- k = i + j;
- /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
- carry += arg1[i] * arg2[j];
- /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
- carry += prod[k];
- prod[k] = LOWPART (carry);
- carry = HIGHPART (carry);
- }
- prod[i + 4] = carry;
- }
-
- decode (prod, lv, hv);
- decode (prod + 4, &toplow, &tophigh);
-
- /* Unsigned overflow is immediate. */
- if (unsigned_p)
- return (toplow | tophigh) != 0;
-
- /* Check for signed overflow by calculating the signed representation of the
- top half of the result; it should agree with the low half's sign bit. */
- if (h1 < 0)
- {
- neg_double (l2, h2, &neglow, &neghigh);
- add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
- }
- if (h2 < 0)
- {
- neg_double (l1, h1, &neglow, &neghigh);
- add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
- }
- return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
-}
-\f
-/* Shift the doubleword integer in L1, H1 left by COUNT places
- keeping only PREC bits of result.
- Shift right if COUNT is negative.
- ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
- Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
-
-void
-lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
- HOST_WIDE_INT count, unsigned int prec,
- unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith)
-{
- unsigned HOST_WIDE_INT signmask;
-
- if (count < 0)
- {
- rshift_double (l1, h1, -count, prec, lv, hv, arith);
- return;
- }
-
- if (SHIFT_COUNT_TRUNCATED)
- count %= prec;
-
- if (count >= 2 * HOST_BITS_PER_WIDE_INT)
- {
- /* Shifting by the host word size is undefined according to the
- ANSI standard, so we must handle this as a special case. */
- *hv = 0;
- *lv = 0;
- }
- else if (count >= HOST_BITS_PER_WIDE_INT)
- {
- *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
- *lv = 0;
- }
- else
- {
- *hv = (((unsigned HOST_WIDE_INT) h1 << count)
- | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
- *lv = l1 << count;
- }
-
- /* Sign extend all bits that are beyond the precision. */
-
- signmask = -((prec > HOST_BITS_PER_WIDE_INT
- ? ((unsigned HOST_WIDE_INT) *hv
- >> (prec - HOST_BITS_PER_WIDE_INT - 1))
- : (*lv >> (prec - 1))) & 1);
-
- if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
- ;
- else if (prec >= HOST_BITS_PER_WIDE_INT)
- {
- *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
- *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
- }
- else
- {
- *hv = signmask;
- *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
- *lv |= signmask << prec;
- }
-}
-
-/* Shift the doubleword integer in L1, H1 right by COUNT places
- keeping only PREC bits of result. COUNT must be positive.
- ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
- Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
-
-void
-rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
- HOST_WIDE_INT count, unsigned int prec,
- unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
- int arith)
-{
- unsigned HOST_WIDE_INT signmask;
-
- signmask = (arith
- ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
- : 0);
-
- if (SHIFT_COUNT_TRUNCATED)
- count %= prec;
-
- if (count >= 2 * HOST_BITS_PER_WIDE_INT)
- {
- /* Shifting by the host word size is undefined according to the
- ANSI standard, so we must handle this as a special case. */
- *hv = 0;
- *lv = 0;
- }
- else if (count >= HOST_BITS_PER_WIDE_INT)
+ if (CAN_HAVE_LOCATION_P (x)
+ && EXPR_LOCATION (x) != loc
+ && !(TREE_CODE (x) == SAVE_EXPR
+ || TREE_CODE (x) == TARGET_EXPR
+ || TREE_CODE (x) == BIND_EXPR))
{
- *hv = 0;
- *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
+ x = copy_node (x);
+ SET_EXPR_LOCATION (x, loc);
}
- else
- {
- *hv = (unsigned HOST_WIDE_INT) h1 >> count;
- *lv = ((l1 >> count)
- | ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
- }
-
- /* Zero / sign extend all bits that are beyond the precision. */
-
- if (count >= (HOST_WIDE_INT)prec)
- {
- *hv = signmask;
- *lv = signmask;
- }
- else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
- ;
- else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
- {
- *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
- *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
- }
- else
- {
- *hv = signmask;
- *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
- *lv |= signmask << (prec - count);
- }
-}
-\f
-/* Rotate the doubleword integer in L1, H1 left by COUNT places
- keeping only PREC bits of result.
- Rotate right if COUNT is negative.
- Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
-
-void
-lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
- HOST_WIDE_INT count, unsigned int prec,
- unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
-{
- unsigned HOST_WIDE_INT s1l, s2l;
- HOST_WIDE_INT s1h, s2h;
-
- count %= prec;
- if (count < 0)
- count += prec;
-
- lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
- rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
- *lv = s1l | s2l;
- *hv = s1h | s2h;
+ return x;
}
-/* Rotate the doubleword integer in L1, H1 left by COUNT places
- keeping only PREC bits of result. COUNT must be positive.
- Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
-void
-rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
- HOST_WIDE_INT count, unsigned int prec,
- unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
-{
- unsigned HOST_WIDE_INT s1l, s2l;
- HOST_WIDE_INT s1h, s2h;
-
- count %= prec;
- if (count < 0)
- count += prec;
+/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
+ overflow. Suppose A, B and SUM have the same respective signs as A1, B1,
+ and SUM1. Then this yields nonzero if overflow occurred during the
+ addition.
- rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
- lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
- *lv = s1l | s2l;
- *hv = s1h | s2h;
-}
+ Overflow occurs if A and B have the same sign, but A and SUM differ in
+ sign. Use `^' to test whether signs differ, and `< 0' to isolate the
+ sign. */
+#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
\f
-/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
- for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
- CODE is a tree code for a kind of division, one of
- TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
- or EXACT_DIV_EXPR
- It controls how the quotient is rounded to an integer.
- Return nonzero if the operation overflows.
- UNS nonzero says do unsigned division. */
-
-int
-div_and_round_double (enum tree_code code, int uns,
- unsigned HOST_WIDE_INT lnum_orig, /* num == numerator == dividend */
- HOST_WIDE_INT hnum_orig,
- unsigned HOST_WIDE_INT lden_orig, /* den == denominator == divisor */
- HOST_WIDE_INT hden_orig,
- unsigned HOST_WIDE_INT *lquo,
- HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
- HOST_WIDE_INT *hrem)
-{
- int quo_neg = 0;
- HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
- HOST_WIDE_INT den[4], quo[4];
- int i, j;
- unsigned HOST_WIDE_INT work;
- unsigned HOST_WIDE_INT carry = 0;
- unsigned HOST_WIDE_INT lnum = lnum_orig;
- HOST_WIDE_INT hnum = hnum_orig;
- unsigned HOST_WIDE_INT lden = lden_orig;
- HOST_WIDE_INT hden = hden_orig;
- int overflow = 0;
-
- if (hden == 0 && lden == 0)
- overflow = 1, lden = 1;
-
- /* Calculate quotient sign and convert operands to unsigned. */
- if (!uns)
- {
- if (hnum < 0)
- {
- quo_neg = ~ quo_neg;
- /* (minimum integer) / (-1) is the only overflow case. */
- if (neg_double (lnum, hnum, &lnum, &hnum)
- && ((HOST_WIDE_INT) lden & hden) == -1)
- overflow = 1;
- }
- if (hden < 0)
- {
- quo_neg = ~ quo_neg;
- neg_double (lden, hden, &lden, &hden);
- }
- }
-
- if (hnum == 0 && hden == 0)
- { /* single precision */
- *hquo = *hrem = 0;
- /* This unsigned division rounds toward zero. */
- *lquo = lnum / lden;
- goto finish_up;
- }
-
- if (hnum == 0)
- { /* trivial case: dividend < divisor */
- /* hden != 0 already checked. */
- *hquo = *lquo = 0;
- *hrem = hnum;
- *lrem = lnum;
- goto finish_up;
- }
-
- memset (quo, 0, sizeof quo);
-
- memset (num, 0, sizeof num); /* to zero 9th element */
- memset (den, 0, sizeof den);
-
- encode (num, lnum, hnum);
- encode (den, lden, hden);
-
- /* Special code for when the divisor < BASE. */
- if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
- {
- /* hnum != 0 already checked. */
- for (i = 4 - 1; i >= 0; i--)
- {
- work = num[i] + carry * BASE;
- quo[i] = work / lden;
- carry = work % lden;
- }
- }
- else
- {
- /* Full double precision division,
- with thanks to Don Knuth's "Seminumerical Algorithms". */
- int num_hi_sig, den_hi_sig;
- unsigned HOST_WIDE_INT quo_est, scale;
-
- /* Find the highest nonzero divisor digit. */
- for (i = 4 - 1;; i--)
- if (den[i] != 0)
- {
- den_hi_sig = i;
- break;
- }
-
- /* Insure that the first digit of the divisor is at least BASE/2.
- This is required by the quotient digit estimation algorithm. */
-
- scale = BASE / (den[den_hi_sig] + 1);
- if (scale > 1)
- { /* scale divisor and dividend */
- carry = 0;
- for (i = 0; i <= 4 - 1; i++)
- {
- work = (num[i] * scale) + carry;
- num[i] = LOWPART (work);
- carry = HIGHPART (work);
- }
-
- num[4] = carry;
- carry = 0;
- for (i = 0; i <= 4 - 1; i++)
- {
- work = (den[i] * scale) + carry;
- den[i] = LOWPART (work);
- carry = HIGHPART (work);
- if (den[i] != 0) den_hi_sig = i;
- }
- }
-
- num_hi_sig = 4;
-
- /* Main loop */
- for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
- {
- /* Guess the next quotient digit, quo_est, by dividing the first
- two remaining dividend digits by the high order quotient digit.
- quo_est is never low and is at most 2 high. */
- unsigned HOST_WIDE_INT tmp;
-
- num_hi_sig = i + den_hi_sig + 1;
- work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
- if (num[num_hi_sig] != den[den_hi_sig])
- quo_est = work / den[den_hi_sig];
- else
- quo_est = BASE - 1;
-
- /* Refine quo_est so it's usually correct, and at most one high. */
- tmp = work - quo_est * den[den_hi_sig];
- if (tmp < BASE
- && (den[den_hi_sig - 1] * quo_est
- > (tmp * BASE + num[num_hi_sig - 2])))
- quo_est--;
-
- /* Try QUO_EST as the quotient digit, by multiplying the
- divisor by QUO_EST and subtracting from the remaining dividend.
- Keep in mind that QUO_EST is the I - 1st digit. */
-
- carry = 0;
- for (j = 0; j <= den_hi_sig; j++)
- {
- work = quo_est * den[j] + carry;
- carry = HIGHPART (work);
- work = num[i + j] - LOWPART (work);
- num[i + j] = LOWPART (work);
- carry += HIGHPART (work) != 0;
- }
-
- /* If quo_est was high by one, then num[i] went negative and
- we need to correct things. */
- if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
- {
- quo_est--;
- carry = 0; /* add divisor back in */
- for (j = 0; j <= den_hi_sig; j++)
- {
- work = num[i + j] + den[j] + carry;
- carry = HIGHPART (work);
- num[i + j] = LOWPART (work);
- }
-
- num [num_hi_sig] += carry;
- }
-
- /* Store the quotient digit. */
- quo[i] = quo_est;
- }
- }
-
- decode (quo, lquo, hquo);
-
- finish_up:
- /* If result is negative, make it so. */
- if (quo_neg)
- neg_double (*lquo, *hquo, lquo, hquo);
-
- /* Compute trial remainder: rem = num - (quo * den) */
- mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
- neg_double (*lrem, *hrem, lrem, hrem);
- add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
-
- switch (code)
- {
- case TRUNC_DIV_EXPR:
- case TRUNC_MOD_EXPR: /* round toward zero */
- case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
- return overflow;
-
- case FLOOR_DIV_EXPR:
- case FLOOR_MOD_EXPR: /* round toward negative infinity */
- if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
- {
- /* quo = quo - 1; */
- add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
- lquo, hquo);
- }
- else
- return overflow;
- break;
-
- case CEIL_DIV_EXPR:
- case CEIL_MOD_EXPR: /* round toward positive infinity */
- if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
- {
- add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
- lquo, hquo);
- }
- else
- return overflow;
- break;
-
- case ROUND_DIV_EXPR:
- case ROUND_MOD_EXPR: /* round to closest integer */
- {
- unsigned HOST_WIDE_INT labs_rem = *lrem;
- HOST_WIDE_INT habs_rem = *hrem;
- unsigned HOST_WIDE_INT labs_den = lden, ltwice;
- HOST_WIDE_INT habs_den = hden, htwice;
-
- /* Get absolute values. */
- if (*hrem < 0)
- neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
- if (hden < 0)
- neg_double (lden, hden, &labs_den, &habs_den);
-
- /* If (2 * abs (lrem) >= abs (lden)), adjust the quotient. */
- mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
- labs_rem, habs_rem, <wice, &htwice);
-
- if (((unsigned HOST_WIDE_INT) habs_den
- < (unsigned HOST_WIDE_INT) htwice)
- || (((unsigned HOST_WIDE_INT) habs_den
- == (unsigned HOST_WIDE_INT) htwice)
- && (labs_den <= ltwice)))
- {
- if (*hquo < 0)
- /* quo = quo - 1; */
- add_double (*lquo, *hquo,
- (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
- else
- /* quo = quo + 1; */
- add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
- lquo, hquo);
- }
- else
- return overflow;
- }
- break;
-
- default:
- gcc_unreachable ();
- }
-
- /* Compute true remainder: rem = num - (quo * den) */
- mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
- neg_double (*lrem, *hrem, lrem, hrem);
- add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
- return overflow;
-}
-
/* If ARG2 divides ARG1 with zero remainder, carries out the division
of type CODE and returns the quotient.
Otherwise returns NULL_TREE. */
tree
div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2)
{
- unsigned HOST_WIDE_INT int1l, int2l;
- HOST_WIDE_INT int1h, int2h;
- unsigned HOST_WIDE_INT quol, reml;
- HOST_WIDE_INT quoh, remh;
- tree type = TREE_TYPE (arg1);
- int uns = TYPE_UNSIGNED (type);
+ double_int quo, rem;
+ int uns;
- int1l = TREE_INT_CST_LOW (arg1);
- int1h = TREE_INT_CST_HIGH (arg1);
- /* &obj[0] + -128 really should be compiled as &obj[-8] rather than
- &obj[some_exotic_number]. */
- if (POINTER_TYPE_P (type))
- {
- uns = false;
- type = signed_type_for (type);
- fit_double_type (int1l, int1h, &int1l, &int1h,
- type);
- }
- else
- fit_double_type (int1l, int1h, &int1l, &int1h, type);
- int2l = TREE_INT_CST_LOW (arg2);
- int2h = TREE_INT_CST_HIGH (arg2);
+ /* The sign of the division is according to operand two, that
+ does the correct thing for POINTER_PLUS_EXPR where we want
+ a signed division. */
+ uns = TYPE_UNSIGNED (TREE_TYPE (arg2));
+ if (TREE_CODE (TREE_TYPE (arg2)) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (TREE_TYPE (arg2)))
+ uns = false;
- div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
- &quol, &quoh, &reml, &remh);
- if (remh != 0 || reml != 0)
- return NULL_TREE;
+ quo = double_int_divmod (tree_to_double_int (arg1),
+ tree_to_double_int (arg2),
+ uns, code, &rem);
- return build_int_cst_wide (type, quol, quoh);
+ if (double_int_zero_p (rem))
+ return build_int_cst_wide (TREE_TYPE (arg1), quo.low, quo.high);
+
+ return NULL_TREE;
}
\f
/* This is nonzero if we should defer warnings about undefined
CASE_FLT_FN (BUILT_IN_NEARBYINT):
CASE_FLT_FN (BUILT_IN_RINT):
return !flag_rounding_math;
-
+
default:
break;
}
&& TYPE_OVERFLOW_WRAPS (type));
case FIXED_CST:
- case REAL_CST:
case NEGATE_EXPR:
return true;
+ case REAL_CST:
+ /* We want to canonicalize to positive real constants. Pretend
+ that only negative ones can be easily negated. */
+ return REAL_VALUE_NEGATIVE (TREE_REAL_CST (t));
+
case COMPLEX_CST:
return negate_expr_p (TREE_REALPART (t))
&& negate_expr_p (TREE_IMAGPART (t));
return fold_build2_loc (loc, PLUS_EXPR, type, TREE_OPERAND (t, 0),
build_int_cst (type, 1));
break;
-
+
case INTEGER_CST:
tem = fold_negate_const (t, type);
if (TREE_OVERFLOW (tem) == TREE_OVERFLOW (t)
fold_negate_expr (loc, TREE_OPERAND (t, 0)),
fold_negate_expr (loc, TREE_OPERAND (t, 1)));
break;
-
+
case CONJ_EXPR:
if (negate_expr_p (t))
return fold_build1_loc (loc, CONJ_EXPR, type,
tem = fold_negate_expr (loc, t);
if (!tem)
- {
- tem = build1 (NEGATE_EXPR, TREE_TYPE (t), t);
- SET_EXPR_LOCATION (tem, loc);
- }
+ tem = build1_loc (loc, NEGATE_EXPR, TREE_TYPE (t), t);
return fold_convert_loc (loc, type, tem);
}
\f
static tree
associate_trees (location_t loc, tree t1, tree t2, enum tree_code code, tree type)
{
- tree tem;
-
if (t1 == 0)
return t2;
else if (t2 == 0)
if (code == PLUS_EXPR)
{
if (TREE_CODE (t1) == NEGATE_EXPR)
- tem = build2 (MINUS_EXPR, type, fold_convert_loc (loc, type, t2),
- fold_convert_loc (loc, type, TREE_OPERAND (t1, 0)));
+ return build2_loc (loc, MINUS_EXPR, type,
+ fold_convert_loc (loc, type, t2),
+ fold_convert_loc (loc, type,
+ TREE_OPERAND (t1, 0)));
else if (TREE_CODE (t2) == NEGATE_EXPR)
- tem = build2 (MINUS_EXPR, type, fold_convert_loc (loc, type, t1),
- fold_convert_loc (loc, type, TREE_OPERAND (t2, 0)));
+ return build2_loc (loc, MINUS_EXPR, type,
+ fold_convert_loc (loc, type, t1),
+ fold_convert_loc (loc, type,
+ TREE_OPERAND (t2, 0)));
else if (integer_zerop (t2))
return fold_convert_loc (loc, type, t1);
}
return fold_convert_loc (loc, type, t1);
}
- tem = build2 (code, type, fold_convert_loc (loc, type, t1),
- fold_convert_loc (loc, type, t2));
- goto associate_trees_exit;
+ return build2_loc (loc, code, type, fold_convert_loc (loc, type, t1),
+ fold_convert_loc (loc, type, t2));
}
return fold_build2_loc (loc, code, type, fold_convert_loc (loc, type, t1),
- fold_convert_loc (loc, type, t2));
- associate_trees_exit:
- protected_set_expr_location (tem, loc);
- return tem;
+ fold_convert_loc (loc, type, t2));
}
\f
/* Check whether TYPE1 and TYPE2 are equivalent integer types, suitable
/* Combine two integer constants ARG1 and ARG2 under operation CODE
to produce a new constant. Return NULL_TREE if we don't know how
- to evaluate CODE at compile-time.
-
- If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
+ to evaluate CODE at compile-time. */
tree
-int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2, int notrunc)
-{
- unsigned HOST_WIDE_INT int1l, int2l;
- HOST_WIDE_INT int1h, int2h;
- unsigned HOST_WIDE_INT low;
- HOST_WIDE_INT hi;
- unsigned HOST_WIDE_INT garbagel;
- HOST_WIDE_INT garbageh;
+int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2)
+{
+ double_int op1, op2, res, tmp;
tree t;
tree type = TREE_TYPE (arg1);
- int uns = TYPE_UNSIGNED (type);
- int is_sizetype
+ bool uns = TYPE_UNSIGNED (type);
+ bool is_sizetype
= (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
- int overflow = 0;
+ bool overflow = false;
- int1l = TREE_INT_CST_LOW (arg1);
- int1h = TREE_INT_CST_HIGH (arg1);
- int2l = TREE_INT_CST_LOW (arg2);
- int2h = TREE_INT_CST_HIGH (arg2);
+ op1 = tree_to_double_int (arg1);
+ op2 = tree_to_double_int (arg2);
switch (code)
{
case BIT_IOR_EXPR:
- low = int1l | int2l, hi = int1h | int2h;
+ res = double_int_ior (op1, op2);
break;
case BIT_XOR_EXPR:
- low = int1l ^ int2l, hi = int1h ^ int2h;
+ res = double_int_xor (op1, op2);
break;
case BIT_AND_EXPR:
- low = int1l & int2l, hi = int1h & int2h;
+ res = double_int_and (op1, op2);
break;
case RSHIFT_EXPR:
- int2l = -int2l;
+ res = double_int_rshift (op1, double_int_to_shwi (op2),
+ TYPE_PRECISION (type), !uns);
+ break;
+
case LSHIFT_EXPR:
/* It's unclear from the C standard whether shifts can overflow.
The following code ignores overflow; perhaps a C standard
interpretation ruling is needed. */
- lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
- &low, &hi, !uns);
+ res = double_int_lshift (op1, double_int_to_shwi (op2),
+ TYPE_PRECISION (type), !uns);
break;
case RROTATE_EXPR:
- int2l = - int2l;
+ res = double_int_rrotate (op1, double_int_to_shwi (op2),
+ TYPE_PRECISION (type));
+ break;
+
case LROTATE_EXPR:
- lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
- &low, &hi);
+ res = double_int_lrotate (op1, double_int_to_shwi (op2),
+ TYPE_PRECISION (type));
break;
case PLUS_EXPR:
- overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
+ overflow = add_double (op1.low, op1.high, op2.low, op2.high,
+ &res.low, &res.high);
break;
case MINUS_EXPR:
- neg_double (int2l, int2h, &low, &hi);
- add_double (int1l, int1h, low, hi, &low, &hi);
- overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h);
+ neg_double (op2.low, op2.high, &res.low, &res.high);
+ add_double (op1.low, op1.high, res.low, res.high,
+ &res.low, &res.high);
+ overflow = OVERFLOW_SUM_SIGN (res.high, op2.high, op1.high);
break;
case MULT_EXPR:
- overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
+ overflow = mul_double (op1.low, op1.high, op2.low, op2.high,
+ &res.low, &res.high);
break;
case TRUNC_DIV_EXPR:
case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
case EXACT_DIV_EXPR:
/* This is a shortcut for a common special case. */
- if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
+ if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0
&& !TREE_OVERFLOW (arg1)
&& !TREE_OVERFLOW (arg2)
- && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
+ && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0)
{
if (code == CEIL_DIV_EXPR)
- int1l += int2l - 1;
+ op1.low += op2.low - 1;
- low = int1l / int2l, hi = 0;
+ res.low = op1.low / op2.low, res.high = 0;
break;
}
/* ... fall through ... */
case ROUND_DIV_EXPR:
- if (int2h == 0 && int2l == 0)
+ if (double_int_zero_p (op2))
return NULL_TREE;
- if (int2h == 0 && int2l == 1)
+ if (double_int_one_p (op2))
{
- low = int1l, hi = int1h;
+ res = op1;
break;
}
- if (int1l == int2l && int1h == int2h
- && ! (int1l == 0 && int1h == 0))
+ if (double_int_equal_p (op1, op2)
+ && ! double_int_zero_p (op1))
{
- low = 1, hi = 0;
+ res = double_int_one;
break;
}
- overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
- &low, &hi, &garbagel, &garbageh);
+ overflow = div_and_round_double (code, uns,
+ op1.low, op1.high, op2.low, op2.high,
+ &res.low, &res.high,
+ &tmp.low, &tmp.high);
break;
case TRUNC_MOD_EXPR:
case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
/* This is a shortcut for a common special case. */
- if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
+ if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0
&& !TREE_OVERFLOW (arg1)
&& !TREE_OVERFLOW (arg2)
- && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
+ && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0)
{
if (code == CEIL_MOD_EXPR)
- int1l += int2l - 1;
- low = int1l % int2l, hi = 0;
+ op1.low += op2.low - 1;
+ res.low = op1.low % op2.low, res.high = 0;
break;
}
/* ... fall through ... */
case ROUND_MOD_EXPR:
- if (int2h == 0 && int2l == 0)
+ if (double_int_zero_p (op2))
return NULL_TREE;
overflow = div_and_round_double (code, uns,
- int1l, int1h, int2l, int2h,
- &garbagel, &garbageh, &low, &hi);
+ op1.low, op1.high, op2.low, op2.high,
+ &tmp.low, &tmp.high,
+ &res.low, &res.high);
break;
case MIN_EXPR:
- case MAX_EXPR:
- if (uns)
- low = (((unsigned HOST_WIDE_INT) int1h
- < (unsigned HOST_WIDE_INT) int2h)
- || (((unsigned HOST_WIDE_INT) int1h
- == (unsigned HOST_WIDE_INT) int2h)
- && int1l < int2l));
- else
- low = (int1h < int2h
- || (int1h == int2h && int1l < int2l));
+ res = double_int_min (op1, op2, uns);
+ break;
- if (low == (code == MIN_EXPR))
- low = int1l, hi = int1h;
- else
- low = int2l, hi = int2h;
+ case MAX_EXPR:
+ res = double_int_max (op1, op2, uns);
break;
default:
return NULL_TREE;
}
- if (notrunc)
- {
- t = build_int_cst_wide (TREE_TYPE (arg1), low, hi);
-
- /* Propagate overflow flags ourselves. */
- if (((!uns || is_sizetype) && overflow)
- | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
- {
- t = copy_node (t);
- TREE_OVERFLOW (t) = 1;
- }
- }
- else
- t = force_fit_type_double (TREE_TYPE (arg1), low, hi, 1,
- ((!uns || is_sizetype) && overflow)
- | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
+ t = force_fit_type_double (TREE_TYPE (arg1), res, 1,
+ ((!uns || is_sizetype) && overflow)
+ | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
return t;
}
/* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
constant. We assume ARG1 and ARG2 have the same data type, or at least
are the same kind of constant and the same machine mode. Return zero if
- combining the constants is not allowed in the current operating mode.
-
- If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
+ combining the constants is not allowed in the current operating mode. */
static tree
-const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
+const_binop (enum tree_code code, tree arg1, tree arg2)
{
/* Sanity check for the recursive cases. */
if (!arg1 || !arg2)
STRIP_NOPS (arg2);
if (TREE_CODE (arg1) == INTEGER_CST)
- return int_const_binop (code, arg1, arg2, notrunc);
+ return int_const_binop (code, arg1, arg2);
if (TREE_CODE (arg1) == REAL_CST)
{
{
case PLUS_EXPR:
case MINUS_EXPR:
- real = const_binop (code, r1, r2, notrunc);
- imag = const_binop (code, i1, i2, notrunc);
+ real = const_binop (code, r1, r2);
+ imag = const_binop (code, i1, i2);
break;
case MULT_EXPR:
-#ifdef HAVE_mpc
if (COMPLEX_FLOAT_TYPE_P (type))
- return do_mpc_arg2 (arg1, arg2, type, mpc_mul);
-#endif
+ return do_mpc_arg2 (arg1, arg2, type,
+ /* do_nonfinite= */ folding_initializer,
+ mpc_mul);
real = const_binop (MINUS_EXPR,
- const_binop (MULT_EXPR, r1, r2, notrunc),
- const_binop (MULT_EXPR, i1, i2, notrunc),
- notrunc);
+ const_binop (MULT_EXPR, r1, r2),
+ const_binop (MULT_EXPR, i1, i2));
imag = const_binop (PLUS_EXPR,
- const_binop (MULT_EXPR, r1, i2, notrunc),
- const_binop (MULT_EXPR, i1, r2, notrunc),
- notrunc);
+ const_binop (MULT_EXPR, r1, i2),
+ const_binop (MULT_EXPR, i1, r2));
break;
case RDIV_EXPR:
-#ifdef HAVE_mpc
if (COMPLEX_FLOAT_TYPE_P (type))
- return do_mpc_arg2 (arg1, arg2, type, mpc_div);
-#endif
-
+ return do_mpc_arg2 (arg1, arg2, type,
+ /* do_nonfinite= */ folding_initializer,
+ mpc_div);
+ /* Fallthru ... */
+ case TRUNC_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ if (flag_complex_method == 0)
{
+ /* Keep this algorithm in sync with
+ tree-complex.c:expand_complex_div_straight().
+
+ Expand complex division to scalars, straightforward algorithm.
+ a / b = ((ar*br + ai*bi)/t) + i((ai*br - ar*bi)/t)
+ t = br*br + bi*bi
+ */
tree magsquared
= const_binop (PLUS_EXPR,
- const_binop (MULT_EXPR, r2, r2, notrunc),
- const_binop (MULT_EXPR, i2, i2, notrunc),
- notrunc);
+ const_binop (MULT_EXPR, r2, r2),
+ const_binop (MULT_EXPR, i2, i2));
tree t1
= const_binop (PLUS_EXPR,
- const_binop (MULT_EXPR, r1, r2, notrunc),
- const_binop (MULT_EXPR, i1, i2, notrunc),
- notrunc);
+ const_binop (MULT_EXPR, r1, r2),
+ const_binop (MULT_EXPR, i1, i2));
tree t2
= const_binop (MINUS_EXPR,
- const_binop (MULT_EXPR, i1, r2, notrunc),
- const_binop (MULT_EXPR, r1, i2, notrunc),
- notrunc);
+ const_binop (MULT_EXPR, i1, r2),
+ const_binop (MULT_EXPR, r1, i2));
+
+ real = const_binop (code, t1, magsquared);
+ imag = const_binop (code, t2, magsquared);
+ }
+ else
+ {
+ /* Keep this algorithm in sync with
+ tree-complex.c:expand_complex_div_wide().
- if (INTEGRAL_TYPE_P (TREE_TYPE (r1)))
- code = TRUNC_DIV_EXPR;
+ Expand complex division to scalars, modified algorithm to minimize
+ overflow with wide input ranges. */
+ tree compare = fold_build2 (LT_EXPR, boolean_type_node,
+ fold_abs_const (r2, TREE_TYPE (type)),
+ fold_abs_const (i2, TREE_TYPE (type)));
- real = const_binop (code, t1, magsquared, notrunc);
- imag = const_binop (code, t2, magsquared, notrunc);
+ if (integer_nonzerop (compare))
+ {
+ /* In the TRUE branch, we compute
+ ratio = br/bi;
+ div = (br * ratio) + bi;
+ tr = (ar * ratio) + ai;
+ ti = (ai * ratio) - ar;
+ tr = tr / div;
+ ti = ti / div; */
+ tree ratio = const_binop (code, r2, i2);
+ tree div = const_binop (PLUS_EXPR, i2,
+ const_binop (MULT_EXPR, r2, ratio));
+ real = const_binop (MULT_EXPR, r1, ratio);
+ real = const_binop (PLUS_EXPR, real, i1);
+ real = const_binop (code, real, div);
+
+ imag = const_binop (MULT_EXPR, i1, ratio);
+ imag = const_binop (MINUS_EXPR, imag, r1);
+ imag = const_binop (code, imag, div);
+ }
+ else
+ {
+ /* In the FALSE branch, we compute
+ ratio = d/c;
+ div = (d * ratio) + c;
+ tr = (b * ratio) + a;
+ ti = b - (a * ratio);
+ tr = tr / div;
+ ti = ti / div; */
+ tree ratio = const_binop (code, i2, r2);
+ tree div = const_binop (PLUS_EXPR, r2,
+ const_binop (MULT_EXPR, i2, ratio));
+
+ real = const_binop (MULT_EXPR, i1, ratio);
+ real = const_binop (PLUS_EXPR, real, r1);
+ real = const_binop (code, real, div);
+
+ imag = const_binop (MULT_EXPR, r1, ratio);
+ imag = const_binop (MINUS_EXPR, i1, imag);
+ imag = const_binop (code, imag, div);
+ }
}
break;
tree type = TREE_TYPE(arg1);
int count = TYPE_VECTOR_SUBPARTS (type), i;
tree elements1, elements2, list = NULL_TREE;
-
+
if(TREE_CODE(arg2) != VECTOR_CST)
return NULL_TREE;
-
+
elements1 = TREE_VECTOR_CST_ELTS (arg1);
elements2 = TREE_VECTOR_CST_ELTS (arg2);
for (i = 0; i < count; i++)
{
tree elem1, elem2, elem;
-
+
/* The trailing elements can be empty and should be treated as 0 */
if(!elements1)
elem1 = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
{
elem1 = TREE_VALUE(elements1);
elements1 = TREE_CHAIN (elements1);
- }
-
+ }
+
if(!elements2)
elem2 = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
else
elem2 = TREE_VALUE(elements2);
elements2 = TREE_CHAIN (elements2);
}
-
- elem = const_binop (code, elem1, elem2, notrunc);
-
+
+ elem = const_binop (code, elem1, elem2);
+
/* It is possible that const_binop cannot handle the given
code and return NULL_TREE */
if(elem == NULL_TREE)
return NULL_TREE;
-
+
list = tree_cons (NULL_TREE, elem, list);
}
- return build_vector(type, nreverse(list));
+ return build_vector(type, nreverse(list));
}
return NULL_TREE;
}
}
/* Handle general case of two integer constants. */
- return int_const_binop (code, arg0, arg1, 0);
+ return int_const_binop (code, arg0, arg1);
}
return fold_build2_loc (loc, code, type, arg0, arg1);
/* Given an integer constant, make new constant with new type,
appropriately sign-extended or truncated. */
- t = force_fit_type_double (type, TREE_INT_CST_LOW (arg1),
- TREE_INT_CST_HIGH (arg1),
- /* Don't set the overflow when
- converting from a pointer, */
- !POINTER_TYPE_P (TREE_TYPE (arg1))
- /* or to a sizetype with same signedness
- and the precision is unchanged.
- ??? sizetype is always sign-extended,
- but its signedness depends on the
- frontend. Thus we see spurious overflows
- here if we do not check this. */
- && !((TYPE_PRECISION (TREE_TYPE (arg1))
- == TYPE_PRECISION (type))
- && (TYPE_UNSIGNED (TREE_TYPE (arg1))
- == TYPE_UNSIGNED (type))
- && ((TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (TREE_TYPE (arg1)))
- || (TREE_CODE (type) == INTEGER_TYPE
- && TYPE_IS_SIZETYPE (type)))),
+ t = force_fit_type_double (type, tree_to_double_int (arg1),
+ !POINTER_TYPE_P (TREE_TYPE (arg1)),
(TREE_INT_CST_HIGH (arg1) < 0
&& (TYPE_UNSIGNED (type)
< TYPE_UNSIGNED (TREE_TYPE (arg1))))
C and C++ standards that simply state that the behavior of
FP-to-integer conversion is unspecified upon overflow. */
- HOST_WIDE_INT high, low;
+ double_int val;
REAL_VALUE_TYPE r;
REAL_VALUE_TYPE x = TREE_REAL_CST (arg1);
if (REAL_VALUE_ISNAN (r))
{
overflow = 1;
- high = 0;
- low = 0;
+ val = double_int_zero;
}
/* See if R is less than the lower bound or greater than the
if (REAL_VALUES_LESS (r, l))
{
overflow = 1;
- high = TREE_INT_CST_HIGH (lt);
- low = TREE_INT_CST_LOW (lt);
+ val = tree_to_double_int (lt);
}
}
if (REAL_VALUES_LESS (u, r))
{
overflow = 1;
- high = TREE_INT_CST_HIGH (ut);
- low = TREE_INT_CST_LOW (ut);
+ val = tree_to_double_int (ut);
}
}
}
if (! overflow)
- REAL_VALUE_TO_INT (&low, &high, r);
+ real_to_integer2 ((HOST_WIDE_INT *) &val.low, &val.high, &r);
- t = force_fit_type_double (type, low, high, -1,
- overflow | TREE_OVERFLOW (arg1));
+ t = force_fit_type_double (type, val, -1, overflow | TREE_OVERFLOW (arg1));
return t;
}
mode = TREE_FIXED_CST (arg1).mode;
if (GET_MODE_FBIT (mode) < 2 * HOST_BITS_PER_WIDE_INT)
{
- lshift_double (temp.low, temp.high,
- - GET_MODE_FBIT (mode), 2 * HOST_BITS_PER_WIDE_INT,
- &temp.low, &temp.high, SIGNED_FIXED_POINT_MODE_P (mode));
+ temp = double_int_rshift (temp, GET_MODE_FBIT (mode),
+ HOST_BITS_PER_DOUBLE_INT,
+ SIGNED_FIXED_POINT_MODE_P (mode));
/* Left shift temp to temp_trunc by fbit. */
- lshift_double (temp.low, temp.high,
- GET_MODE_FBIT (mode), 2 * HOST_BITS_PER_WIDE_INT,
- &temp_trunc.low, &temp_trunc.high,
- SIGNED_FIXED_POINT_MODE_P (mode));
+ temp_trunc = double_int_lshift (temp, GET_MODE_FBIT (mode),
+ HOST_BITS_PER_DOUBLE_INT,
+ SIGNED_FIXED_POINT_MODE_P (mode));
}
else
{
- temp.low = 0;
- temp.high = 0;
- temp_trunc.low = 0;
- temp_trunc.high = 0;
+ temp = double_int_zero;
+ temp_trunc = double_int_zero;
}
/* If FIXED_CST is negative, we need to round the value toward 0.
By checking if the fractional bits are not zero to add 1 to temp. */
- if (SIGNED_FIXED_POINT_MODE_P (mode) && temp_trunc.high < 0
+ if (SIGNED_FIXED_POINT_MODE_P (mode)
+ && double_int_negative_p (temp_trunc)
&& !double_int_equal_p (TREE_FIXED_CST (arg1).data, temp_trunc))
- {
- double_int one;
- one.low = 1;
- one.high = 0;
- temp = double_int_add (temp, one);
- }
+ temp = double_int_add (temp, double_int_one);
/* Given a fixed-point constant, make new constant with new type,
appropriately sign-extended or truncated. */
- t = force_fit_type_double (type, temp.low, temp.high, -1,
- (temp.high < 0
+ t = force_fit_type_double (type, temp, -1,
+ (double_int_negative_p (temp)
&& (TYPE_UNSIGNED (type)
< TYPE_UNSIGNED (TREE_TYPE (arg1))))
| TREE_OVERFLOW (arg1));
static tree
build_zero_vector (tree type)
{
- tree elem, list;
- int i, units;
+ tree t;
- elem = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
- units = TYPE_VECTOR_SUBPARTS (type);
-
- list = NULL_TREE;
- for (i = 0; i < units; i++)
- list = tree_cons (NULL_TREE, elem, list);
- return build_vector (type, list);
+ t = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
+ return build_vector_from_val (type, t);
}
/* Returns true, if ARG is convertible to TYPE using a NOP_EXPR. */
|| TREE_CODE (orig) == ERROR_MARK)
return error_mark_node;
- if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
- return fold_build1_loc (loc, NOP_EXPR, type, arg);
-
switch (TREE_CODE (type))
{
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* Handle conversions between pointers to different address spaces. */
+ if (POINTER_TYPE_P (orig)
+ && (TYPE_ADDR_SPACE (TREE_TYPE (type))
+ != TYPE_ADDR_SPACE (TREE_TYPE (orig))))
+ return fold_build1_loc (loc, ADDR_SPACE_CONVERT_EXPR, type, arg);
+ /* fall through */
+
case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
- case POINTER_TYPE: case REFERENCE_TYPE:
case OFFSET_TYPE:
if (TREE_CODE (arg) == INTEGER_CST)
{
case VOID_TYPE:
tem = fold_ignored_result (arg);
- if (TREE_CODE (tem) == MODIFY_EXPR)
- goto fold_convert_exit;
return fold_build1_loc (loc, NOP_EXPR, type, tem);
default:
+ if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
+ return fold_build1_loc (loc, NOP_EXPR, type, arg);
gcc_unreachable ();
}
fold_convert_exit:
- protected_set_expr_location (tem, loc);
+ protected_set_expr_location_unshare (tem, loc);
return tem;
}
\f
case SSA_NAME:
case COMPONENT_REF:
+ case MEM_REF:
case INDIRECT_REF:
- case ALIGN_INDIRECT_REF:
- case MISALIGNED_INDIRECT_REF:
case ARRAY_REF:
case ARRAY_RANGE_REF:
case BIT_FIELD_REF:
case TARGET_EXPR:
case COND_EXPR:
case BIND_EXPR:
- case MIN_EXPR:
- case MAX_EXPR:
break;
default:
if (! maybe_lvalue_p (x))
return x;
- x = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
- SET_EXPR_LOCATION (x, loc);
- return x;
+ return build1_loc (loc, NON_LVALUE_EXPR, TREE_TYPE (x), x);
}
/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
{
if (pedantic_lvalues)
return non_lvalue_loc (loc, x);
- protected_set_expr_location (x, loc);
- return x;
+
+ return protected_set_expr_location_unshare (x, loc);
}
\f
/* Given a tree comparison code, return the code that is the logical inverse
operand_equal_p (const_tree arg0, const_tree arg1, unsigned int flags)
{
/* If either is ERROR_MARK, they aren't equal. */
- if (TREE_CODE (arg0) == ERROR_MARK || TREE_CODE (arg1) == ERROR_MARK)
+ if (TREE_CODE (arg0) == ERROR_MARK || TREE_CODE (arg1) == ERROR_MARK
+ || TREE_TYPE (arg0) == error_mark_node
+ || TREE_TYPE (arg1) == error_mark_node)
+ return 0;
+
+ /* Similar, if either does not have a type (like a released SSA name),
+ they aren't equal. */
+ if (!TREE_TYPE (arg0) || !TREE_TYPE (arg1))
return 0;
/* Check equality of integer constants before bailing out due to
|| POINTER_TYPE_P (TREE_TYPE (arg0)) != POINTER_TYPE_P (TREE_TYPE (arg1)))
return 0;
+ /* We cannot consider pointers to different address space equal. */
+ if (POINTER_TYPE_P (TREE_TYPE (arg0)) && POINTER_TYPE_P (TREE_TYPE (arg1))
+ && (TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (arg0)))
+ != TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (arg1)))))
+ return 0;
+
/* If both types don't have the same precision, then it is not safe
to strip NOPs. */
if (TYPE_PRECISION (TREE_TYPE (arg0)) != TYPE_PRECISION (TREE_TYPE (arg1)))
equal if they have no side effects. If we have two identical
expressions with side effects that should be treated the same due
to the only side effects being identical SAVE_EXPR's, that will
- be detected in the recursive calls below. */
+ be detected in the recursive calls below.
+ If we are taking an invariant address of two identical objects
+ they are necessarily equal as well. */
if (arg0 == arg1 && ! (flags & OEP_ONLY_CONST)
&& (TREE_CODE (arg0) == SAVE_EXPR
+ || (flags & OEP_CONSTANT_ADDRESS_OF)
|| (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
return 1;
TREE_REAL_CST (arg1)))
return 1;
-
+
if (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0))))
{
/* If we do not distinguish between signed and unsigned zero,
case ADDR_EXPR:
return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0),
- 0);
+ TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1)
+ ? OEP_CONSTANT_ADDRESS_OF : 0);
default:
break;
}
switch (TREE_CODE (arg0))
{
case INDIRECT_REF:
- case ALIGN_INDIRECT_REF:
- case MISALIGNED_INDIRECT_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
return OP_SAME (0);
+ case MEM_REF:
+ /* Require equal access sizes, and similar pointer types.
+ We can have incomplete types for array references of
+ variable-sized arrays from the Fortran frontend
+ though. */
+ return ((TYPE_SIZE (TREE_TYPE (arg0)) == TYPE_SIZE (TREE_TYPE (arg1))
+ || (TYPE_SIZE (TREE_TYPE (arg0))
+ && TYPE_SIZE (TREE_TYPE (arg1))
+ && operand_equal_p (TYPE_SIZE (TREE_TYPE (arg0)),
+ TYPE_SIZE (TREE_TYPE (arg1)), flags)))
+ && (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (arg0, 1)))
+ == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (arg1, 1))))
+ && OP_SAME (0) && OP_SAME (1));
+
case ARRAY_REF:
case ARRAY_RANGE_REF:
/* Operands 2 and 3 may be null.
case TRUTH_ORIF_EXPR:
return OP_SAME (0) && OP_SAME (1);
+ case FMA_EXPR:
+ case WIDEN_MULT_PLUS_EXPR:
+ case WIDEN_MULT_MINUS_EXPR:
+ if (!OP_SAME (2))
+ return 0;
+ /* The multiplication operands are commutative. */
+ /* FALLTHRU */
+
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
TREE_OPERAND (arg1, 0), flags));
case COND_EXPR:
+ case VEC_COND_EXPR:
+ case DOT_PROD_EXPR:
return OP_SAME (0) && OP_SAME (1) && OP_SAME (2);
-
+
default:
return 0;
}
/* If the resulting operand is an empty statement, just return the omitted
statement casted to void. */
if (IS_EMPTY_STMT (t) && TREE_SIDE_EFFECTS (omitted))
- {
- t = build1 (NOP_EXPR, void_type_node, fold_ignored_result (omitted));
- goto omit_one_operand_exit;
- }
+ return build1_loc (loc, NOP_EXPR, void_type_node,
+ fold_ignored_result (omitted));
if (TREE_SIDE_EFFECTS (omitted))
- {
- t = build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);
- goto omit_one_operand_exit;
- }
+ return build2_loc (loc, COMPOUND_EXPR, type,
+ fold_ignored_result (omitted), t);
return non_lvalue_loc (loc, t);
-
- omit_one_operand_exit:
- protected_set_expr_location (t, loc);
- return t;
}
/* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
/* If the resulting operand is an empty statement, just return the omitted
statement casted to void. */
if (IS_EMPTY_STMT (t) && TREE_SIDE_EFFECTS (omitted))
- {
- t = build1 (NOP_EXPR, void_type_node, fold_ignored_result (omitted));
- goto pedantic_omit_one_operand_exit;
- }
+ return build1_loc (loc, NOP_EXPR, void_type_node,
+ fold_ignored_result (omitted));
if (TREE_SIDE_EFFECTS (omitted))
- {
- t = build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);
- goto pedantic_omit_one_operand_exit;
- }
+ return build2_loc (loc, COMPOUND_EXPR, type,
+ fold_ignored_result (omitted), t);
return pedantic_non_lvalue_loc (loc, t);
-
- pedantic_omit_one_operand_exit:
- protected_set_expr_location (t, loc);
- return t;
}
/* Return a tree for the case when the result of an expression is RESULT
tree
omit_two_operands_loc (location_t loc, tree type, tree result,
- tree omitted1, tree omitted2)
+ tree omitted1, tree omitted2)
{
tree t = fold_convert_loc (loc, type, result);
if (TREE_SIDE_EFFECTS (omitted2))
- {
- t = build2 (COMPOUND_EXPR, type, omitted2, t);
- SET_EXPR_LOCATION (t, loc);
- }
+ t = build2_loc (loc, COMPOUND_EXPR, type, omitted2, t);
if (TREE_SIDE_EFFECTS (omitted1))
- {
- t = build2 (COMPOUND_EXPR, type, omitted1, t);
- SET_EXPR_LOCATION (t, loc);
- }
+ t = build2_loc (loc, COMPOUND_EXPR, type, omitted1, t);
return TREE_CODE (t) != COMPOUND_EXPR ? non_lvalue_loc (loc, t) : t;
}
tree
fold_truth_not_expr (location_t loc, tree arg)
{
- tree t, type = TREE_TYPE (arg);
+ tree type = TREE_TYPE (arg);
enum tree_code code = TREE_CODE (arg);
location_t loc1, loc2;
if (code == ERROR_MARK)
return NULL_TREE;
- t = build2 (code, type, TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
- SET_EXPR_LOCATION (t, loc);
- return t;
+ return build2_loc (loc, code, type, TREE_OPERAND (arg, 0),
+ TREE_OPERAND (arg, 1));
}
switch (code)
return constant_boolean_node (integer_zerop (arg), type);
case TRUTH_AND_EXPR:
- loc1 = EXPR_LOCATION (TREE_OPERAND (arg, 0));
- loc2 = EXPR_LOCATION (TREE_OPERAND (arg, 1));
- if (loc1 == UNKNOWN_LOCATION)
- loc1 = loc;
- if (loc2 == UNKNOWN_LOCATION)
- loc2 = loc;
- t = build2 (TRUTH_OR_EXPR, type,
- invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 0)),
- invert_truthvalue_loc (loc2, TREE_OPERAND (arg, 1)));
- break;
+ loc1 = expr_location_or (TREE_OPERAND (arg, 0), loc);
+ loc2 = expr_location_or (TREE_OPERAND (arg, 1), loc);
+ return build2_loc (loc, TRUTH_OR_EXPR, type,
+ invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 0)),
+ invert_truthvalue_loc (loc2, TREE_OPERAND (arg, 1)));
case TRUTH_OR_EXPR:
- loc1 = EXPR_LOCATION (TREE_OPERAND (arg, 0));
- loc2 = EXPR_LOCATION (TREE_OPERAND (arg, 1));
- if (loc1 == UNKNOWN_LOCATION)
- loc1 = loc;
- if (loc2 == UNKNOWN_LOCATION)
- loc2 = loc;
- t = build2 (TRUTH_AND_EXPR, type,
- invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 0)),
- invert_truthvalue_loc (loc2, TREE_OPERAND (arg, 1)));
- break;
+ loc1 = expr_location_or (TREE_OPERAND (arg, 0), loc);
+ loc2 = expr_location_or (TREE_OPERAND (arg, 1), loc);
+ return build2_loc (loc, TRUTH_AND_EXPR, type,
+ invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 0)),
+ invert_truthvalue_loc (loc2, TREE_OPERAND (arg, 1)));
case TRUTH_XOR_EXPR:
/* Here we can invert either operand. We invert the first operand
negation of the second operand. */
if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
- t = build2 (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
- TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
+ return build2_loc (loc, TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
+ TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
else
- t = build2 (TRUTH_XOR_EXPR, type,
- invert_truthvalue_loc (loc, TREE_OPERAND (arg, 0)),
- TREE_OPERAND (arg, 1));
- break;
+ return build2_loc (loc, TRUTH_XOR_EXPR, type,
+ invert_truthvalue_loc (loc, TREE_OPERAND (arg, 0)),
+ TREE_OPERAND (arg, 1));
case TRUTH_ANDIF_EXPR:
- loc1 = EXPR_LOCATION (TREE_OPERAND (arg, 0));
- loc2 = EXPR_LOCATION (TREE_OPERAND (arg, 1));
- if (loc1 == UNKNOWN_LOCATION)
- loc1 = loc;
- if (loc2 == UNKNOWN_LOCATION)
- loc2 = loc;
- t = build2 (TRUTH_ORIF_EXPR, type,
- invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 0)),
- invert_truthvalue_loc (loc2, TREE_OPERAND (arg, 1)));
- break;
+ loc1 = expr_location_or (TREE_OPERAND (arg, 0), loc);
+ loc2 = expr_location_or (TREE_OPERAND (arg, 1), loc);
+ return build2_loc (loc, TRUTH_ORIF_EXPR, type,
+ invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 0)),
+ invert_truthvalue_loc (loc2, TREE_OPERAND (arg, 1)));
case TRUTH_ORIF_EXPR:
- loc1 = EXPR_LOCATION (TREE_OPERAND (arg, 0));
- loc2 = EXPR_LOCATION (TREE_OPERAND (arg, 1));
- if (loc1 == UNKNOWN_LOCATION)
- loc1 = loc;
- if (loc2 == UNKNOWN_LOCATION)
- loc2 = loc;
- t = build2 (TRUTH_ANDIF_EXPR, type,
- invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 0)),
- invert_truthvalue_loc (loc2, TREE_OPERAND (arg, 1)));
- break;
+ loc1 = expr_location_or (TREE_OPERAND (arg, 0), loc);
+ loc2 = expr_location_or (TREE_OPERAND (arg, 1), loc);
+ return build2_loc (loc, TRUTH_ANDIF_EXPR, type,
+ invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 0)),
+ invert_truthvalue_loc (loc2, TREE_OPERAND (arg, 1)));
case TRUTH_NOT_EXPR:
return TREE_OPERAND (arg, 0);
tree arg1 = TREE_OPERAND (arg, 1);
tree arg2 = TREE_OPERAND (arg, 2);
- loc1 = EXPR_LOCATION (TREE_OPERAND (arg, 1));
- loc2 = EXPR_LOCATION (TREE_OPERAND (arg, 2));
- if (loc1 == UNKNOWN_LOCATION)
- loc1 = loc;
- if (loc2 == UNKNOWN_LOCATION)
- loc2 = loc;
+ loc1 = expr_location_or (TREE_OPERAND (arg, 1), loc);
+ loc2 = expr_location_or (TREE_OPERAND (arg, 2), loc);
/* A COND_EXPR may have a throw as one operand, which
then has void type. Just leave void operands
as they are. */
- t = build3 (COND_EXPR, type, TREE_OPERAND (arg, 0),
- VOID_TYPE_P (TREE_TYPE (arg1))
- ? arg1 : invert_truthvalue_loc (loc1, arg1),
- VOID_TYPE_P (TREE_TYPE (arg2))
- ? arg2 : invert_truthvalue_loc (loc2, arg2));
- break;
+ return build3_loc (loc, COND_EXPR, type, TREE_OPERAND (arg, 0),
+ VOID_TYPE_P (TREE_TYPE (arg1))
+ ? arg1 : invert_truthvalue_loc (loc1, arg1),
+ VOID_TYPE_P (TREE_TYPE (arg2))
+ ? arg2 : invert_truthvalue_loc (loc2, arg2));
}
case COMPOUND_EXPR:
- loc1 = EXPR_LOCATION (TREE_OPERAND (arg, 1));
- if (loc1 == UNKNOWN_LOCATION)
- loc1 = loc;
- t = build2 (COMPOUND_EXPR, type,
- TREE_OPERAND (arg, 0),
- invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 1)));
- break;
+ loc1 = expr_location_or (TREE_OPERAND (arg, 1), loc);
+ return build2_loc (loc, COMPOUND_EXPR, type,
+ TREE_OPERAND (arg, 0),
+ invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 1)));
case NON_LVALUE_EXPR:
- loc1 = EXPR_LOCATION (TREE_OPERAND (arg, 0));
- if (loc1 == UNKNOWN_LOCATION)
- loc1 = loc;
+ loc1 = expr_location_or (TREE_OPERAND (arg, 0), loc);
return invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 0));
CASE_CONVERT:
if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE)
- {
- t = build1 (TRUTH_NOT_EXPR, type, arg);
- break;
- }
+ return build1_loc (loc, TRUTH_NOT_EXPR, type, arg);
/* ... fall through ... */
case FLOAT_EXPR:
- loc1 = EXPR_LOCATION (TREE_OPERAND (arg, 0));
- if (loc1 == UNKNOWN_LOCATION)
- loc1 = loc;
- t = build1 (TREE_CODE (arg), type,
- invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 0)));
- break;
+ loc1 = expr_location_or (TREE_OPERAND (arg, 0), loc);
+ return build1_loc (loc, TREE_CODE (arg), type,
+ invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 0)));
case BIT_AND_EXPR:
if (!integer_onep (TREE_OPERAND (arg, 1)))
return NULL_TREE;
- t = build2 (EQ_EXPR, type, arg, build_int_cst (type, 0));
- break;
+ return build2_loc (loc, EQ_EXPR, type, arg, build_int_cst (type, 0));
case SAVE_EXPR:
- t = build1 (TRUTH_NOT_EXPR, type, arg);
- break;
+ return build1_loc (loc, TRUTH_NOT_EXPR, type, arg);
case CLEANUP_POINT_EXPR:
- loc1 = EXPR_LOCATION (TREE_OPERAND (arg, 0));
- if (loc1 == UNKNOWN_LOCATION)
- loc1 = loc;
- t = build1 (CLEANUP_POINT_EXPR, type,
- invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 0)));
- break;
+ loc1 = expr_location_or (TREE_OPERAND (arg, 0), loc);
+ return build1_loc (loc, CLEANUP_POINT_EXPR, type,
+ invert_truthvalue_loc (loc1, TREE_OPERAND (arg, 0)));
default:
- t = NULL_TREE;
- break;
+ return NULL_TREE;
}
-
- if (t)
- SET_EXPR_LOCATION (t, loc);
-
- return t;
}
/* Return a simplified tree node for the truth-negation of ARG. This
tem = fold_truth_not_expr (loc, arg);
if (!tem)
- {
- tem = build1 (TRUTH_NOT_EXPR, TREE_TYPE (arg), arg);
- SET_EXPR_LOCATION (tem, loc);
- }
+ tem = build1_loc (loc, TRUTH_NOT_EXPR, TREE_TYPE (arg), arg);
return tem;
}
tree size = TYPE_SIZE (TREE_TYPE (inner));
if ((INTEGRAL_TYPE_P (TREE_TYPE (inner))
|| POINTER_TYPE_P (TREE_TYPE (inner)))
- && host_integerp (size, 0)
+ && host_integerp (size, 0)
&& tree_low_cst (size, 0) == bitsize)
return fold_convert_loc (loc, type, inner);
}
|| TYPE_UNSIGNED (bftype) == !unsignedp)
bftype = build_nonstandard_integer_type (bitsize, 0);
- result = build3 (BIT_FIELD_REF, bftype, inner,
- size_int (bitsize), bitsize_int (bitpos));
- SET_EXPR_LOCATION (result, loc);
+ result = build3_loc (loc, BIT_FIELD_REF, bftype, inner,
+ size_int (bitsize), bitsize_int (bitpos));
if (bftype != type)
result = fold_convert_loc (loc, type, result);
/* See if we can find a mode to refer to this field. We should be able to,
but fail if we can't. */
- nmode = get_best_mode (lbitsize, lbitpos,
- const_p ? TYPE_ALIGN (TREE_TYPE (linner))
- : MIN (TYPE_ALIGN (TREE_TYPE (linner)),
- TYPE_ALIGN (TREE_TYPE (rinner))),
- word_mode, lvolatilep || rvolatilep);
+ if (lvolatilep
+ && GET_MODE_BITSIZE (lmode) > 0
+ && flag_strict_volatile_bitfields > 0)
+ nmode = lmode;
+ else
+ nmode = get_best_mode (lbitsize, lbitpos, 0, 0,
+ const_p ? TYPE_ALIGN (TREE_TYPE (linner))
+ : MIN (TYPE_ALIGN (TREE_TYPE (linner)),
+ TYPE_ALIGN (TREE_TYPE (rinner))),
+ word_mode, lvolatilep || rvolatilep);
if (nmode == VOIDmode)
return 0;
/* Make the mask to be used against the extracted field. */
mask = build_int_cst_type (unsigned_type, -1);
- mask = const_binop (LSHIFT_EXPR, mask, size_int (nbitsize - lbitsize), 0);
+ mask = const_binop (LSHIFT_EXPR, mask, size_int (nbitsize - lbitsize));
mask = const_binop (RSHIFT_EXPR, mask,
- size_int (nbitsize - lbitsize - lbitpos), 0);
+ size_int (nbitsize - lbitsize - lbitpos));
if (! const_p)
/* If not comparing with constant, just rework the comparison
if (! integer_zerop (const_binop (RSHIFT_EXPR,
fold_convert_loc (loc,
unsigned_type, rhs),
- size_int (lbitsize), 0)))
+ size_int (lbitsize))))
{
warning (0, "comparison is always %d due to width of bit-field",
code == NE_EXPR);
{
tree tem = const_binop (RSHIFT_EXPR,
fold_convert_loc (loc, signed_type, rhs),
- size_int (lbitsize - 1), 0);
+ size_int (lbitsize - 1));
if (! integer_zerop (tem) && ! integer_all_onesp (tem))
{
warning (0, "comparison is always %d due to width of bit-field",
rhs = const_binop (BIT_AND_EXPR,
const_binop (LSHIFT_EXPR,
fold_convert_loc (loc, unsigned_type, rhs),
- size_int (lbitpos), 0),
- mask, 0);
+ size_int (lbitpos)),
+ mask);
- lhs = build2 (code, compare_type,
- build2 (BIT_AND_EXPR, unsigned_type, lhs, mask),
- rhs);
- SET_EXPR_LOCATION (lhs, loc);
+ lhs = build2_loc (loc, code, compare_type,
+ build2 (BIT_AND_EXPR, unsigned_type, lhs, mask), rhs);
return lhs;
}
\f
mask = build_int_cst_type (unsigned_type, -1);
- mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
- mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
+ mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize));
+ mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize));
/* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
if (and_mask != 0)
tree_int_cst_equal (mask,
const_binop (RSHIFT_EXPR,
const_binop (LSHIFT_EXPR, tmask,
- size_int (precision - size),
- 0),
- size_int (precision - size), 0));
+ size_int (precision - size)),
+ size_int (precision - size)));
}
/* Subroutine for fold: determine if VAL is the INTEGER_CONST that
n_high = range_binop (MINUS_EXPR, exp_type,
build_int_cst (exp_type, 0),
0, low, 0);
- low = n_low, high = n_high;
- exp = arg0;
- continue;
+ if (n_high != 0 && TREE_OVERFLOW (n_high))
+ break;
+ goto normalize;
case BIT_NOT_EXPR:
/* ~ X -> -X - 1 */
- exp = build2 (MINUS_EXPR, exp_type, negate_expr (arg0),
- build_int_cst (exp_type, 1));
- SET_EXPR_LOCATION (exp, loc);
+ exp = build2_loc (loc, MINUS_EXPR, exp_type, negate_expr (arg0),
+ build_int_cst (exp_type, 1));
continue;
case PLUS_EXPR: case MINUS_EXPR:
if (TYPE_OVERFLOW_UNDEFINED (arg0_type))
*strict_overflow_p = true;
+ normalize:
/* Check for an unsigned range which has wrapped around the maximum
value thus making n_high < n_low, and normalize it. */
if (n_low && n_high && tree_int_cst_lt (n_high, n_low))
low = fold_convert_loc (loc, etype, low);
exp = fold_convert_loc (loc, etype, exp);
- value = const_binop (MINUS_EXPR, high, low, 0);
+ value = const_binop (MINUS_EXPR, high, low);
if (POINTER_TYPE_P (etype))
{
if (value != 0 && !TREE_OVERFLOW (value))
{
- low = fold_convert_loc (loc, sizetype, low);
- low = fold_build1_loc (loc, NEGATE_EXPR, sizetype, low);
+ low = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (low), low);
return build_range_check (loc, type,
- fold_build2_loc (loc, POINTER_PLUS_EXPR,
- etype, exp, low),
+ fold_build_pointer_plus_loc (loc, exp, low),
1, build_int_cst (etype, 0), value);
}
return 0;
OEP_ONLY_CONST)
&& operand_equal_p (arg01,
const_binop (PLUS_EXPR, arg2,
- build_int_cst (type, 1), 0),
+ build_int_cst (type, 1)),
OEP_ONLY_CONST))
{
tem = fold_build2_loc (loc, MIN_EXPR, TREE_TYPE (arg00), arg00,
fold_convert_loc (loc, TREE_TYPE (arg00),
arg2));
- return pedantic_non_lvalue_loc (loc,
+ return pedantic_non_lvalue_loc (loc,
fold_convert_loc (loc, type, tem));
}
break;
OEP_ONLY_CONST)
&& operand_equal_p (arg01,
const_binop (MINUS_EXPR, arg2,
- build_int_cst (type, 1), 0),
+ build_int_cst (type, 1)),
OEP_ONLY_CONST))
{
tem = fold_build2_loc (loc, MIN_EXPR, TREE_TYPE (arg00), arg00,
OEP_ONLY_CONST)
&& operand_equal_p (arg01,
const_binop (MINUS_EXPR, arg2,
- build_int_cst (type, 1), 0),
+ build_int_cst (type, 1)),
OEP_ONLY_CONST))
{
tem = fold_build2_loc (loc, MAX_EXPR, TREE_TYPE (arg00), arg00,
OEP_ONLY_CONST)
&& operand_equal_p (arg01,
const_binop (PLUS_EXPR, arg2,
- build_int_cst (type, 1), 0),
+ build_int_cst (type, 1)),
OEP_ONLY_CONST))
{
tem = fold_build2_loc (loc, MAX_EXPR, TREE_TYPE (arg00), arg00,
if ((lhs == 0 || rhs == 0 || operand_equal_p (lhs, rhs, 0))
&& merge_ranges (&in_p, &low, &high, in0_p, low0, high0,
in1_p, low1, high1)
- && 0 != (tem = (build_range_check (UNKNOWN_LOCATION, type,
+ && 0 != (tem = (build_range_check (loc, type,
lhs != 0 ? lhs
: rhs != 0 ? rhs : integer_zero_node,
in_p, low, high))))
unless we are at top level or LHS contains a PLACEHOLDER_EXPR, in
which cases we can't do this. */
if (simple_operand_p (lhs))
- {
- tem = build2 (code == TRUTH_ANDIF_EXPR
- ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
- type, op0, op1);
- SET_EXPR_LOCATION (tem, loc);
- return tem;
- }
+ return build2_loc (loc, code == TRUTH_ANDIF_EXPR
+ ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
+ type, op0, op1);
- else if (lang_hooks.decls.global_bindings_p () == 0
- && ! CONTAINS_PLACEHOLDER_P (lhs))
+ else if (!lang_hooks.decls.global_bindings_p ()
+ && !CONTAINS_PLACEHOLDER_P (lhs))
{
tree common = save_expr (lhs);
if (strict_overflow_p)
fold_overflow_warning (warnmsg,
WARN_STRICT_OVERFLOW_COMPARISON);
- tem = build2 (code == TRUTH_ANDIF_EXPR
- ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
- type, lhs, rhs);
- SET_EXPR_LOCATION (tem, loc);
- return tem;
+ return build2_loc (loc, code == TRUTH_ANDIF_EXPR
+ ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
+ type, lhs, rhs);
}
}
}
/* We work by getting just the sign bit into the low-order bit, then
into the high-order bit, then sign-extend. We then XOR that value
with C. */
- temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1), 0);
- temp = const_binop (BIT_AND_EXPR, temp, size_int (1), 0);
+ temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1));
+ temp = const_binop (BIT_AND_EXPR, temp, size_int (1));
/* We must use a signed type in order to get an arithmetic right shift.
However, we must also avoid introducing accidental overflows, so that
if (TYPE_UNSIGNED (type))
temp = fold_convert (signed_type_for (type), temp);
- temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1), 0);
- temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1), 0);
+ temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1));
+ temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1));
if (mask != 0)
temp = const_binop (BIT_AND_EXPR, temp,
- fold_convert (TREE_TYPE (c), mask),
- 0);
+ fold_convert (TREE_TYPE (c), mask));
/* If necessary, convert the type back to match the type of C. */
if (TYPE_UNSIGNED (type))
temp = fold_convert (type, temp);
- return fold_convert (type,
- const_binop (BIT_XOR_EXPR, c, temp, 0));
+ return fold_convert (type, const_binop (BIT_XOR_EXPR, c, temp));
}
\f
+/* For an expression that has the form
+ (A && B) || ~B
+ or
+ (A || B) && ~B,
+ we can drop one of the inner expressions and simplify to
+ A || ~B
+ or
+ A && ~B
+ LOC is the location of the resulting expression. OP is the inner
+ logical operation; the left-hand side in the examples above, while CMPOP
+ is the right-hand side. RHS_ONLY is used to prevent us from accidentally
+ removing a condition that guards another, as in
+ (A != NULL && A->...) || A == NULL
+ which we must not transform. If RHS_ONLY is true, only eliminate the
+ right-most operand of the inner logical operation. */
+
+static tree
+merge_truthop_with_opposite_arm (location_t loc, tree op, tree cmpop,
+ bool rhs_only)
+{
+ tree type = TREE_TYPE (cmpop);
+ enum tree_code code = TREE_CODE (cmpop);
+ enum tree_code truthop_code = TREE_CODE (op);
+ tree lhs = TREE_OPERAND (op, 0);
+ tree rhs = TREE_OPERAND (op, 1);
+ tree orig_lhs = lhs, orig_rhs = rhs;
+ enum tree_code rhs_code = TREE_CODE (rhs);
+ enum tree_code lhs_code = TREE_CODE (lhs);
+ enum tree_code inv_code;
+
+ /* Dropping an arm changes how often the remaining operands are
+    evaluated, so give up if anything here has side effects.  */
+ if (TREE_SIDE_EFFECTS (op) || TREE_SIDE_EFFECTS (cmpop))
+ return NULL_TREE;
+
+ /* CMPOP must be a comparison so that invert_tree_comparison below
+    can compute its inverse.  */
+ if (TREE_CODE_CLASS (code) != tcc_comparison)
+ return NULL_TREE;
+
+ /* Recurse into operands that use the same logical code, so nested
+    forms like (A && (B && C)) || ~C are handled too.  */
+ if (rhs_code == truthop_code)
+ {
+ tree newrhs = merge_truthop_with_opposite_arm (loc, rhs, cmpop, rhs_only);
+ if (newrhs != NULL_TREE)
+ {
+ rhs = newrhs;
+ rhs_code = TREE_CODE (rhs);
+ }
+ }
+ if (lhs_code == truthop_code && !rhs_only)
+ {
+ tree newlhs = merge_truthop_with_opposite_arm (loc, lhs, cmpop, false);
+ if (newlhs != NULL_TREE)
+ {
+ lhs = newlhs;
+ lhs_code = TREE_CODE (lhs);
+ }
+ }
+
+ /* If either arm is exactly the inverse comparison of CMPOP (same
+    operands, inverted code), drop it and keep the other arm.  */
+ inv_code = invert_tree_comparison (code, HONOR_NANS (TYPE_MODE (type)));
+ if (inv_code == rhs_code
+ && operand_equal_p (TREE_OPERAND (rhs, 0), TREE_OPERAND (cmpop, 0), 0)
+ && operand_equal_p (TREE_OPERAND (rhs, 1), TREE_OPERAND (cmpop, 1), 0))
+ return lhs;
+ if (!rhs_only && inv_code == lhs_code
+ && operand_equal_p (TREE_OPERAND (lhs, 0), TREE_OPERAND (cmpop, 0), 0)
+ && operand_equal_p (TREE_OPERAND (lhs, 1), TREE_OPERAND (cmpop, 1), 0))
+ return rhs;
+ /* If a recursive call simplified an arm, rebuild OP from the new
+    operands even though this level removed nothing.  */
+ if (rhs != orig_rhs || lhs != orig_lhs)
+ return fold_build2_loc (loc, truthop_code, TREE_TYPE (cmpop),
+ lhs, rhs);
+ return NULL_TREE;
+}
+
/* Find ways of folding logical expressions of LHS and RHS:
Try to merge two comparisons to the same innermost item.
Look for range tests like "ch >= '0' && ch <= '9'".
if (simple_operand_p (ll_arg)
&& simple_operand_p (lr_arg))
{
- tree result;
if (operand_equal_p (ll_arg, rl_arg, 0)
&& operand_equal_p (lr_arg, rr_arg, 0))
{
&& rcode == NE_EXPR && integer_zerop (rr_arg)
&& TREE_TYPE (ll_arg) == TREE_TYPE (rl_arg)
&& INTEGRAL_TYPE_P (TREE_TYPE (ll_arg)))
- {
- result = build2 (NE_EXPR, truth_type,
+ return build2_loc (loc, NE_EXPR, truth_type,
build2 (BIT_IOR_EXPR, TREE_TYPE (ll_arg),
ll_arg, rl_arg),
build_int_cst (TREE_TYPE (ll_arg), 0));
- goto fold_truthop_exit;
- }
/* Convert (a == 0) && (b == 0) into (a | b) == 0. */
if (code == TRUTH_AND_EXPR
&& rcode == EQ_EXPR && integer_zerop (rr_arg)
&& TREE_TYPE (ll_arg) == TREE_TYPE (rl_arg)
&& INTEGRAL_TYPE_P (TREE_TYPE (ll_arg)))
- {
- result = build2 (EQ_EXPR, truth_type,
+ return build2_loc (loc, EQ_EXPR, truth_type,
build2 (BIT_IOR_EXPR, TREE_TYPE (ll_arg),
ll_arg, rl_arg),
build_int_cst (TREE_TYPE (ll_arg), 0));
- goto fold_truthop_exit;
- }
if (LOGICAL_OP_NON_SHORT_CIRCUIT)
{
if (code != orig_code || lhs != orig_lhs || rhs != orig_rhs)
- {
- result = build2 (code, truth_type, lhs, rhs);
- goto fold_truthop_exit;
- }
+ return build2_loc (loc, code, truth_type, lhs, rhs);
return NULL_TREE;
}
}
to be relative to a field of that size. */
first_bit = MIN (ll_bitpos, rl_bitpos);
end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
- lnmode = get_best_mode (end_bit - first_bit, first_bit,
+ lnmode = get_best_mode (end_bit - first_bit, first_bit, 0, 0,
TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
volatilep);
if (lnmode == VOIDmode)
}
ll_mask = const_binop (LSHIFT_EXPR, fold_convert_loc (loc, lntype, ll_mask),
- size_int (xll_bitpos), 0);
+ size_int (xll_bitpos));
rl_mask = const_binop (LSHIFT_EXPR, fold_convert_loc (loc, lntype, rl_mask),
- size_int (xrl_bitpos), 0);
+ size_int (xrl_bitpos));
if (l_const)
{
l_const = fold_convert_loc (loc, lntype, l_const);
l_const = unextend (l_const, ll_bitsize, ll_unsignedp, ll_and_mask);
- l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
+ l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos));
if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
fold_build1_loc (loc, BIT_NOT_EXPR,
- lntype, ll_mask),
- 0)))
+ lntype, ll_mask))))
{
warning (0, "comparison is always %d", wanted_code == NE_EXPR);
{
r_const = fold_convert_loc (loc, lntype, r_const);
r_const = unextend (r_const, rl_bitsize, rl_unsignedp, rl_and_mask);
- r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos), 0);
+ r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos));
if (! integer_zerop (const_binop (BIT_AND_EXPR, r_const,
fold_build1_loc (loc, BIT_NOT_EXPR,
- lntype, rl_mask),
- 0)))
+ lntype, rl_mask))))
{
warning (0, "comparison is always %d", wanted_code == NE_EXPR);
first_bit = MIN (lr_bitpos, rr_bitpos);
end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
- rnmode = get_best_mode (end_bit - first_bit, first_bit,
+ rnmode = get_best_mode (end_bit - first_bit, first_bit, 0, 0,
TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
volatilep);
if (rnmode == VOIDmode)
lr_mask = const_binop (LSHIFT_EXPR, fold_convert_loc (loc,
rntype, lr_mask),
- size_int (xlr_bitpos), 0);
+ size_int (xlr_bitpos));
rr_mask = const_binop (LSHIFT_EXPR, fold_convert_loc (loc,
rntype, rr_mask),
- size_int (xrr_bitpos), 0);
+ size_int (xrr_bitpos));
/* Make a mask that corresponds to both fields being compared.
Do this for both items being compared. If the operands are the
same size and the bits being compared are in the same position
then we can do this by masking both and comparing the masked
results. */
- ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
- lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
+ ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask);
+ lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask);
if (lnbitsize == rnbitsize && xll_bitpos == xlr_bitpos)
{
lhs = make_bit_field_ref (loc, ll_inner, lntype, lnbitsize, lnbitpos,
if (! all_ones_mask_p (lr_mask, rnbitsize))
rhs = build2 (BIT_AND_EXPR, rntype, rhs, lr_mask);
- result = build2 (wanted_code, truth_type, lhs, rhs);
- goto fold_truthop_exit;
+ return build2_loc (loc, wanted_code, truth_type, lhs, rhs);
}
/* There is still another way we can do something: If both pairs of
MIN (lr_bitpos, rr_bitpos), lr_unsignedp);
ll_mask = const_binop (RSHIFT_EXPR, ll_mask,
- size_int (MIN (xll_bitpos, xrl_bitpos)), 0);
+ size_int (MIN (xll_bitpos, xrl_bitpos)));
lr_mask = const_binop (RSHIFT_EXPR, lr_mask,
- size_int (MIN (xlr_bitpos, xrr_bitpos)), 0);
+ size_int (MIN (xlr_bitpos, xrr_bitpos)));
/* Convert to the smaller type before masking out unwanted bits. */
type = lntype;
if (! all_ones_mask_p (lr_mask, lr_bitsize + rr_bitsize))
rhs = build2 (BIT_AND_EXPR, type, rhs, lr_mask);
- result = build2 (wanted_code, truth_type, lhs, rhs);
- goto fold_truthop_exit;
+ return build2_loc (loc, wanted_code, truth_type, lhs, rhs);
}
return 0;
common between the masks, those bits of the constants must be the same.
If not, the condition is always false. Test for this to avoid generating
incorrect code below. */
- result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
+ result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask);
if (! integer_zerop (result)
- && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
- const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
+ && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const),
+ const_binop (BIT_AND_EXPR, result, r_const)) != 1)
{
if (wanted_code == NE_EXPR)
{
result = make_bit_field_ref (loc, ll_inner, lntype, lnbitsize, lnbitpos,
ll_unsignedp || rl_unsignedp);
- ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
+ ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask);
if (! all_ones_mask_p (ll_mask, lnbitsize))
- {
- result = build2 (BIT_AND_EXPR, lntype, result, ll_mask);
- SET_EXPR_LOCATION (result, loc);
- }
-
- result = build2 (wanted_code, truth_type, result,
- const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
+ result = build2_loc (loc, BIT_AND_EXPR, lntype, result, ll_mask);
- fold_truthop_exit:
- SET_EXPR_LOCATION (result, loc);
- return result;
+ return build2_loc (loc, wanted_code, truth_type, result,
+ const_binop (BIT_IOR_EXPR, l_const, r_const));
}
\f
/* Optimize T, which is a comparison of a MIN_EXPR or MAX_EXPR with a
/* For a constant, we can always simplify if we are a multiply
or (for divide and modulus) if it is a multiple of our constant. */
if (code == MULT_EXPR
- || integer_zerop (const_binop (TRUNC_MOD_EXPR, t, c, 0)))
+ || integer_zerop (const_binop (TRUNC_MOD_EXPR, t, c)))
return const_binop (code, fold_convert (ctype, t),
- fold_convert (ctype, c), 0);
+ fold_convert (ctype, c));
break;
CASE_CONVERT: case NON_LVALUE_EXPR:
&& 0 != (t1 = fold_convert (ctype,
const_binop (LSHIFT_EXPR,
size_one_node,
- op1, 0)))
+ op1)))
&& !TREE_OVERFLOW (t1))
return extract_muldiv (build2 (tcode == LSHIFT_EXPR
? MULT_EXPR : FLOOR_DIV_EXPR,
/* If this was a subtraction, negate OP1 and set it to be an addition.
This simplifies the logic below. */
if (tcode == MINUS_EXPR)
- tcode = PLUS_EXPR, op1 = negate_expr (op1);
+ {
+ tcode = PLUS_EXPR, op1 = negate_expr (op1);
+ /* If OP1 was not easily negatable, the constant may be OP0. */
+ if (TREE_CODE (op0) == INTEGER_CST)
+ {
+ tree tem = op0;
+ op0 = op1;
+ op1 = tem;
+ tem = t1;
+ t1 = t2;
+ t2 = tem;
+ }
+ }
if (TREE_CODE (op1) != INTEGER_CST)
break;
/* If it's a multiply or a division/modulus operation of a multiple
of our constant, do the operation and verify it doesn't overflow. */
if (code == MULT_EXPR
- || integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
+ || integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c)))
{
op1 = const_binop (code, fold_convert (ctype, op1),
- fold_convert (ctype, c), 0);
+ fold_convert (ctype, c));
/* We allow the constant to overflow with wrapping semantics. */
if (op1 == 0
|| (TREE_OVERFLOW (op1) && !TYPE_OVERFLOW_WRAPS (ctype)))
|| (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE
&& TYPE_IS_SIZETYPE (TREE_TYPE (t))))
&& TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST
- && integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
+ && integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c)))
{
*strict_overflow_p = true;
return omit_one_operand (type, integer_zero_node, op0);
/* If these are the same operation types, we can associate them
assuming no overflow. */
- if (tcode == code
- && 0 != (t1 = int_const_binop (MULT_EXPR,
- fold_convert (ctype, op1),
- fold_convert (ctype, c), 1))
- && 0 != (t1 = force_fit_type_double (ctype, TREE_INT_CST_LOW (t1),
- TREE_INT_CST_HIGH (t1),
- (TYPE_UNSIGNED (ctype)
- && tcode != MULT_EXPR) ? -1 : 1,
- TREE_OVERFLOW (t1)))
- && !TREE_OVERFLOW (t1))
- return fold_build2 (tcode, ctype, fold_convert (ctype, op0), t1);
+ if (tcode == code)
+ {
+ double_int mul;
+ int overflow_p;
+ mul = double_int_mul_with_sign
+ (double_int_ext
+ (tree_to_double_int (op1),
+ TYPE_PRECISION (ctype), TYPE_UNSIGNED (ctype)),
+ double_int_ext
+ (tree_to_double_int (c),
+ TYPE_PRECISION (ctype), TYPE_UNSIGNED (ctype)),
+ false, &overflow_p);
+ overflow_p = (((!TYPE_UNSIGNED (ctype)
+ || (TREE_CODE (ctype) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (ctype)))
+ && overflow_p)
+ | TREE_OVERFLOW (c) | TREE_OVERFLOW (op1));
+ if (!double_int_fits_to_tree_p (ctype, mul)
+ && ((TYPE_UNSIGNED (ctype) && tcode != MULT_EXPR)
+ || !TYPE_UNSIGNED (ctype)
+ || (TREE_CODE (ctype) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (ctype))))
+ overflow_p = 1;
+ if (!overflow_p)
+ return fold_build2 (tcode, ctype, fold_convert (ctype, op0),
+ double_int_to_tree (ctype, mul));
+ }
/* If these operations "cancel" each other, we have the main
optimizations of this pass, which occur when either constant is a
&& code != FLOOR_MOD_EXPR && code != ROUND_MOD_EXPR
&& code != MULT_EXPR)))
{
- if (integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
+ if (integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c)))
{
if (TYPE_OVERFLOW_UNDEFINED (ctype))
*strict_overflow_p = true;
return fold_build2 (tcode, ctype, fold_convert (ctype, op0),
fold_convert (ctype,
const_binop (TRUNC_DIV_EXPR,
- op1, c, 0)));
+ op1, c)));
}
- else if (integer_zerop (const_binop (TRUNC_MOD_EXPR, c, op1, 0)))
+ else if (integer_zerop (const_binop (TRUNC_MOD_EXPR, c, op1)))
{
if (TYPE_OVERFLOW_UNDEFINED (ctype))
*strict_overflow_p = true;
return fold_build2 (code, ctype, fold_convert (ctype, op0),
fold_convert (ctype,
const_binop (TRUNC_DIV_EXPR,
- c, op1, 0)));
+ c, op1)));
}
}
break;
}
\f
/* Return a node which has the indicated constant VALUE (either 0 or
- 1), and is of the indicated TYPE. */
+ 1 for scalars or {-1,-1,..} or {0,0,...} for vectors),
+ and is of the indicated TYPE. */
tree
-constant_boolean_node (int value, tree type)
+constant_boolean_node (bool value, tree type)
{
if (type == integer_type_node)
return value ? integer_one_node : integer_zero_node;
else if (type == boolean_type_node)
return value ? boolean_true_node : boolean_false_node;
+ /* Vector truth values are all-ones (-1) per element so the result
+    can serve directly as a bit mask.  */
+ else if (TREE_CODE (type) == VECTOR_TYPE)
+ return build_vector_from_val (type,
+ build_int_cst (TREE_TYPE (type),
+ value ? -1 : 0));
else
- return build_int_cst (type, value);
+ return fold_convert (type, value ? integer_one_node : integer_zero_node);
}
tree lhs = NULL_TREE;
tree rhs = NULL_TREE;
- /* This transformation is only worthwhile if we don't have to wrap
- arg in a SAVE_EXPR, and the operation can be simplified on at least
- one of the branches once its pushed inside the COND_EXPR. */
- if (!TREE_CONSTANT (arg))
- return NULL_TREE;
-
if (TREE_CODE (cond) == COND_EXPR)
{
test = TREE_OPERAND (cond, 0);
false_value = constant_boolean_node (false, testtype);
}
+ /* This transformation is only worthwhile if we don't have to wrap ARG
+ in a SAVE_EXPR and the operation can be simplified on at least one
+ of the branches once its pushed inside the COND_EXPR. */
+ if (!TREE_CONSTANT (arg)
+ && (TREE_SIDE_EFFECTS (arg)
+ || TREE_CONSTANT (true_value) || TREE_CONSTANT (false_value)))
+ return NULL_TREE;
+
arg = fold_convert_loc (loc, arg_type, arg);
if (lhs == 0)
{
rhs = fold_build2_loc (loc, code, type, arg, false_value);
}
- test = fold_build3_loc (loc, COND_EXPR, type, test, lhs, rhs);
- return fold_convert_loc (loc, type, test);
+ /* Check that we have simplified at least one of the branches. */
+ if (!TREE_CONSTANT (arg) && !TREE_CONSTANT (lhs) && !TREE_CONSTANT (rhs))
+ return NULL_TREE;
+
+ return fold_build3_loc (loc, COND_EXPR, type, test, lhs, rhs);
}
\f
build_real (TREE_TYPE (arg), dconst0));
/* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
- if (lang_hooks.decls.global_bindings_p () != 0
- || CONTAINS_PLACEHOLDER_P (arg))
- return NULL_TREE;
-
arg = save_expr (arg);
return fold_build2_loc (loc, TRUTH_ANDIF_EXPR, type,
fold_build2_loc (loc, GE_EXPR, type, arg,
build_real (TREE_TYPE (arg), c2));
/* sqrt(x) < c is the same as x >= 0 && x < c*c. */
- if (lang_hooks.decls.global_bindings_p () == 0
- && ! CONTAINS_PLACEHOLDER_P (arg))
- {
- arg = save_expr (arg);
- return fold_build2_loc (loc, TRUTH_ANDIF_EXPR, type,
+ arg = save_expr (arg);
+ return fold_build2_loc (loc, TRUTH_ANDIF_EXPR, type,
fold_build2_loc (loc, GE_EXPR, type, arg,
build_real (TREE_TYPE (arg),
dconst0)),
fold_build2_loc (loc, code, type, arg,
build_real (TREE_TYPE (arg),
c2)));
- }
}
}
return omit_one_operand_loc (loc, type, integer_one_node, arg0);
/* x <= +Inf is the same as x == x, i.e. isfinite(x). */
- if (lang_hooks.decls.global_bindings_p () == 0
- && ! CONTAINS_PLACEHOLDER_P (arg0))
- {
- arg0 = save_expr (arg0);
- return fold_build2_loc (loc, EQ_EXPR, type, arg0, arg0);
- }
- break;
+ arg0 = save_expr (arg0);
+ return fold_build2_loc (loc, EQ_EXPR, type, arg0, arg0);
case EQ_EXPR:
case GE_EXPR:
tree prod, tmp, hi, lo;
tree arg00 = TREE_OPERAND (arg0, 0);
tree arg01 = TREE_OPERAND (arg0, 1);
- unsigned HOST_WIDE_INT lpart;
- HOST_WIDE_INT hpart;
+ double_int val;
bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
bool neg_overflow;
int overflow;
/* We have to do this the hard way to detect unsigned overflow.
- prod = int_const_binop (MULT_EXPR, arg01, arg1, 0); */
+ prod = int_const_binop (MULT_EXPR, arg01, arg1); */
overflow = mul_double_with_sign (TREE_INT_CST_LOW (arg01),
TREE_INT_CST_HIGH (arg01),
TREE_INT_CST_LOW (arg1),
TREE_INT_CST_HIGH (arg1),
- &lpart, &hpart, unsigned_p);
- prod = force_fit_type_double (TREE_TYPE (arg00), lpart, hpart,
- -1, overflow);
+ &val.low, &val.high, unsigned_p);
+ prod = force_fit_type_double (TREE_TYPE (arg00), val, -1, overflow);
neg_overflow = false;
if (unsigned_p)
{
tmp = int_const_binop (MINUS_EXPR, arg01,
- build_int_cst (TREE_TYPE (arg01), 1), 0);
+ build_int_cst (TREE_TYPE (arg01), 1));
lo = prod;
- /* Likewise hi = int_const_binop (PLUS_EXPR, prod, tmp, 0). */
+ /* Likewise hi = int_const_binop (PLUS_EXPR, prod, tmp). */
overflow = add_double_with_sign (TREE_INT_CST_LOW (prod),
TREE_INT_CST_HIGH (prod),
TREE_INT_CST_LOW (tmp),
TREE_INT_CST_HIGH (tmp),
- &lpart, &hpart, unsigned_p);
- hi = force_fit_type_double (TREE_TYPE (arg00), lpart, hpart,
+ &val.low, &val.high, unsigned_p);
+ hi = force_fit_type_double (TREE_TYPE (arg00), val,
-1, overflow | TREE_OVERFLOW (prod));
}
else if (tree_int_cst_sgn (arg01) >= 0)
{
tmp = int_const_binop (MINUS_EXPR, arg01,
- build_int_cst (TREE_TYPE (arg01), 1), 0);
+ build_int_cst (TREE_TYPE (arg01), 1));
switch (tree_int_cst_sgn (arg1))
{
case -1:
neg_overflow = true;
- lo = int_const_binop (MINUS_EXPR, prod, tmp, 0);
+ lo = int_const_binop (MINUS_EXPR, prod, tmp);
hi = prod;
break;
break;
case 1:
- hi = int_const_binop (PLUS_EXPR, prod, tmp, 0);
+ hi = int_const_binop (PLUS_EXPR, prod, tmp);
lo = prod;
break;
code = swap_tree_comparison (code);
tmp = int_const_binop (PLUS_EXPR, arg01,
- build_int_cst (TREE_TYPE (arg01), 1), 0);
+ build_int_cst (TREE_TYPE (arg01), 1));
switch (tree_int_cst_sgn (arg1))
{
case -1:
- hi = int_const_binop (MINUS_EXPR, prod, tmp, 0);
+ hi = int_const_binop (MINUS_EXPR, prod, tmp);
lo = prod;
break;
case 1:
neg_overflow = true;
- lo = int_const_binop (PLUS_EXPR, prod, tmp, 0);
+ lo = int_const_binop (PLUS_EXPR, prod, tmp);
hi = prod;
break;
operations as unsigned. If we must use the AND, we have a choice.
Normally unsigned is faster, but for some machines signed is. */
#ifdef LOAD_EXTEND_OP
- ops_unsigned = (LOAD_EXTEND_OP (operand_mode) == SIGN_EXTEND
+ ops_unsigned = (LOAD_EXTEND_OP (operand_mode) == SIGN_EXTEND
&& !flag_syntax_only) ? 0 : 1;
#else
ops_unsigned = 1;
return NULL_TREE;
if (TREE_CODE (arg1) == INTEGER_CST)
- arg1 = force_fit_type_double (inner_type, TREE_INT_CST_LOW (arg1),
- TREE_INT_CST_HIGH (arg1), 0,
- TREE_OVERFLOW (arg1));
+ arg1 = force_fit_type_double (inner_type, tree_to_double_int (arg1),
+ 0, TREE_OVERFLOW (arg1));
else
arg1 = fold_convert_loc (loc, inner_type, arg1);
STRIP_NOPS (arg0);
STRIP_NOPS (arg1);
-
+
if (TREE_CODE (arg0) == INTEGER_CST)
{
s = arg0;
{
if (TREE_CODE (ref) == ARRAY_REF)
{
+ tree domain;
+
/* Remember if this was a multi-dimensional array. */
if (TREE_CODE (TREE_OPERAND (ref, 0)) == ARRAY_REF)
mdim = true;
- itype = TYPE_DOMAIN (TREE_TYPE (TREE_OPERAND (ref, 0)));
- if (! itype)
+ domain = TYPE_DOMAIN (TREE_TYPE (TREE_OPERAND (ref, 0)));
+ if (! domain)
continue;
+ itype = TREE_TYPE (domain);
step = array_ref_element_size (ref);
if (TREE_CODE (step) != INTEGER_CST)
tree tmp;
if (TREE_CODE (TREE_OPERAND (ref, 1)) != INTEGER_CST
- || !INTEGRAL_TYPE_P (itype)
- || !TYPE_MAX_VALUE (itype)
- || TREE_CODE (TYPE_MAX_VALUE (itype)) != INTEGER_CST)
+ || !TYPE_MAX_VALUE (domain)
+ || TREE_CODE (TYPE_MAX_VALUE (domain)) != INTEGER_CST)
continue;
tmp = fold_binary_loc (loc, PLUS_EXPR, itype,
- fold_convert_loc (loc, itype,
- TREE_OPERAND (ref, 1)),
- fold_convert_loc (loc, itype, delta));
+ fold_convert_loc (loc, itype,
+ TREE_OPERAND (ref, 1)),
+ fold_convert_loc (loc, itype, delta));
if (!tmp
|| TREE_CODE (tmp) != INTEGER_CST
- || tree_int_cst_lt (TYPE_MAX_VALUE (itype), tmp))
+ || tree_int_cst_lt (TYPE_MAX_VALUE (domain), tmp))
continue;
}
int11 = TREE_INT_CST_LOW (arg11);
/* Move min of absolute values to int11. */
- if ((int01 >= 0 ? int01 : -int01)
- < (int11 >= 0 ? int11 : -int11))
+ if (absu_hwi (int01) < absu_hwi (int11))
{
tmp = int01, int01 = int11, int11 = tmp;
alt0 = arg00, arg00 = arg10, arg10 = alt0;
else
maybe_same = arg11;
- if (exact_log2 (abs (int11)) > 0 && int01 % int11 == 0
+ if (exact_log2 (absu_hwi (int11)) > 0 && int01 % int11 == 0
/* The remainder should not be a constant, otherwise we
end up folding i * 4 + 2 to (i * 2 + 1) * 2 which has
increased the number of multiplications necessary. */
int total_bytes = GET_MODE_SIZE (TYPE_MODE (type));
int byte, offset, word, words;
unsigned char value;
- unsigned int HOST_WIDE_INT lo = 0;
- HOST_WIDE_INT hi = 0;
+ double_int result;
if (total_bytes > len)
return NULL_TREE;
if (total_bytes * BITS_PER_UNIT > 2 * HOST_BITS_PER_WIDE_INT)
return NULL_TREE;
+
+ result = double_int_zero;
words = total_bytes / UNITS_PER_WORD;
for (byte = 0; byte < total_bytes; byte++)
value = ptr[offset];
if (bitpos < HOST_BITS_PER_WIDE_INT)
- lo |= (unsigned HOST_WIDE_INT) value << bitpos;
+ result.low |= (unsigned HOST_WIDE_INT) value << bitpos;
else
- hi |= (unsigned HOST_WIDE_INT) value
- << (bitpos - HOST_BITS_PER_WIDE_INT);
+ result.high |= (unsigned HOST_WIDE_INT) value
+ << (bitpos - HOST_BITS_PER_WIDE_INT);
}
- return build_int_cst_wide_type (type, lo, hi);
+ return double_int_to_tree (type, result);
}
if (TREE_CODE (t) == WITH_SIZE_EXPR)
t = TREE_OPERAND (t, 0);
- /* Note: doesn't apply to ALIGN_INDIRECT_REF */
- if (TREE_CODE (t) == INDIRECT_REF
- || TREE_CODE (t) == MISALIGNED_INDIRECT_REF)
+ if (TREE_CODE (t) == INDIRECT_REF)
{
t = TREE_OPERAND (t, 0);
if (TREE_TYPE (t) != ptrtype)
- {
- t = build1 (NOP_EXPR, ptrtype, t);
- SET_EXPR_LOCATION (t, loc);
- }
+ t = build1_loc (loc, NOP_EXPR, ptrtype, t);
}
+ else if (TREE_CODE (t) == MEM_REF
+ && integer_zerop (TREE_OPERAND (t, 1)))
+ return TREE_OPERAND (t, 0);
else if (TREE_CODE (t) == VIEW_CONVERT_EXPR)
{
t = build_fold_addr_expr_loc (loc, TREE_OPERAND (t, 0));
t = fold_convert_loc (loc, ptrtype, t);
}
else
- {
- t = build1 (ADDR_EXPR, ptrtype, t);
- SET_EXPR_LOCATION (t, loc);
- }
+ t = build1_loc (loc, ADDR_EXPR, ptrtype, t);
return t;
}
if (arg0)
{
if (CONVERT_EXPR_CODE_P (code)
- || code == FLOAT_EXPR || code == ABS_EXPR)
+ || code == FLOAT_EXPR || code == ABS_EXPR || code == NEGATE_EXPR)
{
/* Don't use STRIP_NOPS, because signedness of argument type
matters. */
(TREE_TYPE (TREE_OPERAND (TREE_OPERAND (tem, 1), 0))))
&& TYPE_PRECISION (TREE_TYPE (tem)) <= BITS_PER_WORD)
|| flag_syntax_only))
- {
- tem = build1 (code, type,
- build3 (COND_EXPR,
- TREE_TYPE (TREE_OPERAND
- (TREE_OPERAND (tem, 1), 0)),
- TREE_OPERAND (tem, 0),
- TREE_OPERAND (TREE_OPERAND (tem, 1), 0),
- TREE_OPERAND (TREE_OPERAND (tem, 2), 0)));
- SET_EXPR_LOCATION (tem, loc);
- }
+ tem = build1_loc (loc, code, type,
+ build3 (COND_EXPR,
+ TREE_TYPE (TREE_OPERAND
+ (TREE_OPERAND (tem, 1), 0)),
+ TREE_OPERAND (tem, 0),
+ TREE_OPERAND (TREE_OPERAND (tem, 1), 0),
+ TREE_OPERAND (TREE_OPERAND (tem, 2),
+ 0)));
return tem;
}
- else if (COMPARISON_CLASS_P (arg0))
- {
- if (TREE_CODE (type) == BOOLEAN_TYPE)
- {
- arg0 = copy_node (arg0);
- TREE_TYPE (arg0) = type;
- return arg0;
- }
- else if (TREE_CODE (type) != INTEGER_TYPE)
- return fold_build3_loc (loc, COND_EXPR, type, arg0,
- fold_build1_loc (loc, code, type,
- integer_one_node),
- fold_build1_loc (loc, code, type,
- integer_zero_node));
- }
}
switch (code)
case FIX_TRUNC_EXPR:
if (TREE_TYPE (op0) == type)
return op0;
-
- /* If we have (type) (a CMP b) and type is an integral type, return
- new expression involving the new type. */
- if (COMPARISON_CLASS_P (op0) && INTEGRAL_TYPE_P (type))
- return fold_build2_loc (loc, TREE_CODE (op0), type, TREE_OPERAND (op0, 0),
- TREE_OPERAND (op0, 1));
+
+ if (COMPARISON_CLASS_P (op0))
+ {
+ /* If we have (type) (a CMP b) and type is an integral type, return
+ new expression involving the new type. Canonicalize
+ (type) (a CMP b) to (a CMP b) ? (type) true : (type) false for
+ non-integral type.
+ Do not fold the result as that would not simplify further, and
+ re-folding the built expression could recurse indefinitely. */
+ if (TREE_CODE (type) == BOOLEAN_TYPE)
+ return build2_loc (loc, TREE_CODE (op0), type,
+ TREE_OPERAND (op0, 0),
+ TREE_OPERAND (op0, 1));
+ else if (!INTEGRAL_TYPE_P (type))
+ return build3_loc (loc, COND_EXPR, type, op0,
+ constant_boolean_node (true, type),
+ constant_boolean_node (false, type));
+ }
/* Handle cases of two conversions in a row. */
if (CONVERT_EXPR_P (op0))
&mode, &unsignedp, &volatilep, false);
/* If the reference was to a (constant) zero offset, we can use
the address of the base if it has the same base type
- as the result type. */
+ as the result type and the pointer type is unqualified. */
if (! offset && bitpos == 0
- && TYPE_MAIN_VARIANT (TREE_TYPE (type))
+ && (TYPE_MAIN_VARIANT (TREE_TYPE (type))
== TYPE_MAIN_VARIANT (TREE_TYPE (base)))
+ && TYPE_QUALS (type) == TYPE_UNQUALIFIED)
return fold_convert_loc (loc, type,
build_fold_addr_expr_loc (loc, base));
}
unless assigning a bitfield. */
tem = fold_build1_loc (loc, code, type, TREE_OPERAND (op0, 1));
/* First do the assignment, then return converted constant. */
- tem = build2 (COMPOUND_EXPR, TREE_TYPE (tem), op0, tem);
+ tem = build2_loc (loc, COMPOUND_EXPR, TREE_TYPE (tem), op0, tem);
TREE_NO_WARNING (tem) = 1;
TREE_USED (tem) = 1;
- SET_EXPR_LOCATION (tem, loc);
return tem;
}
}
if (change)
{
- tem = force_fit_type_double (type, TREE_INT_CST_LOW (and1),
- TREE_INT_CST_HIGH (and1), 0,
- TREE_OVERFLOW (and1));
+ tem = force_fit_type_double (type, tree_to_double_int (and1),
+ 0, TREE_OVERFLOW (and1));
return fold_build2_loc (loc, BIT_AND_EXPR, type,
fold_convert_loc (loc, type, and0), tem);
}
tree arg00 = TREE_OPERAND (arg0, 0);
tree arg01 = TREE_OPERAND (arg0, 1);
- return fold_build2_loc (loc,
- TREE_CODE (arg0), type,
- fold_convert_loc (loc, type, arg00),
- fold_convert_loc (loc, sizetype, arg01));
+ return fold_build_pointer_plus_loc
+ (loc, fold_convert_loc (loc, type, arg00), arg01);
}
/* Convert (T1)(~(T2)X) into ~(T1)X if T1 and T2 are integral types
tem = fold_convert_const (code, type, op0);
return tem ? tem : NULL_TREE;
+ case ADDR_SPACE_CONVERT_EXPR:
+ if (integer_zerop (arg0))
+ return fold_convert_const (code, type, arg0);
+ return NULL_TREE;
+
case FIXED_CONVERT_EXPR:
tem = fold_convert_const (code, type, arg0);
return tem ? tem : NULL_TREE;
if (TREE_CODE (op0) == VIEW_CONVERT_EXPR)
return fold_build1_loc (loc, VIEW_CONVERT_EXPR,
type, TREE_OPERAND (op0, 0));
+ if (TREE_CODE (op0) == MEM_REF)
+ return fold_build2_loc (loc, MEM_REF, type,
+ TREE_OPERAND (op0, 0), TREE_OPERAND (op0, 1));
/* For integral conversions with the same precision or pointer
conversions use a NOP_EXPR instead. */
case IMAGPART_EXPR:
if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
- return fold_convert_loc (loc, type, integer_zero_node);
+ return build_zero_cst (type);
if (TREE_CODE (arg0) == COMPLEX_EXPR)
return omit_one_operand_loc (loc, type, TREE_OPERAND (arg0, 1),
TREE_OPERAND (arg0, 0));
}
return NULL_TREE;
+ case INDIRECT_REF:
+ /* Fold *&X to X if X is an lvalue. */
+ if (TREE_CODE (op0) == ADDR_EXPR)
+ {
+ tree op00 = TREE_OPERAND (op0, 0);
+ if ((TREE_CODE (op00) == VAR_DECL
+ || TREE_CODE (op00) == PARM_DECL
+ || TREE_CODE (op00) == RESULT_DECL)
+ && !TREE_READONLY (op00))
+ return op00;
+ }
+ return NULL_TREE;
+
default:
return NULL_TREE;
} /* switch (code) */
return res;
}
+/* Fold a binary bitwise/truth expression of code CODE and type TYPE with
+ operands OP0 and OP1. LOC is the location of the resulting expression.
+ ARG0 and ARG1 are the NOP_STRIPed results of OP0 and OP1.
+ Return the folded expression if folding is successful. Otherwise,
+ return NULL_TREE. */
+static tree
+fold_truth_andor (location_t loc, enum tree_code code, tree type,
+ tree arg0, tree arg1, tree op0, tree op1)
+{
+ tree tem;
+
+ /* We only do these simplifications if we are optimizing. */
+ if (!optimize)
+ return NULL_TREE;
+
+ /* Check for things like (A || B) && (A || C). We can convert this
+ to A || (B && C). Note that either operator can be any of the four
+ truth and/or operations and the transformation will still be
+ valid. Also note that we only care about order for the
+ ANDIF and ORIF operators. If B contains side effects, this
+ might change the truth-value of A. */
+ if (TREE_CODE (arg0) == TREE_CODE (arg1)
+ && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
+ || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
+ || TREE_CODE (arg0) == TRUTH_AND_EXPR
+ || TREE_CODE (arg0) == TRUTH_OR_EXPR)
+ && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg0, 1)))
+ {
+ tree a00 = TREE_OPERAND (arg0, 0);
+ tree a01 = TREE_OPERAND (arg0, 1);
+ tree a10 = TREE_OPERAND (arg1, 0);
+ tree a11 = TREE_OPERAND (arg1, 1);
+ /* Non-zero when the inner operator is fully commutative, i.e. the
+    short-circuit ANDIF/ORIF forms (which fix evaluation order) are
+    not involved at either level.  */
+ int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
+ || TREE_CODE (arg0) == TRUTH_AND_EXPR)
+ && (code == TRUTH_AND_EXPR
+ || code == TRUTH_OR_EXPR));
+
+ if (operand_equal_p (a00, a10, 0))
+ return fold_build2_loc (loc, TREE_CODE (arg0), type, a00,
+ fold_build2_loc (loc, code, type, a01, a11));
+ else if (commutative && operand_equal_p (a00, a11, 0))
+ return fold_build2_loc (loc, TREE_CODE (arg0), type, a00,
+ fold_build2_loc (loc, code, type, a01, a10));
+ else if (commutative && operand_equal_p (a01, a10, 0))
+ return fold_build2_loc (loc, TREE_CODE (arg0), type, a01,
+ fold_build2_loc (loc, code, type, a00, a11));
+
+ /* This case is tricky because we must either have commutative
+ operators or else A10 must not have side-effects. */
+
+ else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
+ && operand_equal_p (a01, a11, 0))
+ return fold_build2_loc (loc, TREE_CODE (arg0), type,
+ fold_build2_loc (loc, code, type, a00, a10),
+ a01);
+ }
+
+ /* See if we can build a range comparison. */
+ if (0 != (tem = fold_range_test (loc, code, type, op0, op1)))
+ return tem;
+
+ /* Try dropping an arm of ARG0 that is the inverse of ARG1, as in
+    (A && B) || !B -> A || !B (see merge_truthop_with_opposite_arm).  */
+ if ((code == TRUTH_ANDIF_EXPR && TREE_CODE (arg0) == TRUTH_ORIF_EXPR)
+ || (code == TRUTH_ORIF_EXPR && TREE_CODE (arg0) == TRUTH_ANDIF_EXPR))
+ {
+ tem = merge_truthop_with_opposite_arm (loc, arg0, arg1, true);
+ if (tem)
+ return fold_build2_loc (loc, code, type, tem, arg1);
+ }
+
+ /* Likewise with the roles of ARG0 and ARG1 swapped.  */
+ if ((code == TRUTH_ANDIF_EXPR && TREE_CODE (arg1) == TRUTH_ORIF_EXPR)
+ || (code == TRUTH_ORIF_EXPR && TREE_CODE (arg1) == TRUTH_ANDIF_EXPR))
+ {
+ tem = merge_truthop_with_opposite_arm (loc, arg1, arg0, false);
+ if (tem)
+ return fold_build2_loc (loc, code, type, arg0, tem);
+ }
+
+ /* Check for the possibility of merging component references. If our
+ lhs is another similar operation, try to merge its rhs with our
+ rhs. Then try to merge our lhs and rhs. */
+ if (TREE_CODE (arg0) == code
+ && 0 != (tem = fold_truthop (loc, code, type,
+ TREE_OPERAND (arg0, 1), arg1)))
+ return fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 0), tem);
+
+ if ((tem = fold_truthop (loc, code, type, arg0, arg1)) != 0)
+ return tem;
+
+ return NULL_TREE;
+}
+
/* Fold a binary expression of code CODE and type TYPE with operands
OP0 and OP1, containing either a MIN-MAX or a MAX-MIN combination.
Return the folded expression if folding is successful. Otherwise,
return NULL_TREE;
t = int_const_binop (sgn0 == -1 ? PLUS_EXPR : MINUS_EXPR,
- cst0, build_int_cst (TREE_TYPE (cst0), 1), 0);
+ cst0, build_int_cst (TREE_TYPE (cst0), 1));
if (code0 != INTEGER_CST)
t = fold_build2_loc (loc, code0, TREE_TYPE (arg0), TREE_OPERAND (arg0, 0), t);
+ t = fold_convert (TREE_TYPE (arg1), t);
/* If swapping might yield to a more canonical form, do so. */
if (swap)
&& (TREE_CODE (lhs) != INTEGER_CST
|| !TREE_OVERFLOW (lhs)))
{
- fold_overflow_warning (("assuming signed overflow does not occur "
- "when changing X +- C1 cmp C2 to "
- "X cmp C1 +- C2"),
- WARN_STRICT_OVERFLOW_COMPARISON);
+ if (code != EQ_EXPR && code != NE_EXPR)
+ fold_overflow_warning ("assuming signed overflow does not occur "
+ "when changing X +- C1 cmp C2 to "
+ "X cmp C1 +- C2",
+ WARN_STRICT_OVERFLOW_COMPARISON);
return fold_build2_loc (loc, code, type, variable, lhs);
}
}
else if (TREE_CODE (arg0) == POINTER_PLUS_EXPR)
{
base0 = TREE_OPERAND (arg0, 0);
+ STRIP_SIGN_NOPS (base0);
+ if (TREE_CODE (base0) == ADDR_EXPR)
+ {
+ base0 = TREE_OPERAND (base0, 0);
+ indirect_base0 = true;
+ }
offset0 = TREE_OPERAND (arg0, 1);
}
else if (TREE_CODE (arg1) == POINTER_PLUS_EXPR)
{
base1 = TREE_OPERAND (arg1, 0);
+ STRIP_SIGN_NOPS (base1);
+ if (TREE_CODE (base1) == ADDR_EXPR)
+ {
+ base1 = TREE_OPERAND (base1, 0);
+ indirect_base1 = true;
+ }
offset1 = TREE_OPERAND (arg1, 1);
}
+ /* A local variable can never be pointed to by
+ the default SSA name of an incoming parameter. */
+ if ((TREE_CODE (arg0) == ADDR_EXPR
+ && indirect_base0
+ && TREE_CODE (base0) == VAR_DECL
+ && auto_var_in_fn_p (base0, current_function_decl)
+ && !indirect_base1
+ && TREE_CODE (base1) == SSA_NAME
+ && TREE_CODE (SSA_NAME_VAR (base1)) == PARM_DECL
+ && SSA_NAME_IS_DEFAULT_DEF (base1))
+ || (TREE_CODE (arg1) == ADDR_EXPR
+ && indirect_base1
+ && TREE_CODE (base1) == VAR_DECL
+ && auto_var_in_fn_p (base1, current_function_decl)
+ && !indirect_base0
+ && TREE_CODE (base0) == SSA_NAME
+ && TREE_CODE (SSA_NAME_VAR (base0)) == PARM_DECL
+ && SSA_NAME_IS_DEFAULT_DEF (base0)))
+ {
+ if (code == NE_EXPR)
+ return constant_boolean_node (1, type);
+ else if (code == EQ_EXPR)
+ return constant_boolean_node (0, type);
+ }
/* If we have equivalent bases we might be able to simplify. */
- if (indirect_base0 == indirect_base1
- && operand_equal_p (base0, base1, 0))
+ else if (indirect_base0 == indirect_base1
+ && operand_equal_p (base0, base1, 0))
{
/* We can fold this expression to a constant if the non-constant
offset parts are equal. */
&& operand_equal_p (offset0, offset1, 0)))
&& (code == EQ_EXPR
|| code == NE_EXPR
+ || (indirect_base0 && DECL_P (base0))
|| POINTER_TYPE_OVERFLOW_UNDEFINED))
-
+
{
if (code != EQ_EXPR
&& code != NE_EXPR
6.5.6/8 and /9 with respect to the signed ptrdiff_t. */
else if (bitpos0 == bitpos1
&& ((code == EQ_EXPR || code == NE_EXPR)
+ || (indirect_base0 && DECL_P (base0))
|| POINTER_TYPE_OVERFLOW_UNDEFINED))
{
- tree signed_size_type_node;
- signed_size_type_node = signed_type_for (size_type_node);
-
/* By converting to signed size type we cover middle-end pointer
arithmetic which operates on unsigned pointer types of size
type size and ARRAY_REF offsets which are properly sign or
zero extended from their type in case it is narrower than
size type. */
if (offset0 == NULL_TREE)
- offset0 = build_int_cst (signed_size_type_node, 0);
+ offset0 = build_int_cst (ssizetype, 0);
else
- offset0 = fold_convert_loc (loc, signed_size_type_node,
- offset0);
+ offset0 = fold_convert_loc (loc, ssizetype, offset0);
if (offset1 == NULL_TREE)
- offset1 = build_int_cst (signed_size_type_node, 0);
+ offset1 = build_int_cst (ssizetype, 0);
else
- offset1 = fold_convert_loc (loc, signed_size_type_node,
- offset1);
+ offset1 = fold_convert_loc (loc, ssizetype, offset1);
if (code != EQ_EXPR
&& code != NE_EXPR
of lower absolute value than before. */
cst = int_const_binop (TREE_CODE (arg0) == TREE_CODE (arg1)
? MINUS_EXPR : PLUS_EXPR,
- const2, const1, 0);
+ const2, const1);
if (!TREE_OVERFLOW (cst)
&& tree_int_cst_compare (const2, cst) == tree_int_cst_sgn (const2))
{
cst = int_const_binop (TREE_CODE (arg0) == TREE_CODE (arg1)
? MINUS_EXPR : PLUS_EXPR,
- const1, const2, 0);
+ const1, const2);
if (!TREE_OVERFLOW (cst)
&& tree_int_cst_compare (const1, cst) == tree_int_cst_sgn (const1))
{
tree variable1 = TREE_OPERAND (arg0, 0);
enum tree_code cmp_code = code;
- gcc_assert (!integer_zerop (const1));
+ /* Handle unfolded multiplication by zero. */
+ if (integer_zerop (const1))
+ return fold_build2_loc (loc, cmp_code, type, const1, const2);
fold_overflow_warning (("assuming signed overflow does not occur when "
"eliminating multiplication in comparison "
return fold_build2_loc (loc, cmp_code, type, variable1, const2);
}
- tem = maybe_canonicalize_comparison (loc, code, type, op0, op1);
+ tem = maybe_canonicalize_comparison (loc, code, type, arg0, arg1);
if (tem)
return tem;
return fold_build2_loc (loc, swap_tree_comparison (code), type,
TREE_OPERAND (arg0, 0),
build_real (TREE_TYPE (arg1),
- REAL_VALUE_NEGATE (cst)));
+ real_value_negate (&cst)));
/* IEEE doesn't distinguish +0 and -0 in comparisons. */
/* a CMP (-0) -> a CMP 0 */
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == REAL_CST
&& 0 != (tem = const_binop (TREE_CODE (arg0) == PLUS_EXPR
? MINUS_EXPR : PLUS_EXPR,
- arg1, TREE_OPERAND (arg0, 1), 0))
+ arg1, TREE_OPERAND (arg0, 1)))
&& !TREE_OVERFLOW (tem))
return fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 0), tem);
/* Likewise, we can simplify a comparison of a real constant with
a MINUS_EXPR whose first operand is also a real constant, i.e.
- (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
+ (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
floating-point types only if -fassociative-math is set. */
if (flag_associative_math
&& TREE_CODE (arg1) == REAL_CST
&& TREE_CODE (arg0) == MINUS_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 0)) == REAL_CST
&& 0 != (tem = const_binop (MINUS_EXPR, TREE_OPERAND (arg0, 0),
- arg1, 0))
+ arg1))
&& !TREE_OVERFLOW (tem))
return fold_build2_loc (loc, swap_tree_comparison (code), type,
TREE_OPERAND (arg0, 1), tem);
fold_build2_loc (loc, MULT_EXPR, itype, rpart, rpart),
fold_build2_loc (loc, MULT_EXPR, itype, ipart, ipart));
return fold_build2_loc (loc, COMPLEX_EXPR, type, tem,
- fold_convert_loc (loc, itype, integer_zero_node));
+ build_zero_cst (itype));
}
{
enum tree_code code;
- *residue = 0;
-
- code = TREE_CODE (expr);
- if (code == ADDR_EXPR)
- {
- expr = TREE_OPERAND (expr, 0);
- if (handled_component_p (expr))
- {
- HOST_WIDE_INT bitsize, bitpos;
- tree offset;
- enum machine_mode mode;
- int unsignedp, volatilep;
-
- expr = get_inner_reference (expr, &bitsize, &bitpos, &offset,
- &mode, &unsignedp, &volatilep, false);
- *residue = bitpos / BITS_PER_UNIT;
- if (offset)
- {
- if (TREE_CODE (offset) == INTEGER_CST)
- *residue += TREE_INT_CST_LOW (offset);
- else
- /* We don't handle more complicated offset expressions. */
- return 1;
- }
- }
+ *residue = 0;
- if (DECL_P (expr)
- && (allow_func_align || TREE_CODE (expr) != FUNCTION_DECL))
- return DECL_ALIGN_UNIT (expr);
+ code = TREE_CODE (expr);
+ if (code == ADDR_EXPR)
+ {
+ unsigned int bitalign;
+ bitalign = get_object_alignment_1 (TREE_OPERAND (expr, 0), residue);
+ *residue /= BITS_PER_UNIT;
+ return bitalign / BITS_PER_UNIT;
}
else if (code == POINTER_PLUS_EXPR)
{
tree op0, op1;
unsigned HOST_WIDE_INT modulus;
enum tree_code inner_code;
-
+
op0 = TREE_OPERAND (expr, 0);
STRIP_NOPS (op0);
modulus = get_pointer_modulus_and_residue (op0, residue,
if (TREE_CODE (op1) == INTEGER_CST)
{
unsigned HOST_WIDE_INT align;
-
+
/* Compute the greatest power-of-2 divisor of op1. */
align = TREE_INT_CST_LOW (op1);
align &= -align;
}
}
- /* If we get here, we were unable to determine anything useful about the
- expression. */
- return 1;
+ /* If we get here, we were unable to determine anything useful about the
+ expression. */
+ return 1;
}
/* Make sure type and arg0 have the same saturating flag. */
gcc_assert (TYPE_SATURATING (type)
== TYPE_SATURATING (TREE_TYPE (arg0)));
- tem = const_binop (code, arg0, arg1, 0);
+ tem = const_binop (code, arg0, arg1);
}
else if (kind == tcc_comparison)
tem = fold_relational_const (code, type, arg0, arg1);
tem = fold_build2_loc (loc, code, type,
fold_convert_loc (loc, TREE_TYPE (op0),
TREE_OPERAND (arg0, 1)), op1);
- protected_set_expr_location (tem, loc);
- tem = build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0), tem);
- goto fold_binary_exit;
+ return build2_loc (loc, COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
+ tem);
}
if (TREE_CODE (arg1) == COMPOUND_EXPR
&& reorder_operands_p (arg0, TREE_OPERAND (arg1, 0)))
tem = fold_build2_loc (loc, code, type, op0,
fold_convert_loc (loc, TREE_TYPE (op1),
TREE_OPERAND (arg1, 1)));
- protected_set_expr_location (tem, loc);
- tem = build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0), tem);
- goto fold_binary_exit;
+ return build2_loc (loc, COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
+ tem);
}
if (TREE_CODE (arg0) == COND_EXPR || COMPARISON_CLASS_P (arg0))
{
tem = fold_binary_op_with_conditional_arg (loc, code, type, op0, op1,
- arg0, arg1,
+ arg0, arg1,
/*cond_first_p=*/1);
if (tem != NULL_TREE)
return tem;
if (TREE_CODE (arg1) == COND_EXPR || COMPARISON_CLASS_P (arg1))
{
tem = fold_binary_op_with_conditional_arg (loc, code, type, op0, op1,
- arg1, arg0,
+ arg1, arg0,
/*cond_first_p=*/0);
if (tem != NULL_TREE)
return tem;
switch (code)
{
+ case MEM_REF:
+ /* MEM[&MEM[p, CST1], CST2] -> MEM[p, CST1 + CST2]. */
+ if (TREE_CODE (arg0) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (arg0, 0)) == MEM_REF)
+ {
+ tree iref = TREE_OPERAND (arg0, 0);
+ return fold_build2 (MEM_REF, type,
+ TREE_OPERAND (iref, 0),
+ int_const_binop (PLUS_EXPR, arg1,
+ TREE_OPERAND (iref, 1)));
+ }
+
+ /* MEM[&a.b, CST2] -> MEM[&a, offsetof (a, b) + CST2]. */
+ if (TREE_CODE (arg0) == ADDR_EXPR
+ && handled_component_p (TREE_OPERAND (arg0, 0)))
+ {
+ tree base;
+ HOST_WIDE_INT coffset;
+ base = get_addr_base_and_unit_offset (TREE_OPERAND (arg0, 0),
+ &coffset);
+ if (!base)
+ return NULL_TREE;
+ return fold_build2 (MEM_REF, type,
+ build_fold_addr_expr (base),
+ int_const_binop (PLUS_EXPR, arg1,
+ size_int (coffset)));
+ }
+
+ return NULL_TREE;
+
case POINTER_PLUS_EXPR:
/* 0 +p index -> (type)index */
if (integer_zerop (arg0))
fold_convert_loc (loc, sizetype,
arg0)));
- /* index +p PTR -> PTR +p index */
- if (POINTER_TYPE_P (TREE_TYPE (arg1))
- && INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
- return fold_build2_loc (loc, POINTER_PLUS_EXPR, type,
- fold_convert_loc (loc, type, arg1),
- fold_convert_loc (loc, sizetype, arg0));
-
/* (PTR +p B) +p A -> PTR +p (B + A) */
if (TREE_CODE (arg0) == POINTER_PLUS_EXPR)
{
inner = fold_build2_loc (loc, PLUS_EXPR, sizetype,
arg01, fold_convert_loc (loc, sizetype, arg1));
return fold_convert_loc (loc, type,
- fold_build2_loc (loc, POINTER_PLUS_EXPR,
- TREE_TYPE (arg00),
- arg00, inner));
+ fold_build_pointer_plus_loc (loc,
+ arg00, inner));
}
/* PTR_CST +p CST -> CST1 */
&& TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
&& integer_zerop (const_binop (BIT_AND_EXPR,
TREE_OPERAND (arg0, 1),
- TREE_OPERAND (arg1, 1), 0)))
+ TREE_OPERAND (arg1, 1))))
{
code = BIT_IOR_EXPR;
goto bit_ior;
/* Reassociate (plus (plus (mult) (foo)) (mult)) as
(plus (plus (mult) (mult)) (foo)) so that we can
take advantage of the factoring cases below. */
- if (((TREE_CODE (arg0) == PLUS_EXPR
- || TREE_CODE (arg0) == MINUS_EXPR)
- && TREE_CODE (arg1) == MULT_EXPR)
- || ((TREE_CODE (arg1) == PLUS_EXPR
- || TREE_CODE (arg1) == MINUS_EXPR)
- && TREE_CODE (arg0) == MULT_EXPR))
+ if (TYPE_OVERFLOW_WRAPS (type)
+ && (((TREE_CODE (arg0) == PLUS_EXPR
+ || TREE_CODE (arg0) == MINUS_EXPR)
+ && TREE_CODE (arg1) == MULT_EXPR)
+ || ((TREE_CODE (arg1) == PLUS_EXPR
+ || TREE_CODE (arg1) == MINUS_EXPR)
+ && TREE_CODE (arg0) == MULT_EXPR)))
{
tree parg0, parg1, parg, marg;
enum tree_code pcode;
return fold_build2_loc (loc, MULT_EXPR, type, arg0,
build_real (type, dconst2));
- /* Convert a + (b*c + d*e) into (a + b*c) + d*e.
+ /* Convert a + (b*c + d*e) into (a + b*c) + d*e.
We associate floats only if the user has specified
-fassociative-math. */
if (flag_associative_math
return fold_build2_loc (loc, PLUS_EXPR, type, tree0, tree11);
}
}
- /* Convert (b*c + d*e) + a into b*c + (d*e +a).
+ /* Convert (b*c + d*e) + a into b*c + (d*e +a).
We associate floats only if the user has specified
-fassociative-math. */
if (flag_associative_math
&& ((TREE_INT_CST_LOW (tree01) + TREE_INT_CST_LOW (tree11))
== TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
{
- tem = build2 (LROTATE_EXPR,
- TREE_TYPE (TREE_OPERAND (arg0, 0)),
- TREE_OPERAND (arg0, 0),
- code0 == LSHIFT_EXPR
- ? tree01 : tree11);
- SET_EXPR_LOCATION (tem, loc);
+ tem = build2_loc (loc, LROTATE_EXPR,
+ TREE_TYPE (TREE_OPERAND (arg0, 0)),
+ TREE_OPERAND (arg0, 0),
+ code0 == LSHIFT_EXPR ? tree01 : tree11);
return fold_convert_loc (loc, type, tem);
}
else if (code11 == MINUS_EXPR)
var1 = split_tree (arg1, code, &con1, &lit1, &minus_lit1,
code == MINUS_EXPR);
- /* With undefined overflow we can only associate constants
- with one variable. */
- if (((POINTER_TYPE_P (type) && POINTER_TYPE_OVERFLOW_UNDEFINED)
- || (INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type)))
- && var0 && var1)
+ /* Recombine MINUS_EXPR operands by using PLUS_EXPR. */
+ if (code == MINUS_EXPR)
+ code = PLUS_EXPR;
+
+ /* With undefined overflow we can only associate constants with one
+ variable, and constants whose association doesn't overflow. */
+ if ((POINTER_TYPE_P (type) && POINTER_TYPE_OVERFLOW_UNDEFINED)
+ || (INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type)))
{
- tree tmp0 = var0;
- tree tmp1 = var1;
-
- if (TREE_CODE (tmp0) == NEGATE_EXPR)
- tmp0 = TREE_OPERAND (tmp0, 0);
- if (TREE_CODE (tmp1) == NEGATE_EXPR)
- tmp1 = TREE_OPERAND (tmp1, 0);
- /* The only case we can still associate with two variables
- is if they are the same, modulo negation. */
- if (!operand_equal_p (tmp0, tmp1, 0))
- ok = false;
+ if (var0 && var1)
+ {
+ tree tmp0 = var0;
+ tree tmp1 = var1;
+
+ if (TREE_CODE (tmp0) == NEGATE_EXPR)
+ tmp0 = TREE_OPERAND (tmp0, 0);
+ if (TREE_CODE (tmp1) == NEGATE_EXPR)
+ tmp1 = TREE_OPERAND (tmp1, 0);
+ /* The only case we can still associate with two variables
+ is if they are the same, modulo negation. */
+ if (!operand_equal_p (tmp0, tmp1, 0))
+ ok = false;
+ }
+
+ if (ok && lit0 && lit1)
+ {
+ tree tmp0 = fold_convert (type, lit0);
+ tree tmp1 = fold_convert (type, lit1);
+
+ if (!TREE_OVERFLOW (tmp0) && !TREE_OVERFLOW (tmp1)
+ && TREE_OVERFLOW (fold_build2 (code, type, tmp0, tmp1)))
+ ok = false;
+ }
}
/* Only do something if we found more than two objects. Otherwise,
+ (lit0 != 0) + (lit1 != 0)
+ (minus_lit0 != 0) + (minus_lit1 != 0))))
{
- /* Recombine MINUS_EXPR operands by using PLUS_EXPR. */
- if (code == MINUS_EXPR)
- code = PLUS_EXPR;
-
var0 = associate_trees (loc, var0, var1, code, type);
con0 = associate_trees (loc, con0, con1, code, type);
lit0 = associate_trees (loc, lit0, lit1, code, type);
if ((!FLOAT_TYPE_P (type) || !HONOR_NANS (TYPE_MODE (type)))
&& operand_equal_p (arg0, arg1, 0))
- return fold_convert_loc (loc, type, integer_zero_node);
+ return build_zero_cst (type);
/* A - B -> A + (-B) if B is easily negatable. */
if (negate_expr_p (arg1)
tree diff = build2 (MINUS_EXPR, type, op0, op1);
return fold_build2_loc (loc, MULT_EXPR, type, diff,
fold_convert_loc (loc, type, esz));
-
+
}
}
&& TREE_CODE (TREE_OPERAND (arg0, 0)) == REAL_CST)
{
tree tem = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 0),
- arg1, 0);
+ arg1);
if (tem)
return fold_build2_loc (loc, RDIV_EXPR, type, tem,
TREE_OPERAND (arg0, 1));
}
/* Optimize x*x as pow(x,2.0), which is expanded as x*x. */
- if (optimize_function_for_speed_p (cfun)
+ if (!in_gimple_form
+ && optimize_function_for_speed_p (cfun)
&& operand_equal_p (arg0, arg1, 0))
{
tree powfn = mathfn_built_in (type, BUILT_IN_POW);
if (TREE_CODE (arg0) == BIT_NOT_EXPR
&& operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0))
{
- t1 = fold_convert_loc (loc, type, integer_zero_node);
+ t1 = build_zero_cst (type);
t1 = fold_unary_loc (loc, BIT_NOT_EXPR, type, t1);
return omit_one_operand_loc (loc, type, t1, arg1);
}
if (TREE_CODE (arg1) == BIT_NOT_EXPR
&& operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0))
{
- t1 = fold_convert_loc (loc, type, integer_zero_node);
+ t1 = build_zero_cst (type);
t1 = fold_unary_loc (loc, BIT_NOT_EXPR, type, t1);
return omit_one_operand_loc (loc, type, t1, arg0);
}
if (width > HOST_BITS_PER_WIDE_INT)
{
- mhi = (unsigned HOST_WIDE_INT) -1
+ mhi = (unsigned HOST_WIDE_INT) -1
>> (2 * HOST_BITS_PER_WIDE_INT - width);
mlo = -1;
}
&& reorder_operands_p (arg0, TREE_OPERAND (arg1, 0)))
return omit_one_operand_loc (loc, type, arg0, TREE_OPERAND (arg1, 0));
+ /* (X & ~Y) | (~X & Y) is X ^ Y */
+ if (TREE_CODE (arg0) == BIT_AND_EXPR
+ && TREE_CODE (arg1) == BIT_AND_EXPR)
+ {
+ tree a0, a1, l0, l1, n0, n1;
+
+ a0 = fold_convert_loc (loc, type, TREE_OPERAND (arg1, 0));
+ a1 = fold_convert_loc (loc, type, TREE_OPERAND (arg1, 1));
+
+ l0 = fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
+ l1 = fold_convert_loc (loc, type, TREE_OPERAND (arg0, 1));
+
+ n0 = fold_build1_loc (loc, BIT_NOT_EXPR, type, l0);
+ n1 = fold_build1_loc (loc, BIT_NOT_EXPR, type, l1);
+
+ if ((operand_equal_p (n0, a0, 0)
+ && operand_equal_p (n1, a1, 0))
+ || (operand_equal_p (n0, a1, 0)
+ && operand_equal_p (n1, a0, 0)))
+ return fold_build2_loc (loc, BIT_XOR_EXPR, type, l0, n1);
+ }
+
t1 = distribute_bit_expr (loc, code, type, arg0, arg1);
if (t1 != NULL_TREE)
return t1;
if (TREE_CODE (arg0) == BIT_NOT_EXPR
&& operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0))
{
- t1 = fold_convert_loc (loc, type, integer_zero_node);
+ t1 = build_zero_cst (type);
t1 = fold_unary_loc (loc, BIT_NOT_EXPR, type, t1);
return omit_one_operand_loc (loc, type, t1, arg1);
}
if (TREE_CODE (arg1) == BIT_NOT_EXPR
&& operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0))
{
- t1 = fold_convert_loc (loc, type, integer_zero_node);
+ t1 = build_zero_cst (type);
t1 = fold_unary_loc (loc, BIT_NOT_EXPR, type, t1);
return omit_one_operand_loc (loc, type, t1, arg0);
}
&& TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
&& integer_zerop (const_binop (BIT_AND_EXPR,
TREE_OPERAND (arg0, 1),
- TREE_OPERAND (arg1, 1), 0)))
+ TREE_OPERAND (arg1, 1))))
{
code = BIT_IOR_EXPR;
goto bit_ior;
fold_convert_loc (loc, type, t1));
return t1;
}
-
+
/* Convert ~X ^ ~Y to X ^ Y. */
if (TREE_CODE (arg0) == BIT_NOT_EXPR
&& TREE_CODE (arg1) == BIT_NOT_EXPR)
&& operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
{
tem = fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
- return fold_build2_loc (loc, BIT_AND_EXPR, type,
+ return fold_build2_loc (loc, BIT_AND_EXPR, type,
fold_build1_loc (loc, BIT_NOT_EXPR, type, tem),
fold_convert_loc (loc, type, arg1));
}
if (operand_equal_p (arg0, arg1, 0))
return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg0));
- /* ~X & X is always zero. */
- if (TREE_CODE (arg0) == BIT_NOT_EXPR
+ /* ~X & X, (X == 0) & X, and !X & X are always zero. */
+ if ((TREE_CODE (arg0) == BIT_NOT_EXPR
+ || TREE_CODE (arg0) == TRUTH_NOT_EXPR
+ || (TREE_CODE (arg0) == EQ_EXPR
+ && integer_zerop (TREE_OPERAND (arg0, 1))))
&& operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0))
return omit_one_operand_loc (loc, type, integer_zero_node, arg1);
- /* X & ~X is always zero. */
- if (TREE_CODE (arg1) == BIT_NOT_EXPR
+ /* X & ~X , X & (X == 0), and X & !X are always zero. */
+ if ((TREE_CODE (arg1) == BIT_NOT_EXPR
+ || TREE_CODE (arg1) == TRUTH_NOT_EXPR
+ || (TREE_CODE (arg1) == EQ_EXPR
+ && integer_zerop (TREE_OPERAND (arg1, 1))))
&& operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0))
return omit_one_operand_loc (loc, type, integer_zero_node, arg0);
build_int_cst (TREE_TYPE (tem), 1)),
build_int_cst (TREE_TYPE (tem), 0));
}
+ /* Fold !X & 1 as X == 0. */
+ if (TREE_CODE (arg0) == TRUTH_NOT_EXPR
+ && integer_onep (arg1))
+ {
+ tem = TREE_OPERAND (arg0, 0);
+ return fold_build2_loc (loc, EQ_EXPR, type, tem,
+ build_int_cst (TREE_TYPE (tem), 0));
+ }
/* Fold (X ^ Y) & Y as ~X & Y. */
if (TREE_CODE (arg0) == BIT_XOR_EXPR
&& operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
{
tem = fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
- return fold_build2_loc (loc, BIT_AND_EXPR, type,
+ return fold_build2_loc (loc, BIT_AND_EXPR, type,
fold_build1_loc (loc, BIT_NOT_EXPR, type, tem),
fold_convert_loc (loc, type, arg1));
}
fold_convert_loc (loc, type, arg0));
}
+ /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
+ ((A & N) + B) & M -> (A + B) & M
+ Similarly if (N & M) == 0,
+ ((A | N) + B) & M -> (A + B) & M
+ and for - instead of + (or unary - instead of +)
+ and/or ^ instead of |.
+ If B is constant and (B & M) == 0, fold into A & M. */
+ if (host_integerp (arg1, 1))
+ {
+ unsigned HOST_WIDE_INT cst1 = tree_low_cst (arg1, 1);
+ if (~cst1 && (cst1 & (cst1 + 1)) == 0
+ && INTEGRAL_TYPE_P (TREE_TYPE (arg0))
+ && (TREE_CODE (arg0) == PLUS_EXPR
+ || TREE_CODE (arg0) == MINUS_EXPR
+ || TREE_CODE (arg0) == NEGATE_EXPR)
+ && (TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
+ || TREE_CODE (TREE_TYPE (arg0)) == INTEGER_TYPE))
+ {
+ tree pmop[2];
+ int which = 0;
+ unsigned HOST_WIDE_INT cst0;
+
+ /* Now we know that arg0 is (C + D) or (C - D) or
+ -C and arg1 (M) is == (1LL << cst) - 1.
+ Store C into PMOP[0] and D into PMOP[1]. */
+ pmop[0] = TREE_OPERAND (arg0, 0);
+ pmop[1] = NULL;
+ if (TREE_CODE (arg0) != NEGATE_EXPR)
+ {
+ pmop[1] = TREE_OPERAND (arg0, 1);
+ which = 1;
+ }
+
+ if (!host_integerp (TYPE_MAX_VALUE (TREE_TYPE (arg0)), 1)
+ || (tree_low_cst (TYPE_MAX_VALUE (TREE_TYPE (arg0)), 1)
+ & cst1) != cst1)
+ which = -1;
+
+ for (; which >= 0; which--)
+ switch (TREE_CODE (pmop[which]))
+ {
+ case BIT_AND_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ if (TREE_CODE (TREE_OPERAND (pmop[which], 1))
+ != INTEGER_CST)
+ break;
+ /* tree_low_cst not used, because we don't care about
+ the upper bits. */
+ cst0 = TREE_INT_CST_LOW (TREE_OPERAND (pmop[which], 1));
+ cst0 &= cst1;
+ if (TREE_CODE (pmop[which]) == BIT_AND_EXPR)
+ {
+ if (cst0 != cst1)
+ break;
+ }
+ else if (cst0 != 0)
+ break;
+ /* If C or D is of the form (A & N) where
+ (N & M) == M, or of the form (A | N) or
+ (A ^ N) where (N & M) == 0, replace it with A. */
+ pmop[which] = TREE_OPERAND (pmop[which], 0);
+ break;
+ case INTEGER_CST:
+ /* If C or D is a N where (N & M) == 0, it can be
+ omitted (assumed 0). */
+ if ((TREE_CODE (arg0) == PLUS_EXPR
+ || (TREE_CODE (arg0) == MINUS_EXPR && which == 0))
+ && (TREE_INT_CST_LOW (pmop[which]) & cst1) == 0)
+ pmop[which] = NULL;
+ break;
+ default:
+ break;
+ }
+
+ /* Only build anything new if we optimized one or both arguments
+ above. */
+ if (pmop[0] != TREE_OPERAND (arg0, 0)
+ || (TREE_CODE (arg0) != NEGATE_EXPR
+ && pmop[1] != TREE_OPERAND (arg0, 1)))
+ {
+ tree utype = TREE_TYPE (arg0);
+ if (! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0)))
+ {
+ /* Perform the operations in a type that has defined
+ overflow behavior. */
+ utype = unsigned_type_for (TREE_TYPE (arg0));
+ if (pmop[0] != NULL)
+ pmop[0] = fold_convert_loc (loc, utype, pmop[0]);
+ if (pmop[1] != NULL)
+ pmop[1] = fold_convert_loc (loc, utype, pmop[1]);
+ }
+
+ if (TREE_CODE (arg0) == NEGATE_EXPR)
+ tem = fold_build1_loc (loc, NEGATE_EXPR, utype, pmop[0]);
+ else if (TREE_CODE (arg0) == PLUS_EXPR)
+ {
+ if (pmop[0] != NULL && pmop[1] != NULL)
+ tem = fold_build2_loc (loc, PLUS_EXPR, utype,
+ pmop[0], pmop[1]);
+ else if (pmop[0] != NULL)
+ tem = pmop[0];
+ else if (pmop[1] != NULL)
+ tem = pmop[1];
+ else
+ return build_int_cst (type, 0);
+ }
+ else if (pmop[0] == NULL)
+ tem = fold_build1_loc (loc, NEGATE_EXPR, utype, pmop[1]);
+ else
+ tem = fold_build2_loc (loc, MINUS_EXPR, utype,
+ pmop[0], pmop[1]);
+ /* TEM is now the new binary +, - or unary - replacement. */
+ tem = fold_build2_loc (loc, BIT_AND_EXPR, utype, tem,
+ fold_convert_loc (loc, utype, arg1));
+ return fold_convert_loc (loc, type, tem);
+ }
+ }
+ }
+
t1 = distribute_bit_expr (loc, code, type, arg0, arg1);
if (t1 != NULL_TREE)
return t1;
{
if (flag_reciprocal_math
&& 0 != (tem = const_binop (code, build_real (type, dconst1),
- arg1, 0)))
+ arg1)))
return fold_build2_loc (loc, MULT_EXPR, type, arg0, tem);
/* Find the reciprocal if optimizing and the result is exact. */
if (optimize)
}
}
}
- /* Convert A/B/C to A/(B*C). */
+ /* Convert A/B/C to A/(B*C). */
if (flag_reciprocal_math
&& TREE_CODE (arg0) == RDIV_EXPR)
return fold_build2_loc (loc, RDIV_EXPR, type, TREE_OPERAND (arg0, 0),
&& TREE_CODE (TREE_OPERAND (arg1, 1)) == REAL_CST)
{
tree tem = const_binop (RDIV_EXPR, arg0,
- TREE_OPERAND (arg1, 1), 0);
+ TREE_OPERAND (arg1, 1));
if (tem)
return fold_build2_loc (loc, RDIV_EXPR, type, tem,
TREE_OPERAND (arg1, 0));
return NULL_TREE;
case TRUNC_DIV_EXPR:
+ /* Optimize (X & (-A)) / A where A is a power of 2,
+ to X >> log2(A) */
+ if (TREE_CODE (arg0) == BIT_AND_EXPR
+ && !TYPE_UNSIGNED (type) && TREE_CODE (arg1) == INTEGER_CST
+ && integer_pow2p (arg1) && tree_int_cst_sgn (arg1) > 0)
+ {
+ tree sum = fold_binary_loc (loc, PLUS_EXPR, TREE_TYPE (arg1),
+ arg1, TREE_OPERAND (arg0, 1));
+ if (sum && integer_zerop (sum)) {
+ unsigned long pow2;
+
+ if (TREE_INT_CST_LOW (arg1))
+ pow2 = exact_log2 (TREE_INT_CST_LOW (arg1));
+ else
+ pow2 = exact_log2 (TREE_INT_CST_HIGH (arg1))
+ + HOST_BITS_PER_WIDE_INT;
+
+ return fold_build2_loc (loc, RSHIFT_EXPR, type,
+ TREE_OPERAND (arg0, 0),
+ build_int_cst (integer_type_node, pow2));
+ }
+ }
+
+ /* Fall thru */
+
case FLOOR_DIV_EXPR:
/* Simplify A / (B << N) where A and B are positive and B is
a power of 2, to A >> (N + log2(B)). */
if (integer_pow2p (sval) && tree_int_cst_sgn (sval) > 0)
{
tree sh_cnt = TREE_OPERAND (arg1, 1);
- unsigned long pow2 = exact_log2 (TREE_INT_CST_LOW (sval));
+ unsigned long pow2;
+
+ if (TREE_INT_CST_LOW (sval))
+ pow2 = exact_log2 (TREE_INT_CST_LOW (sval));
+ else
+ pow2 = exact_log2 (TREE_INT_CST_HIGH (sval))
+ + HOST_BITS_PER_WIDE_INT;
if (strict_overflow_p)
fold_overflow_warning (("assuming signed overflow does not "
WARN_STRICT_OVERFLOW_MISC);
sh_cnt = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (sh_cnt),
- sh_cnt, build_int_cst (NULL_TREE, pow2));
+ sh_cnt,
+ build_int_cst (TREE_TYPE (sh_cnt),
+ pow2));
return fold_build2_loc (loc, RSHIFT_EXPR, type,
fold_convert_loc (loc, type, arg0), sh_cnt);
}
&& TREE_INT_CST_HIGH (arg1) == -1)
return omit_one_operand_loc (loc, type, integer_zero_node, arg0);
- /* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
- i.e. "X % C" into "X & (C - 1)", if X and C are positive. */
- strict_overflow_p = false;
- if ((code == TRUNC_MOD_EXPR || code == FLOOR_MOD_EXPR)
- && (TYPE_UNSIGNED (type)
- || tree_expr_nonnegative_warnv_p (op0, &strict_overflow_p)))
- {
- tree c = arg1;
- /* Also optimize A % (C << N) where C is a power of 2,
- to A & ((C << N) - 1). */
- if (TREE_CODE (arg1) == LSHIFT_EXPR)
- c = TREE_OPERAND (arg1, 0);
-
- if (integer_pow2p (c) && tree_int_cst_sgn (c) > 0)
- {
- tree mask = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (arg1), arg1,
- build_int_cst (TREE_TYPE (arg1), 1));
- if (strict_overflow_p)
- fold_overflow_warning (("assuming signed overflow does not "
- "occur when simplifying "
- "X % (power of two)"),
- WARN_STRICT_OVERFLOW_MISC);
- return fold_build2_loc (loc, BIT_AND_EXPR, type,
- fold_convert_loc (loc, type, arg0),
- fold_convert_loc (loc, type, mask));
- }
- }
-
/* X % -C is the same as X % C. */
if (code == TRUNC_MOD_EXPR
&& !TYPE_UNSIGNED (type)
fold_convert_loc (loc, type,
TREE_OPERAND (arg1, 0)));
+ strict_overflow_p = false;
if (TREE_CODE (arg1) == INTEGER_CST
&& 0 != (tem = extract_muldiv (op0, arg1, code, NULL_TREE,
&strict_overflow_p)))
return fold_convert_loc (loc, type, tem);
}
+ /* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
+ i.e. "X % C" into "X & (C - 1)", if X and C are positive. */
+ if ((code == TRUNC_MOD_EXPR || code == FLOOR_MOD_EXPR)
+ && (TYPE_UNSIGNED (type)
+ || tree_expr_nonnegative_warnv_p (op0, &strict_overflow_p)))
+ {
+ tree c = arg1;
+ /* Also optimize A % (C << N) where C is a power of 2,
+ to A & ((C << N) - 1). */
+ if (TREE_CODE (arg1) == LSHIFT_EXPR)
+ c = TREE_OPERAND (arg1, 0);
+
+ if (integer_pow2p (c) && tree_int_cst_sgn (c) > 0)
+ {
+ tree mask
+ = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (arg1), arg1,
+ build_int_cst (TREE_TYPE (arg1), 1));
+ if (strict_overflow_p)
+ fold_overflow_warning (("assuming signed overflow does not "
+ "occur when simplifying "
+ "X % (power of two)"),
+ WARN_STRICT_OVERFLOW_MISC);
+ return fold_build2_loc (loc, BIT_AND_EXPR, type,
+ fold_convert_loc (loc, type, arg0),
+ fold_convert_loc (loc, type, mask));
+ }
+ }
+
return NULL_TREE;
case LROTATE_EXPR:
arg00 = fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
lshift = build_int_cst (type, -1);
- lshift = int_const_binop (code, lshift, arg1, 0);
+ lshift = int_const_binop (code, lshift, arg1);
return fold_build2_loc (loc, BIT_AND_EXPR, type, arg00, lshift);
}
{
tree tem = build_int_cst (TREE_TYPE (arg1),
TYPE_PRECISION (type));
- tem = const_binop (MINUS_EXPR, tem, arg1, 0);
+ tem = const_binop (MINUS_EXPR, tem, arg1);
return fold_build2_loc (loc, RROTATE_EXPR, type, op0, tem);
}
return fold_build2_loc (loc, code, type, arg0, tem);
}
- truth_andor:
- /* We only do these simplifications if we are optimizing. */
- if (!optimize)
- return NULL_TREE;
-
- /* Check for things like (A || B) && (A || C). We can convert this
- to A || (B && C). Note that either operator can be any of the four
- truth and/or operations and the transformation will still be
- valid. Also note that we only care about order for the
- ANDIF and ORIF operators. If B contains side effects, this
- might change the truth-value of A. */
- if (TREE_CODE (arg0) == TREE_CODE (arg1)
- && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
- || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
- || TREE_CODE (arg0) == TRUTH_AND_EXPR
- || TREE_CODE (arg0) == TRUTH_OR_EXPR)
- && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg0, 1)))
- {
- tree a00 = TREE_OPERAND (arg0, 0);
- tree a01 = TREE_OPERAND (arg0, 1);
- tree a10 = TREE_OPERAND (arg1, 0);
- tree a11 = TREE_OPERAND (arg1, 1);
- int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
- || TREE_CODE (arg0) == TRUTH_AND_EXPR)
- && (code == TRUTH_AND_EXPR
- || code == TRUTH_OR_EXPR));
-
- if (operand_equal_p (a00, a10, 0))
- return fold_build2_loc (loc, TREE_CODE (arg0), type, a00,
- fold_build2_loc (loc, code, type, a01, a11));
- else if (commutative && operand_equal_p (a00, a11, 0))
- return fold_build2_loc (loc, TREE_CODE (arg0), type, a00,
- fold_build2_loc (loc, code, type, a01, a10));
- else if (commutative && operand_equal_p (a01, a10, 0))
- return fold_build2_loc (loc, TREE_CODE (arg0), type, a01,
- fold_build2_loc (loc, code, type, a00, a11));
-
- /* This case if tricky because we must either have commutative
- operators or else A10 must not have side-effects. */
-
- else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
- && operand_equal_p (a01, a11, 0))
- return fold_build2_loc (loc, TREE_CODE (arg0), type,
- fold_build2_loc (loc, code, type, a00, a10),
- a01);
- }
-
- /* See if we can build a range comparison. */
- if (0 != (tem = fold_range_test (loc, code, type, op0, op1)))
- return tem;
-
- /* Check for the possibility of merging component references. If our
- lhs is another similar operation, try to merge its rhs with our
- rhs. Then try to merge our lhs and rhs. */
- if (TREE_CODE (arg0) == code
- && 0 != (tem = fold_truthop (loc, code, type,
- TREE_OPERAND (arg0, 1), arg1)))
- return fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 0), tem);
-
- if ((tem = fold_truthop (loc, code, type, arg0, arg1)) != 0)
- return tem;
+ if ((tem = fold_truth_andor (loc, code, type, arg0, arg1, op0, op1))
+ != NULL_TREE)
+ return tem;
return NULL_TREE;
&& operand_equal_p (arg0, TREE_OPERAND (arg1, 0), 0))
return omit_one_operand_loc (loc, type, integer_one_node, arg0);
- goto truth_andor;
+ /* (X && !Y) || (!X && Y) is X ^ Y */
+ if (TREE_CODE (arg0) == TRUTH_AND_EXPR
+ && TREE_CODE (arg1) == TRUTH_AND_EXPR)
+ {
+ tree a0, a1, l0, l1, n0, n1;
+
+ a0 = fold_convert_loc (loc, type, TREE_OPERAND (arg1, 0));
+ a1 = fold_convert_loc (loc, type, TREE_OPERAND (arg1, 1));
+
+ l0 = fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
+ l1 = fold_convert_loc (loc, type, TREE_OPERAND (arg0, 1));
+
+ n0 = fold_build1_loc (loc, TRUTH_NOT_EXPR, type, l0);
+ n1 = fold_build1_loc (loc, TRUTH_NOT_EXPR, type, l1);
+
+ if ((operand_equal_p (n0, a0, 0)
+ && operand_equal_p (n1, a1, 0))
+ || (operand_equal_p (n0, a1, 0)
+ && operand_equal_p (n1, a0, 0)))
+ return fold_build2_loc (loc, TRUTH_XOR_EXPR, type, l0, n1);
+ }
+
+ if ((tem = fold_truth_andor (loc, code, type, arg0, arg1, op0, op1))
+ != NULL_TREE)
+ return tem;
+
+ return NULL_TREE;
case TRUTH_XOR_EXPR:
/* If the second arg is constant zero, drop it. */
case EQ_EXPR:
case NE_EXPR:
+ STRIP_NOPS (arg0);
+ STRIP_NOPS (arg1);
+
tem = fold_comparison (loc, code, type, op0, op1);
if (tem != NULL_TREE)
return tem;
/* bool_var != 1 becomes !bool_var. */
if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE && integer_onep (arg1)
&& code == NE_EXPR)
- return fold_build1_loc (loc, TRUTH_NOT_EXPR, type,
- fold_convert_loc (loc, type, arg0));
+ return fold_convert_loc (loc, type,
+ fold_build1_loc (loc, TRUTH_NOT_EXPR,
+ TREE_TYPE (arg0), arg0));
/* bool_var == 0 becomes !bool_var. */
if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE && integer_zerop (arg1)
&& code == EQ_EXPR)
- return fold_build1_loc (loc, TRUTH_NOT_EXPR, type,
- fold_convert_loc (loc, type, arg0));
+ return fold_convert_loc (loc, type,
+ fold_build1_loc (loc, TRUTH_NOT_EXPR,
+ TREE_TYPE (arg0), arg0));
+
+ /* !exp != 0 becomes !exp */
+ if (TREE_CODE (arg0) == TRUTH_NOT_EXPR && integer_zerop (arg1)
+ && code == NE_EXPR)
+ return non_lvalue_loc (loc, fold_convert_loc (loc, type, arg0));
/* If this is an equality comparison of the address of two non-weak,
unaliased symbols neither of which are extern (since we do not
? MINUS_EXPR : PLUS_EXPR,
fold_convert_loc (loc, TREE_TYPE (arg0),
arg1),
- TREE_OPERAND (arg0, 1), 0))
+ TREE_OPERAND (arg0, 1)))
&& !TREE_OVERFLOW (tem))
return fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 0), tem);
/* Similarly for a NEGATE_EXPR. */
if (TREE_CODE (arg0) == NEGATE_EXPR
&& TREE_CODE (arg1) == INTEGER_CST
- && 0 != (tem = negate_expr (arg1))
+ && 0 != (tem = negate_expr (fold_convert_loc (loc, TREE_TYPE (arg0),
+ arg1)))
&& TREE_CODE (tem) == INTEGER_CST
&& !TREE_OVERFLOW (tem))
return fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 0), tem);
if ((TREE_CODE (arg0) == PLUS_EXPR
|| TREE_CODE (arg0) == POINTER_PLUS_EXPR
|| TREE_CODE (arg0) == MINUS_EXPR)
- && operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0)
+ && operand_equal_p (tree_strip_nop_conversions (TREE_OPERAND (arg0,
+ 0)),
+ arg1, 0)
&& (INTEGRAL_TYPE_P (TREE_TYPE (arg0))
|| POINTER_TYPE_P (TREE_TYPE (arg0))))
{
/* Transform comparisons of the form C - X CMP X if C % 2 == 1. */
if (TREE_CODE (arg0) == MINUS_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 0)) == INTEGER_CST
- && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0)
+ && operand_equal_p (tree_strip_nop_conversions (TREE_OPERAND (arg0,
+ 1)),
+ arg1, 0)
&& (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 0)) & 1) == 1)
{
return omit_two_operands_loc (loc, type,
{
tem = fold_build2_loc (loc, LSHIFT_EXPR, itype, arg01, arg001);
tem = fold_build2_loc (loc, BIT_AND_EXPR, itype, arg000, tem);
- return fold_build2_loc (loc, code, type, tem, arg1);
+ return fold_build2_loc (loc, code, type, tem,
+ fold_convert_loc (loc, itype, arg1));
}
/* Otherwise, for signed (arithmetic) shifts,
((X >> C1) & C2) != 0 is rewritten as X < 0, and
}
}
- /* If this is an NE comparison of zero with an AND of one, remove the
- comparison since the AND will give the correct value. */
- if (code == NE_EXPR
- && integer_zerop (arg1)
- && TREE_CODE (arg0) == BIT_AND_EXPR
- && integer_onep (TREE_OPERAND (arg0, 1)))
- return fold_convert_loc (loc, type, arg0);
-
/* If we have (A & C) == C where C is a power of 2, convert this into
(A & C) != 0. Similarly for NE_EXPR. */
if (TREE_CODE (arg0) == BIT_AND_EXPR
tree notc = fold_build1_loc (loc, BIT_NOT_EXPR,
TREE_TYPE (TREE_OPERAND (arg0, 1)),
TREE_OPERAND (arg0, 1));
- tree dandnotc = fold_build2_loc (loc, BIT_AND_EXPR, TREE_TYPE (arg0),
- arg1, notc);
+ tree dandnotc
+ = fold_build2_loc (loc, BIT_AND_EXPR, TREE_TYPE (arg0),
+ fold_convert_loc (loc, TREE_TYPE (arg0), arg1),
+ notc);
tree rslt = code == EQ_EXPR ? integer_zero_node : integer_one_node;
if (integer_nonzerop (dandnotc))
return omit_one_operand_loc (loc, type, rslt, arg0);
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
tree notd = fold_build1_loc (loc, BIT_NOT_EXPR, TREE_TYPE (arg1), arg1);
- tree candnotd = fold_build2_loc (loc, BIT_AND_EXPR, TREE_TYPE (arg0),
- TREE_OPERAND (arg0, 1), notd);
+ tree candnotd
+ = fold_build2_loc (loc, BIT_AND_EXPR, TREE_TYPE (arg0),
+ TREE_OPERAND (arg0, 1),
+ fold_convert_loc (loc, TREE_TYPE (arg0), notd));
tree rslt = code == EQ_EXPR ? integer_zero_node : integer_one_node;
if (integer_nonzerop (candnotd))
return omit_one_operand_loc (loc, type, rslt, arg0);
if (TREE_CODE (arg0) == BIT_XOR_EXPR
&& operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
return fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 0),
- build_int_cst (TREE_TYPE (arg1), 0));
+ build_int_cst (TREE_TYPE (arg0), 0));
/* Likewise (X ^ Y) == X becomes Y == 0. X has no side-effects. */
if (TREE_CODE (arg0) == BIT_XOR_EXPR
&& operand_equal_p (TREE_OPERAND (arg0, 0), arg1, 0)
&& reorder_operands_p (TREE_OPERAND (arg0, 1), arg1))
return fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 1),
- build_int_cst (TREE_TYPE (arg1), 0));
+ build_int_cst (TREE_TYPE (arg0), 0));
/* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
if (TREE_CODE (arg0) == BIT_XOR_EXPR
&& integer_pow2p (TREE_OPERAND (arg0, 1)))
{
tem = fold_build2_loc (loc, BIT_AND_EXPR, TREE_TYPE (arg0),
- TREE_OPERAND (TREE_OPERAND (arg0, 0), 0),
- TREE_OPERAND (arg0, 1));
+ TREE_OPERAND (TREE_OPERAND (arg0, 0), 0),
+ TREE_OPERAND (arg0, 1));
return fold_build2_loc (loc, code == EQ_EXPR ? NE_EXPR : EQ_EXPR,
- type, tem, arg1);
+ type, tem,
+ fold_convert_loc (loc, TREE_TYPE (arg0),
+ arg1));
}
/* Fold ((X & C) ^ C) eq/ne 0 into (X & C) ne/eq 0, when the
if (TREE_CODE (arg0) == NEGATE_EXPR
&& TREE_CODE (arg1) == NEGATE_EXPR)
return fold_build2_loc (loc, code, type,
- TREE_OPERAND (arg0, 0),
- TREE_OPERAND (arg1, 0));
+ TREE_OPERAND (arg0, 0),
+ fold_convert_loc (loc, TREE_TYPE (arg0),
+ TREE_OPERAND (arg1, 0)));
/* Fold (X & C) op (Y & C) as (X ^ Y) & C op 0", and symmetries. */
if (TREE_CODE (arg0) == BIT_AND_EXPR
operand_equal_p guarantees no side-effects so we don't need
to use omit_one_operand on Z. */
if (operand_equal_p (arg01, arg11, 0))
- return fold_build2_loc (loc, code, type, arg00, arg10);
+ return fold_build2_loc (loc, code, type, arg00,
+ fold_convert_loc (loc, TREE_TYPE (arg00),
+ arg10));
if (operand_equal_p (arg01, arg10, 0))
- return fold_build2_loc (loc, code, type, arg00, arg11);
+ return fold_build2_loc (loc, code, type, arg00,
+ fold_convert_loc (loc, TREE_TYPE (arg00),
+ arg11));
if (operand_equal_p (arg00, arg11, 0))
- return fold_build2_loc (loc, code, type, arg01, arg10);
+ return fold_build2_loc (loc, code, type, arg01,
+ fold_convert_loc (loc, TREE_TYPE (arg01),
+ arg10));
if (operand_equal_p (arg00, arg10, 0))
- return fold_build2_loc (loc, code, type, arg01, arg11);
+ return fold_build2_loc (loc, code, type, arg01,
+ fold_convert_loc (loc, TREE_TYPE (arg01),
+ arg11));
/* Optimize (X ^ C1) op (Y ^ C2) as (X ^ (C1 ^ C2)) op Y. */
if (TREE_CODE (arg01) == INTEGER_CST
&& TREE_CODE (arg11) == INTEGER_CST)
- return fold_build2_loc (loc, code, type,
- fold_build2_loc (loc, BIT_XOR_EXPR, itype, arg00,
- fold_build2_loc (loc,
- BIT_XOR_EXPR, itype,
- arg01, arg11)),
- arg10);
+ {
+ tem = fold_build2_loc (loc, BIT_XOR_EXPR, itype, arg01,
+ fold_convert_loc (loc, itype, arg11));
+ tem = fold_build2_loc (loc, BIT_XOR_EXPR, itype, arg00, tem);
+ return fold_build2_loc (loc, code, type, tem,
+ fold_convert_loc (loc, itype, arg10));
+ }
}
/* Attempt to simplify equality/inequality comparisons of complex
{
case GT_EXPR:
arg1 = const_binop (PLUS_EXPR, arg1,
- build_int_cst (TREE_TYPE (arg1), 1), 0);
+ build_int_cst (TREE_TYPE (arg1), 1));
return fold_build2_loc (loc, EQ_EXPR, type,
fold_convert_loc (loc,
TREE_TYPE (arg1), arg0),
arg1);
case LE_EXPR:
arg1 = const_binop (PLUS_EXPR, arg1,
- build_int_cst (TREE_TYPE (arg1), 1), 0);
+ build_int_cst (TREE_TYPE (arg1), 1));
return fold_build2_loc (loc, NE_EXPR, type,
fold_convert_loc (loc, TREE_TYPE (arg1),
arg0),
switch (code)
{
case GE_EXPR:
- arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
+ arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node);
return fold_build2_loc (loc, NE_EXPR, type,
fold_convert_loc (loc,
TREE_TYPE (arg1), arg0),
arg1);
case LT_EXPR:
- arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
+ arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node);
return fold_build2_loc (loc, EQ_EXPR, type,
fold_convert_loc (loc, TREE_TYPE (arg1),
arg0),
&& TYPE_UNSIGNED (TREE_TYPE (arg0))
&& TREE_CODE (arg1) == LSHIFT_EXPR
&& integer_onep (TREE_OPERAND (arg1, 0)))
- {
- tem = build2 (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
- build2 (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
- TREE_OPERAND (arg1, 1)),
- build_int_cst (TREE_TYPE (arg0), 0));
- goto fold_binary_exit;
- }
+ return build2_loc (loc, code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
+ build2 (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
+ TREE_OPERAND (arg1, 1)),
+ build_int_cst (TREE_TYPE (arg0), 0));
if ((code == LT_EXPR || code == GE_EXPR)
&& TYPE_UNSIGNED (TREE_TYPE (arg0))
&& TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
&& integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
{
- tem = build2 (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
- fold_convert_loc (loc, TREE_TYPE (arg0),
- build2 (RSHIFT_EXPR,
- TREE_TYPE (arg0), arg0,
- TREE_OPERAND (TREE_OPERAND (arg1, 0),
- 1))),
- build_int_cst (TREE_TYPE (arg0), 0));
- goto fold_binary_exit;
+ tem = build2 (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
+ TREE_OPERAND (TREE_OPERAND (arg1, 0), 1));
+ return build2_loc (loc, code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
+ fold_convert_loc (loc, TREE_TYPE (arg0), tem),
+ build_int_cst (TREE_TYPE (arg0), 0));
}
return NULL_TREE;
|| (TREE_CODE (arg0) == INTEGER_CST
&& TREE_CODE (arg1) == INTEGER_CST))
return build_complex (type, arg0, arg1);
+ if (TREE_CODE (arg0) == REALPART_EXPR
+ && TREE_CODE (arg1) == IMAGPART_EXPR
+ && TREE_TYPE (TREE_OPERAND (arg0, 0)) == type
+ && operand_equal_p (TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0), 0))
+ return omit_one_operand_loc (loc, type, TREE_OPERAND (arg0, 0),
+ TREE_OPERAND (arg1, 0));
return NULL_TREE;
case ASSERT_EXPR:
default:
return NULL_TREE;
} /* switch (code) */
- fold_binary_exit:
- protected_set_expr_location (tem, loc);
- return tem;
}
/* Callback for walk_tree, looking for LABEL_EXPR. Return *TP if it is
tree
fold_ternary_loc (location_t loc, enum tree_code code, tree type,
- tree op0, tree op1, tree op2)
+ tree op0, tree op1, tree op2)
{
tree tem;
- tree arg0 = NULL_TREE, arg1 = NULL_TREE;
+ tree arg0 = NULL_TREE, arg1 = NULL_TREE, arg2 = NULL_TREE;
enum tree_code_class kind = TREE_CODE_CLASS (code);
gcc_assert (IS_EXPR_CODE_CLASS (kind)
STRIP_NOPS (arg1);
}
+ if (op2)
+ {
+ arg2 = op2;
+ STRIP_NOPS (arg2);
+ }
+
switch (code)
{
case COMPONENT_REF:
TREE_OPERAND (arg0, 1))
&& !HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op2))))
{
- tem = fold_truth_not_expr (loc, arg0);
+ location_t loc0 = expr_location_or (arg0, loc);
+ tem = fold_truth_not_expr (loc0, arg0);
if (tem && COMPARISON_CLASS_P (tem))
{
tem = fold_cond_expr_with_comparison (loc, type, tem, op2, op1);
if (truth_value_p (TREE_CODE (arg0))
&& tree_swap_operands_p (op1, op2, false))
{
+ location_t loc0 = expr_location_or (arg0, loc);
/* See if this can be inverted. If it can't, possibly because
it was a floating-point inequality comparison, don't do
anything. */
- tem = fold_truth_not_expr (loc, arg0);
+ tem = fold_truth_not_expr (loc0, arg0);
if (tem)
return fold_build3_loc (loc, code, type, tem, op2, op1);
}
&& truth_value_p (TREE_CODE (arg0))
&& truth_value_p (TREE_CODE (arg1)))
{
+ location_t loc0 = expr_location_or (arg0, loc);
/* Only perform transformation if ARG0 is easily inverted. */
- tem = fold_truth_not_expr (loc, arg0);
+ tem = fold_truth_not_expr (loc0, arg0);
if (tem)
return fold_build2_loc (loc, TRUTH_ORIF_EXPR, type,
fold_convert_loc (loc, type, tem),
&& truth_value_p (TREE_CODE (arg0))
&& truth_value_p (TREE_CODE (op2)))
{
+ location_t loc0 = expr_location_or (arg0, loc);
/* Only perform transformation if ARG0 is easily inverted. */
- tem = fold_truth_not_expr (loc, arg0);
+ tem = fold_truth_not_expr (loc0, arg0);
if (tem)
return fold_build2_loc (loc, TRUTH_ANDIF_EXPR, type,
fold_convert_loc (loc, type, tem),
if (elements)
return TREE_VALUE (elements);
else
- return fold_convert_loc (loc, type, integer_zero_node);
+ return build_zero_cst (type);
}
}
return NULL_TREE;
+ case FMA_EXPR:
+ /* For integers we can decompose the FMA if possible. */
+ if (TREE_CODE (arg0) == INTEGER_CST
+ && TREE_CODE (arg1) == INTEGER_CST)
+ return fold_build2_loc (loc, PLUS_EXPR, type,
+ const_binop (MULT_EXPR, arg0, arg1), arg2);
+ if (integer_zerop (arg2))
+ return fold_build2_loc (loc, MULT_EXPR, type, arg0, arg1);
+
+ return fold_fma (loc, type, arg0, arg1, arg2);
+
default:
return NULL_TREE;
} /* switch (code) */
static void
fold_checksum_tree (const_tree expr, struct md5_ctx *ctx, htab_t ht)
{
- const void **slot;
+ void **slot;
enum tree_code code;
union tree_node buf;
int i, len;
-
+
recursive_label:
gcc_assert ((sizeof (struct tree_exp) + 5 * sizeof (tree)
&& sizeof (struct tree_type) <= sizeof (struct tree_function_decl));
if (expr == NULL)
return;
- slot = (const void **) htab_find_slot (ht, expr, INSERT);
+ slot = (void **) htab_find_slot (ht, expr, INSERT);
if (*slot != NULL)
return;
- *slot = expr;
+ *slot = CONST_CAST_TREE (expr);
code = TREE_CODE (expr);
if (TREE_CODE_CLASS (code) == tcc_declaration
&& DECL_ASSEMBLER_NAME_SET_P (expr))
if (TREE_CODE_CLASS (code) != tcc_type
&& TREE_CODE_CLASS (code) != tcc_declaration
&& code != TREE_LIST
- && code != SSA_NAME)
+ && code != SSA_NAME
+ && CODE_CONTAINS_STRUCT (code, TS_COMMON))
fold_checksum_tree (TREE_CHAIN (expr), ctx, ht);
switch (TREE_CODE_CLASS (code))
{
}
if (CODE_CONTAINS_STRUCT (TREE_CODE (expr), TS_DECL_WITH_VIS))
fold_checksum_tree (DECL_SECTION_NAME (expr), ctx, ht);
-
+
if (CODE_CONTAINS_STRUCT (TREE_CODE (expr), TS_DECL_NON_COMMON))
{
fold_checksum_tree (DECL_VINDEX (expr), ctx, ht);
by "call debug_fold_checksum (op0)", then just trace down till the
outputs differ. */
-void
+DEBUG_FUNCTION void
debug_fold_checksum (const_tree t)
{
int i;
unsigned char checksum[16];
struct md5_ctx ctx;
htab_t ht = htab_create (32, htab_hash_pointer, htab_eq_pointer, NULL);
-
+
md5_init_ctx (&ctx);
fold_checksum_tree (t, &ctx, ht);
md5_finish_ctx (&ctx, checksum);
md5_finish_ctx (&ctx, checksum_before);
htab_empty (ht);
#endif
-
+
tem = fold_unary_loc (loc, code, type, op0);
if (!tem)
- {
- tem = build1_stat (code, type, op0 PASS_MEM_STAT);
- SET_EXPR_LOCATION (tem, loc);
- }
-
+ tem = build1_stat_loc (loc, code, type, op0 PASS_MEM_STAT);
+
#ifdef ENABLE_FOLD_CHECKING
md5_init_ctx (&ctx);
fold_checksum_tree (op0, &ctx, ht);
tem = fold_binary_loc (loc, code, type, op0, op1);
if (!tem)
- {
- tem = build2_stat (code, type, op0, op1 PASS_MEM_STAT);
- SET_EXPR_LOCATION (tem, loc);
- }
-
+ tem = build2_stat_loc (loc, code, type, op0, op1 PASS_MEM_STAT);
+
#ifdef ENABLE_FOLD_CHECKING
md5_init_ctx (&ctx);
fold_checksum_tree (op0, &ctx, ht);
if (memcmp (checksum_before_op0, checksum_after_op0, 16))
fold_check_failed (op0, tem);
-
+
md5_init_ctx (&ctx);
fold_checksum_tree (op1, &ctx, ht);
md5_finish_ctx (&ctx, checksum_after_op1);
gcc_assert (TREE_CODE_CLASS (code) != tcc_vl_exp);
tem = fold_ternary_loc (loc, code, type, op0, op1, op2);
if (!tem)
- {
- tem = build3_stat (code, type, op0, op1, op2 PASS_MEM_STAT);
- SET_EXPR_LOCATION (tem, loc);
- }
-
+ tem = build3_stat_loc (loc, code, type, op0, op1, op2 PASS_MEM_STAT);
+
#ifdef ENABLE_FOLD_CHECKING
md5_init_ctx (&ctx);
fold_checksum_tree (op0, &ctx, ht);
if (memcmp (checksum_before_op0, checksum_after_op0, 16))
fold_check_failed (op0, tem);
-
+
md5_init_ctx (&ctx);
fold_checksum_tree (op1, &ctx, ht);
md5_finish_ctx (&ctx, checksum_after_op1);
if (memcmp (checksum_before_op1, checksum_after_op1, 16))
fold_check_failed (op1, tem);
-
+
md5_init_ctx (&ctx);
fold_checksum_tree (op2, &ctx, ht);
md5_finish_ctx (&ctx, checksum_after_op2);
#endif
tem = fold_builtin_call_array (loc, type, fn, nargs, argarray);
-
+
#ifdef ENABLE_FOLD_CHECKING
md5_init_ctx (&ctx);
fold_checksum_tree (fn, &ctx, ht);
if (memcmp (checksum_before_fn, checksum_after_fn, 16))
fold_check_failed (fn, tem);
-
+
md5_init_ctx (&ctx);
for (i = 0; i < nargs; i++)
fold_checksum_tree (argarray[i], &ctx, ht);
&& 0 != (t1 = fold_convert (type,
const_binop (LSHIFT_EXPR,
size_one_node,
- op1, 0)))
+ op1)))
&& !TREE_OVERFLOW (t1))
return multiple_of_p (type, t1, bottom);
}
case SAVE_EXPR:
return multiple_of_p (type, TREE_OPERAND (top, 0), bottom);
+ case COND_EXPR:
+ return (multiple_of_p (type, TREE_OPERAND (top, 1), bottom)
+ && multiple_of_p (type, TREE_OPERAND (top, 2), bottom));
+
case INTEGER_CST:
if (TREE_CODE (bottom) != INTEGER_CST
|| integer_zerop (bottom)
|| tree_int_cst_sgn (bottom) < 0)))
return 0;
return integer_zerop (int_const_binop (TRUNC_MOD_EXPR,
- top, bottom, 0));
+ top, bottom));
default:
return 0;
&& (TREE_CODE (op0) == NOP_EXPR || TREE_CODE (op0) == INTEGER_CST)
&& (TREE_CODE (op1) == NOP_EXPR || TREE_CODE (op1) == INTEGER_CST))
{
- tree inner0 = (TREE_CODE (op0) == NOP_EXPR)
+ tree inner0 = (TREE_CODE (op0) == NOP_EXPR)
? TREE_TYPE (TREE_OPERAND (op0, 0))
: TREE_TYPE (op0);
- tree inner1 = (TREE_CODE (op1) == NOP_EXPR)
+ tree inner1 = (TREE_CODE (op1) == NOP_EXPR)
? TREE_TYPE (TREE_OPERAND (op1, 0))
: TREE_TYPE (op1);
CASE_FLT_FN (BUILT_IN_FLOOR):
CASE_FLT_FN (BUILT_IN_FMOD):
CASE_FLT_FN (BUILT_IN_FREXP):
+ CASE_FLT_FN (BUILT_IN_ICEIL):
+ CASE_FLT_FN (BUILT_IN_IFLOOR):
+ CASE_FLT_FN (BUILT_IN_IRINT):
+ CASE_FLT_FN (BUILT_IN_IROUND):
CASE_FLT_FN (BUILT_IN_LCEIL):
CASE_FLT_FN (BUILT_IN_LDEXP):
CASE_FLT_FN (BUILT_IN_LFLOOR):
case ADDR_EXPR:
{
- tree base = get_base_address (TREE_OPERAND (t, 0));
+ tree base = TREE_OPERAND (t, 0);
+ if (!DECL_P (base))
+ base = get_base_address (base);
if (!base)
return false;
allocated on the stack. */
if (DECL_P (base)
&& (flag_delete_null_pointer_checks
- || (TREE_CODE (base) == VAR_DECL && !TREE_STATIC (base))))
+ || (DECL_CONTEXT (base)
+ && TREE_CODE (DECL_CONTEXT (base)) == FUNCTION_DECL
+ && auto_var_in_fn_p (base, DECL_CONTEXT (base)))))
return !VAR_OR_FUNCTION_DECL_P (base) || !DECL_WEAK (base);
/* Constants are never weak. */
{
case INTEGER_CST:
{
- unsigned HOST_WIDE_INT low;
- HOST_WIDE_INT high;
- int overflow = neg_double (TREE_INT_CST_LOW (arg0),
- TREE_INT_CST_HIGH (arg0),
- &low, &high);
- t = force_fit_type_double (type, low, high, 1,
+ double_int val = tree_to_double_int (arg0);
+ int overflow = neg_double (val.low, val.high, &val.low, &val.high);
+
+ t = force_fit_type_double (type, val, 1,
(overflow | TREE_OVERFLOW (arg0))
&& !TYPE_UNSIGNED (type));
break;
}
case REAL_CST:
- t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
+ t = build_real (type, real_value_negate (&TREE_REAL_CST (arg0)));
break;
case FIXED_CST:
switch (TREE_CODE (arg0))
{
case INTEGER_CST:
- /* If the value is unsigned, then the absolute value is
- the same as the ordinary value. */
- if (TYPE_UNSIGNED (type))
- t = arg0;
- /* Similarly, if the value is non-negative. */
- else if (INT_CST_LT (integer_minus_one_node, arg0))
- t = arg0;
- /* If the value is negative, then the absolute value is
- its negation. */
- else
- {
- unsigned HOST_WIDE_INT low;
- HOST_WIDE_INT high;
- int overflow = neg_double (TREE_INT_CST_LOW (arg0),
- TREE_INT_CST_HIGH (arg0),
- &low, &high);
- t = force_fit_type_double (type, low, high, -1,
- overflow | TREE_OVERFLOW (arg0));
- }
+ {
+ double_int val = tree_to_double_int (arg0);
+
+ /* If the value is unsigned or non-negative, then the absolute value
+ is the same as the ordinary value. */
+ if (TYPE_UNSIGNED (type)
+ || !double_int_negative_p (val))
+ t = arg0;
+
+ /* If the value is negative, then the absolute value is
+ its negation. */
+ else
+ {
+ int overflow;
+
+ overflow = neg_double (val.low, val.high, &val.low, &val.high);
+ t = force_fit_type_double (type, val, -1,
+ overflow | TREE_OVERFLOW (arg0));
+ }
+ }
break;
case REAL_CST:
if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
- t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
+ t = build_real (type, real_value_negate (&TREE_REAL_CST (arg0)));
else
t = arg0;
break;
constant. TYPE is the type of the result. */
static tree
-fold_not_const (tree arg0, tree type)
+fold_not_const (const_tree arg0, tree type)
{
- tree t = NULL_TREE;
+ double_int val;
gcc_assert (TREE_CODE (arg0) == INTEGER_CST);
- t = force_fit_type_double (type, ~TREE_INT_CST_LOW (arg0),
- ~TREE_INT_CST_HIGH (arg0), 0,
- TREE_OVERFLOW (arg0));
-
- return t;
+ val = double_int_not (tree_to_double_int (arg0));
+ return force_fit_type_double (type, val, 0, TREE_OVERFLOW (arg0));
}
/* Given CODE, a relational operator, the target type, TYPE and two
if (!TREE_SIDE_EFFECTS (op))
return expr;
}
-
+
return build1 (CLEANUP_POINT_EXPR, type, expr);
}
}
/* *(foo *)&fooarray => fooarray[0] */
else if (TREE_CODE (optype) == ARRAY_TYPE
- && type == TREE_TYPE (optype))
+ && type == TREE_TYPE (optype)
+ && (!in_gimple_form
+ || TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST))
{
tree type_domain = TYPE_DOMAIN (optype);
tree min_val = size_zero_node;
if (type_domain && TYPE_MIN_VALUE (type_domain))
min_val = TYPE_MIN_VALUE (type_domain);
- op0 = build4 (ARRAY_REF, type, op, min_val, NULL_TREE, NULL_TREE);
- SET_EXPR_LOCATION (op0, loc);
- return op0;
+ if (in_gimple_form
+ && TREE_CODE (min_val) != INTEGER_CST)
+ return NULL_TREE;
+ return build4_loc (loc, ARRAY_REF, type, op, min_val,
+ NULL_TREE, NULL_TREE);
}
/* *(foo *)&complexfoo => __real__ complexfoo */
else if (TREE_CODE (optype) == COMPLEX_TYPE
}
}
- /* ((foo*)&vectorfoo)[1] => BIT_FIELD_REF<vectorfoo,...> */
- if (TREE_CODE (sub) == POINTER_PLUS_EXPR
- && TREE_CODE (TREE_OPERAND (sub, 1)) == INTEGER_CST)
- {
- tree op00 = TREE_OPERAND (sub, 0);
- tree op01 = TREE_OPERAND (sub, 1);
- tree op00type;
-
- STRIP_NOPS (op00);
- op00type = TREE_TYPE (op00);
- if (TREE_CODE (op00) == ADDR_EXPR
- && TREE_CODE (TREE_TYPE (op00type)) == VECTOR_TYPE
- && type == TREE_TYPE (TREE_TYPE (op00type)))
- {
- HOST_WIDE_INT offset = tree_low_cst (op01, 0);
- tree part_width = TYPE_SIZE (type);
- unsigned HOST_WIDE_INT part_widthi = tree_low_cst (part_width, 0)/BITS_PER_UNIT;
- unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT;
- tree index = bitsize_int (indexi);
-
- if (offset/part_widthi <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (op00type)))
- return fold_build3_loc (loc,
- BIT_FIELD_REF, type, TREE_OPERAND (op00, 0),
- part_width, index);
-
- }
- }
-
-
- /* ((foo*)&complexfoo)[1] => __imag__ complexfoo */
if (TREE_CODE (sub) == POINTER_PLUS_EXPR
&& TREE_CODE (TREE_OPERAND (sub, 1)) == INTEGER_CST)
{
tree op00 = TREE_OPERAND (sub, 0);
tree op01 = TREE_OPERAND (sub, 1);
- tree op00type;
STRIP_NOPS (op00);
- op00type = TREE_TYPE (op00);
- if (TREE_CODE (op00) == ADDR_EXPR
- && TREE_CODE (TREE_TYPE (op00type)) == COMPLEX_TYPE
- && type == TREE_TYPE (TREE_TYPE (op00type)))
+ if (TREE_CODE (op00) == ADDR_EXPR)
{
- tree size = TYPE_SIZE_UNIT (type);
- if (tree_int_cst_equal (size, op01))
- return fold_build1_loc (loc, IMAGPART_EXPR, type,
- TREE_OPERAND (op00, 0));
+ tree op00type;
+ op00 = TREE_OPERAND (op00, 0);
+ op00type = TREE_TYPE (op00);
+
+ /* ((foo*)&vectorfoo)[1] => BIT_FIELD_REF<vectorfoo,...> */
+ if (TREE_CODE (op00type) == VECTOR_TYPE
+ && type == TREE_TYPE (op00type))
+ {
+ HOST_WIDE_INT offset = tree_low_cst (op01, 0);
+ tree part_width = TYPE_SIZE (type);
+ unsigned HOST_WIDE_INT part_widthi = tree_low_cst (part_width, 0)/BITS_PER_UNIT;
+ unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT;
+ tree index = bitsize_int (indexi);
+
+ if (offset/part_widthi <= TYPE_VECTOR_SUBPARTS (op00type))
+ return fold_build3_loc (loc,
+ BIT_FIELD_REF, type, op00,
+ part_width, index);
+
+ }
+ /* ((foo*)&complexfoo)[1] => __imag__ complexfoo */
+ else if (TREE_CODE (op00type) == COMPLEX_TYPE
+ && type == TREE_TYPE (op00type))
+ {
+ tree size = TYPE_SIZE_UNIT (type);
+ if (tree_int_cst_equal (size, op01))
+ return fold_build1_loc (loc, IMAGPART_EXPR, type, op00);
+ }
+ /* ((foo *)&fooarray)[1] => fooarray[1] */
+ else if (TREE_CODE (op00type) == ARRAY_TYPE
+ && type == TREE_TYPE (op00type))
+ {
+ tree type_domain = TYPE_DOMAIN (op00type);
+ tree min_val = size_zero_node;
+ if (type_domain && TYPE_MIN_VALUE (type_domain))
+ min_val = TYPE_MIN_VALUE (type_domain);
+ op01 = size_binop_loc (loc, EXACT_DIV_EXPR, op01,
+ TYPE_SIZE_UNIT (type));
+ op01 = size_binop_loc (loc, PLUS_EXPR, op01, min_val);
+ return build4_loc (loc, ARRAY_REF, type, op00, op01,
+ NULL_TREE, NULL_TREE);
+ }
}
}
-
+
/* *(foo *)fooarrptr => (*fooarrptr)[0] */
if (TREE_CODE (TREE_TYPE (subtype)) == ARRAY_TYPE
- && type == TREE_TYPE (TREE_TYPE (subtype)))
+ && type == TREE_TYPE (TREE_TYPE (subtype))
+ && (!in_gimple_form
+ || TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST))
{
tree type_domain;
tree min_val = size_zero_node;
type_domain = TYPE_DOMAIN (TREE_TYPE (sub));
if (type_domain && TYPE_MIN_VALUE (type_domain))
min_val = TYPE_MIN_VALUE (type_domain);
- op0 = build4 (ARRAY_REF, type, sub, min_val, NULL_TREE, NULL_TREE);
- SET_EXPR_LOCATION (op0, loc);
- return op0;
+ if (in_gimple_form
+ && TREE_CODE (min_val) != INTEGER_CST)
+ return NULL_TREE;
+ return build4_loc (loc, ARRAY_REF, type, sub, min_val, NULL_TREE,
+ NULL_TREE);
}
return NULL_TREE;
if (sub)
return sub;
- t = build1 (INDIRECT_REF, type, t);
- SET_EXPR_LOCATION (t, loc);
- return t;
+ return build1_loc (loc, INDIRECT_REF, type, t);
}
/* Given an INDIRECT_REF T, return either T or a simplified version. */
{
if (TREE_CODE (value) == INTEGER_CST)
{
- unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (value);
- unsigned HOST_WIDE_INT high;
+ double_int val = tree_to_double_int (value);
bool overflow_p;
- if ((low & (divisor - 1)) == 0)
+ if ((val.low & (divisor - 1)) == 0)
return value;
overflow_p = TREE_OVERFLOW (value);
- high = TREE_INT_CST_HIGH (value);
- low &= ~(divisor - 1);
- low += divisor;
- if (low == 0)
+ val.low &= ~(divisor - 1);
+ val.low += divisor;
+ if (val.low == 0)
{
- high++;
- if (high == 0)
+ val.high++;
+ if (val.high == 0)
overflow_p = true;
}
- return force_fit_type_double (TREE_TYPE (value), low, high,
+ return force_fit_type_double (TREE_TYPE (value), val,
-1, overflow_p);
}
else
if (arg1)
return fold_build2_loc (loc, COMPOUND_EXPR, TREE_TYPE (exp), arg0, arg1);
break;
-
+
case COND_EXPR:
arg0 = fold_strip_sign_ops (TREE_OPERAND (exp, 1));
arg1 = fold_strip_sign_ops (TREE_OPERAND (exp, 2));
arg0 ? arg0 : TREE_OPERAND (exp, 1),
arg1 ? arg1 : TREE_OPERAND (exp, 2));
break;
-
+
case CALL_EXPR:
{
const enum built_in_function fcode = builtin_mathfn_code (exp);