/* Fold a constant sub-tree into a single node for C-compiler
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
-This file is part of GNU CC.
+This file is part of GCC.
-GNU CC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
-GNU CC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
/*@@ This file should be rewritten to use an arbitrary precision
@@ representation for "struct tree_int_cst" and "struct tree_real_cst".
#include "config.h"
#include "system.h"
-#include <setjmp.h>
#include "flags.h"
#include "tree.h"
#include "rtl.h"
+#include "expr.h"
#include "tm_p.h"
#include "toplev.h"
#include "ggc.h"
+#include "hashtab.h"
static void encode PARAMS ((HOST_WIDE_INT *,
unsigned HOST_WIDE_INT,
static tree split_tree PARAMS ((tree, enum tree_code, tree *, tree *,
int));
static tree associate_trees PARAMS ((tree, tree, enum tree_code, tree));
-static tree int_const_binop PARAMS ((enum tree_code, tree, tree, int, int));
+static tree int_const_binop PARAMS ((enum tree_code, tree, tree, int));
static void const_binop_1 PARAMS ((PTR));
static tree const_binop PARAMS ((enum tree_code, tree, tree, int));
+static hashval_t size_htab_hash PARAMS ((const void *));
+static int size_htab_eq PARAMS ((const void *, const void *));
static void fold_convert_1 PARAMS ((PTR));
static tree fold_convert PARAMS ((tree, tree));
static enum tree_code invert_tree_comparison PARAMS ((enum tree_code));
static int multiple_of_p PARAMS ((tree, tree, tree));
static tree constant_boolean_node PARAMS ((int, tree));
static int count_cond PARAMS ((tree, int));
-
+static tree fold_binary_op_with_conditional_arg
+ PARAMS ((enum tree_code, tree, tree, tree, int));
+
#ifndef BRANCH_COST
#define BRANCH_COST 1
#endif
TREE_INT_CST_LOW (t) &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
}
- /* Unsigned types do not suffer sign extension or overflow. */
- if (TREE_UNSIGNED (TREE_TYPE (t)))
+ /* Unsigned types do not suffer sign extension or overflow unless they
+ are a sizetype. */
+ if (TREE_UNSIGNED (TREE_TYPE (t))
+ && ! (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (TREE_TYPE (t))))
return overflow;
/* If the value's sign bit is set, extend the sign. */
HOST_WIDE_INT arg1[4];
HOST_WIDE_INT arg2[4];
HOST_WIDE_INT prod[4 * 2];
- register unsigned HOST_WIDE_INT carry;
- register int i, j, k;
+ unsigned HOST_WIDE_INT carry;
+ int i, j, k;
unsigned HOST_WIDE_INT toplow, neglow;
HOST_WIDE_INT tophigh, neghigh;
encode (arg1, l1, h1);
encode (arg2, l2, h2);
- bzero ((char *) prod, sizeof prod);
+ memset ((char *) prod, 0, sizeof prod);
for (i = 0; i < 4; i++)
{
HOST_WIDE_INT *hv;
int arith;
{
+ unsigned HOST_WIDE_INT signmask;
+
if (count < 0)
{
rshift_double (l1, h1, -count, prec, lv, hv, arith);
| (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
*lv = l1 << count;
}
+
+ /* Sign extend all bits that are beyond the precision. */
+
+ signmask = -((prec > HOST_BITS_PER_WIDE_INT
+ ? (*hv >> (prec - HOST_BITS_PER_WIDE_INT - 1))
+ : (*lv >> (prec - 1))) & 1);
+
+ if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
+ ;
+ else if (prec >= HOST_BITS_PER_WIDE_INT)
+ {
+ *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
+ *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
+ }
+ else
+ {
+ *hv = signmask;
+ *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
+ *lv |= signmask << prec;
+ }
}
/* Shift the doubleword integer in L1, H1 right by COUNT places
rshift_double (l1, h1, count, prec, lv, hv, arith)
unsigned HOST_WIDE_INT l1;
HOST_WIDE_INT h1, count;
- unsigned int prec ATTRIBUTE_UNUSED;
+ unsigned int prec;
unsigned HOST_WIDE_INT *lv;
HOST_WIDE_INT *hv;
int arith;
{
/* Shifting by the host word size is undefined according to the
ANSI standard, so we must handle this as a special case. */
- *hv = signmask;
- *lv = signmask;
+ *hv = 0;
+ *lv = 0;
}
else if (count >= HOST_BITS_PER_WIDE_INT)
{
- *hv = signmask;
- *lv = ((signmask << (2 * HOST_BITS_PER_WIDE_INT - count - 1) << 1)
- | ((unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT)));
+ *hv = 0;
+ *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
}
else
{
+ *hv = (unsigned HOST_WIDE_INT) h1 >> count;
*lv = ((l1 >> count)
| ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
- *hv = ((signmask << (HOST_BITS_PER_WIDE_INT - count))
- | ((unsigned HOST_WIDE_INT) h1 >> count));
+ }
+
+ /* Zero / sign extend all bits that are beyond the precision. */
+
+ if (count >= (HOST_WIDE_INT)prec)
+ {
+ *hv = signmask;
+ *lv = signmask;
+ }
+ else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
+ ;
+ else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
+ {
+ *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
+ *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
+ }
+ else
+ {
+ *hv = signmask;
+ *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
+ *lv |= signmask << (prec - count);
}
}
\f
CODE is a tree code for a kind of division, one of
TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
or EXACT_DIV_EXPR
- It controls how the quotient is rounded to a integer.
+ It controls how the quotient is rounded to an integer.
Return nonzero if the operation overflows.
UNS nonzero says do unsigned division. */
int quo_neg = 0;
HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
HOST_WIDE_INT den[4], quo[4];
- register int i, j;
+ int i, j;
unsigned HOST_WIDE_INT work;
unsigned HOST_WIDE_INT carry = 0;
unsigned HOST_WIDE_INT lnum = lnum_orig;
goto finish_up;
}
- bzero ((char *) quo, sizeof quo);
+ memset ((char *) quo, 0, sizeof quo);
- bzero ((char *) num, sizeof num); /* to zero 9th element */
- bzero ((char *) den, sizeof den);
+ memset ((char *) num, 0, sizeof num); /* to zero 9th element */
+ memset ((char *) den, 0, sizeof den);
encode (num, lnum, hnum);
encode (den, lden, hden);
else
quo_est = BASE - 1;
- /* Refine quo_est so it's usually correct, and at most one high. */
+ /* Refine quo_est so it's usually correct, and at most one high. */
tmp = work - quo_est * den[den_hi_sig];
if (tmp < BASE
&& (den[den_hi_sig - 1] * quo_est
return overflow;
}
\f
-#ifndef REAL_ARITHMETIC
-/* Effectively truncate a real value to represent the nearest possible value
- in a narrower mode. The result is actually represented in the same data
- type as the argument, but its value is usually different.
-
- A trap may occur during the FP operations and it is the responsibility
- of the calling function to have a handler established. */
-
-REAL_VALUE_TYPE
-real_value_truncate (mode, arg)
- enum machine_mode mode;
- REAL_VALUE_TYPE arg;
-{
- return REAL_VALUE_TRUNCATE (mode, arg);
-}
-
-#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
-
-/* Check for infinity in an IEEE double precision number. */
-
-int
-target_isinf (x)
- REAL_VALUE_TYPE x;
-{
- /* The IEEE 64-bit double format. */
- union {
- REAL_VALUE_TYPE d;
- struct {
- unsigned sign : 1;
- unsigned exponent : 11;
- unsigned mantissa1 : 20;
- unsigned mantissa2;
- } little_endian;
- struct {
- unsigned mantissa2;
- unsigned mantissa1 : 20;
- unsigned exponent : 11;
- unsigned sign : 1;
- } big_endian;
- } u;
-
- u.d = dconstm1;
- if (u.big_endian.sign == 1)
- {
- u.d = x;
- return (u.big_endian.exponent == 2047
- && u.big_endian.mantissa1 == 0
- && u.big_endian.mantissa2 == 0);
- }
- else
- {
- u.d = x;
- return (u.little_endian.exponent == 2047
- && u.little_endian.mantissa1 == 0
- && u.little_endian.mantissa2 == 0);
- }
-}
-
-/* Check whether an IEEE double precision number is a NaN. */
-
-int
-target_isnan (x)
- REAL_VALUE_TYPE x;
-{
- /* The IEEE 64-bit double format. */
- union {
- REAL_VALUE_TYPE d;
- struct {
- unsigned sign : 1;
- unsigned exponent : 11;
- unsigned mantissa1 : 20;
- unsigned mantissa2;
- } little_endian;
- struct {
- unsigned mantissa2;
- unsigned mantissa1 : 20;
- unsigned exponent : 11;
- unsigned sign : 1;
- } big_endian;
- } u;
-
- u.d = dconstm1;
- if (u.big_endian.sign == 1)
- {
- u.d = x;
- return (u.big_endian.exponent == 2047
- && (u.big_endian.mantissa1 != 0
- || u.big_endian.mantissa2 != 0));
- }
- else
- {
- u.d = x;
- return (u.little_endian.exponent == 2047
- && (u.little_endian.mantissa1 != 0
- || u.little_endian.mantissa2 != 0));
- }
-}
-
-/* Check for a negative IEEE double precision number. */
-
-int
-target_negative (x)
- REAL_VALUE_TYPE x;
-{
- /* The IEEE 64-bit double format. */
- union {
- REAL_VALUE_TYPE d;
- struct {
- unsigned sign : 1;
- unsigned exponent : 11;
- unsigned mantissa1 : 20;
- unsigned mantissa2;
- } little_endian;
- struct {
- unsigned mantissa2;
- unsigned mantissa1 : 20;
- unsigned exponent : 11;
- unsigned sign : 1;
- } big_endian;
- } u;
-
- u.d = dconstm1;
- if (u.big_endian.sign == 1)
- {
- u.d = x;
- return u.big_endian.sign;
- }
- else
- {
- u.d = x;
- return u.little_endian.sign;
- }
-}
-#else /* Target not IEEE */
-
-/* Let's assume other float formats don't have infinity.
- (This can be overridden by redefining REAL_VALUE_ISINF.) */
-
-int
-target_isinf (x)
- REAL_VALUE_TYPE x ATTRIBUTE_UNUSED;
-{
- return 0;
-}
-
-/* Let's assume other float formats don't have NaNs.
- (This can be overridden by redefining REAL_VALUE_ISNAN.) */
-
-int
-target_isnan (x)
- REAL_VALUE_TYPE x ATTRIBUTE_UNUSED;
-{
- return 0;
-}
-
-/* Let's assume other float formats don't have minus zero.
- (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
-
-int
-target_negative (x)
- REAL_VALUE_TYPE x;
-{
- return x < 0;
-}
-#endif /* Target not IEEE */
-
-/* Try to change R into its exact multiplicative inverse in machine mode
- MODE. Return nonzero function value if successful. */
-
-int
-exact_real_inverse (mode, r)
- enum machine_mode mode;
- REAL_VALUE_TYPE *r;
-{
- jmp_buf float_error;
- union
- {
- double d;
- unsigned short i[4];
- }x, t, y;
-#ifdef CHECK_FLOAT_VALUE
- int i;
-#endif
-
- /* Usually disable if bounds checks are not reliable. */
- if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT) && !flag_pretend_float)
- return 0;
-
- /* Set array index to the less significant bits in the unions, depending
- on the endian-ness of the host doubles.
- Disable if insufficient information on the data structure. */
-#if HOST_FLOAT_FORMAT == UNKNOWN_FLOAT_FORMAT
- return 0;
-#else
-#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
-#define K 2
-#else
-#if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
-#define K 2
-#else
-#define K (2 * HOST_FLOAT_WORDS_BIG_ENDIAN)
-#endif
-#endif
-#endif
-
- if (setjmp (float_error))
- {
- /* Don't do the optimization if there was an arithmetic error. */
-fail:
- set_float_handler (NULL_PTR);
- return 0;
- }
- set_float_handler (float_error);
-
- /* Domain check the argument. */
- x.d = *r;
- if (x.d == 0.0)
- goto fail;
-
-#ifdef REAL_INFINITY
- if (REAL_VALUE_ISINF (x.d) || REAL_VALUE_ISNAN (x.d))
- goto fail;
-#endif
-
- /* Compute the reciprocal and check for numerical exactness.
- It is unnecessary to check all the significand bits to determine
- whether X is a power of 2. If X is not, then it is impossible for
- the bottom half significand of both X and 1/X to be all zero bits.
- Hence we ignore the data structure of the top half and examine only
- the low order bits of the two significands. */
- t.d = 1.0 / x.d;
- if (x.i[K] != 0 || x.i[K + 1] != 0 || t.i[K] != 0 || t.i[K + 1] != 0)
- goto fail;
-
- /* Truncate to the required mode and range-check the result. */
- y.d = REAL_VALUE_TRUNCATE (mode, t.d);
-#ifdef CHECK_FLOAT_VALUE
- i = 0;
- if (CHECK_FLOAT_VALUE (mode, y.d, i))
- goto fail;
-#endif
-
- /* Fail if truncation changed the value. */
- if (y.d != t.d || y.d == 0.0)
- goto fail;
-
-#ifdef REAL_INFINITY
- if (REAL_VALUE_ISINF (y.d) || REAL_VALUE_ISNAN (y.d))
- goto fail;
-#endif
-
- /* Output the reciprocal and return success flag. */
- set_float_handler (NULL_PTR);
- *r = y.d;
- return 1;
-}
-
-/* Convert C9X hexadecimal floating point string constant S. Return
- real value type in mode MODE. This function uses the host computer's
- floating point arithmetic when there is no REAL_ARITHMETIC. */
-
-REAL_VALUE_TYPE
-real_hex_to_f (s, mode)
- char *s;
- enum machine_mode mode;
-{
- REAL_VALUE_TYPE ip;
- char *p = s;
- unsigned HOST_WIDE_INT low, high;
- int shcount, nrmcount, k;
- int sign, expsign, isfloat;
- int lost = 0;/* Nonzero low order bits shifted out and discarded. */
- int frexpon = 0; /* Bits after the decimal point. */
- int expon = 0; /* Value of exponent. */
- int decpt = 0; /* How many decimal points. */
- int gotp = 0; /* How many P's. */
- char c;
-
- isfloat = 0;
- expsign = 1;
- ip = 0.0;
-
- while (*p == ' ' || *p == '\t')
- ++p;
-
- /* Sign, if any, comes first. */
- sign = 1;
- if (*p == '-')
- {
- sign = -1;
- ++p;
- }
-
- /* The string is supposed to start with 0x or 0X . */
- if (*p == '0')
- {
- ++p;
- if (*p == 'x' || *p == 'X')
- ++p;
- else
- abort ();
- }
- else
- abort ();
-
- while (*p == '0')
- ++p;
-
- high = 0;
- low = 0;
- shcount = 0;
- while ((c = *p) != '\0')
- {
- if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F')
- || (c >= 'a' && c <= 'f'))
- {
- k = c & CHARMASK;
- if (k >= 'a' && k <= 'f')
- k = k - 'a' + 10;
- else if (k >= 'A')
- k = k - 'A' + 10;
- else
- k = k - '0';
-
- if ((high & 0xf0000000) == 0)
- {
- high = (high << 4) + ((low >> 28) & 15);
- low = (low << 4) + k;
- shcount += 4;
- if (decpt)
- frexpon += 4;
- }
- else
- {
- /* Record nonzero lost bits. */
- lost |= k;
- if (! decpt)
- frexpon -= 4;
- }
- ++p;
- }
- else if (c == '.')
- {
- ++decpt;
- ++p;
- }
-
- else if (c == 'p' || c == 'P')
- {
- ++gotp;
- ++p;
- /* Sign of exponent. */
- if (*p == '-')
- {
- expsign = -1;
- ++p;
- }
-
- /* Value of exponent.
- The exponent field is a decimal integer. */
- while (ISDIGIT (*p))
- {
- k = (*p++ & CHARMASK) - '0';
- expon = 10 * expon + k;
- }
-
- expon *= expsign;
- /* F suffix is ambiguous in the significand part
- so it must appear after the decimal exponent field. */
- if (*p == 'f' || *p == 'F')
- {
- isfloat = 1;
- ++p;
- break;
- }
- }
-
- else if (c == 'l' || c == 'L')
- {
- ++p;
- break;
- }
- else
- break;
- }
-
- /* Abort if last character read was not legitimate. */
- c = *p;
- if ((c != '\0' && c != ' ' && c != '\n' && c != '\r') || (decpt > 1))
- abort ();
-
- /* There must be either one decimal point or one p. */
- if (decpt == 0 && gotp == 0)
- abort ();
-
- shcount -= 4;
- if (high == 0 && low == 0)
- return dconst0;
-
- /* Normalize. */
- nrmcount = 0;
- if (high == 0)
- {
- high = low;
- low = 0;
- nrmcount += 32;
- }
-
- /* Leave a high guard bit for carry-out. */
- if ((high & 0x80000000) != 0)
- {
- lost |= low & 1;
- low = (low >> 1) | (high << 31);
- high = high >> 1;
- nrmcount -= 1;
- }
-
- if ((high & 0xffff8000) == 0)
- {
- high = (high << 16) + ((low >> 16) & 0xffff);
- low = low << 16;
- nrmcount += 16;
- }
-
- while ((high & 0xc0000000) == 0)
- {
- high = (high << 1) + ((low >> 31) & 1);
- low = low << 1;
- nrmcount += 1;
- }
-
- if (isfloat || GET_MODE_SIZE (mode) == UNITS_PER_WORD)
- {
- /* Keep 24 bits precision, bits 0x7fffff80.
- Rounding bit is 0x40. */
- lost = lost | low | (high & 0x3f);
- low = 0;
- if (high & 0x40)
- {
- if ((high & 0x80) || lost)
- high += 0x40;
- }
- high &= 0xffffff80;
- }
- else
- {
- /* We need real.c to do long double formats, so here default
- to double precision. */
-#if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
- /* IEEE double.
- Keep 53 bits precision, bits 0x7fffffff fffffc00.
- Rounding bit is low word 0x200. */
- lost = lost | (low & 0x1ff);
- if (low & 0x200)
- {
- if ((low & 0x400) || lost)
- {
- low = (low + 0x200) & 0xfffffc00;
- if (low == 0)
- high += 1;
- }
- }
- low &= 0xfffffc00;
-#else
- /* Assume it's a VAX with 56-bit significand,
- bits 0x7fffffff ffffff80. */
- lost = lost | (low & 0x7f);
- if (low & 0x40)
- {
- if ((low & 0x80) || lost)
- {
- low = (low + 0x40) & 0xffffff80;
- if (low == 0)
- high += 1;
- }
- }
- low &= 0xffffff80;
-#endif
- }
-
- ip = (double) high;
- ip = REAL_VALUE_LDEXP (ip, 32) + (double) low;
- /* Apply shifts and exponent value as power of 2. */
- ip = REAL_VALUE_LDEXP (ip, expon - (nrmcount + frexpon));
-
- if (sign < 0)
- ip = -ip;
- return ip;
-}
-
-#endif /* no REAL_ARITHMETIC */
-\f
/* Given T, an expression, return the negation of T. Allow for T to be
null, in which case return null. */
case MINUS_EXPR:
/* - (A - B) -> B - A */
- if (! FLOAT_TYPE_P (type) || flag_fast_math)
+ if (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
return convert (type,
fold (build (MINUS_EXPR, TREE_TYPE (t),
TREE_OPERAND (t, 1),
break;
}
- return convert (type, build1 (NEGATE_EXPR, TREE_TYPE (t), t));
+ return convert (type, fold (build1 (NEGATE_EXPR, TREE_TYPE (t), t)));
}
\f
/* Split a tree IN into a constant, literal and variable parts that could be
*conp = 0;
*litp = 0;
- /* Strip any conversions that don't change the machine mode or signedness. */
+ /* Strip any conversions that don't change the machine mode or signedness. */
STRIP_SIGN_NOPS (in);
if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST)
*litp = in;
- else if (TREE_CONSTANT (in))
- *conp = in;
-
else if (TREE_CODE (in) == code
|| (! FLOAT_TYPE_P (TREE_TYPE (in))
/* We can associate addition and subtraction together (even
*conp = op1, neg_conp_p = neg1_p, op1 = 0;
/* If we haven't dealt with either operand, this is not a case we can
- decompose. Otherwise, VAR is either of the ones remaining, if any. */
+ decompose. Otherwise, VAR is either of the ones remaining, if any. */
if (op0 != 0 && op1 != 0)
var = in;
else if (op0 != 0)
if (neg_conp_p) *conp = negate_expr (*conp);
if (neg_var_p) var = negate_expr (var);
}
+ else if (TREE_CONSTANT (in))
+ *conp = in;
else
var = in;
/* Combine two integer constants ARG1 and ARG2 under operation CODE
to produce a new constant.
- If NOTRUNC is nonzero, do not truncate the result to fit the data type.
- If FORSIZE is nonzero, compute overflow for unsigned types. */
+ If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
static tree
-int_const_binop (code, arg1, arg2, notrunc, forsize)
+int_const_binop (code, arg1, arg2, notrunc)
enum tree_code code;
- register tree arg1, arg2;
- int notrunc, forsize;
+ tree arg1, arg2;
+ int notrunc;
{
unsigned HOST_WIDE_INT int1l, int2l;
HOST_WIDE_INT int1h, int2h;
HOST_WIDE_INT hi;
unsigned HOST_WIDE_INT garbagel;
HOST_WIDE_INT garbageh;
- register tree t;
- int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
+ tree t;
+ tree type = TREE_TYPE (arg1);
+ int uns = TREE_UNSIGNED (type);
+ int is_sizetype
+ = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
int overflow = 0;
int no_overflow = 0;
/* It's unclear from the C standard whether shifts can overflow.
The following code ignores overflow; perhaps a C standard
interpretation ruling is needed. */
- lshift_double (int1l, int1h, int2l, TYPE_PRECISION (TREE_TYPE (arg1)),
+ lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
&low, &hi, !uns);
no_overflow = 1;
break;
case RROTATE_EXPR:
int2l = - int2l;
case LROTATE_EXPR:
- lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (TREE_TYPE (arg1)),
+ lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
&low, &hi);
break;
break;
}
- /* ... fall through ... */
+ /* ... fall through ... */
case ROUND_DIV_EXPR:
if (int2h == 0 && int2l == 1)
low = 1, hi = 0;
break;
}
- overflow = div_and_round_double (code, uns,
- int1l, int1h, int2l, int2h,
+ overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
&low, &hi, &garbagel, &garbageh);
break;
break;
}
- /* ... fall through ... */
+ /* ... fall through ... */
case ROUND_MOD_EXPR:
overflow = div_and_round_double (code, uns,
abort ();
}
- if (forsize && hi == 0 && low < 10000)
- return size_int_type_wide (low, TREE_TYPE (arg1));
+ /* If this is for a sizetype, can be represented as one (signed)
+ HOST_WIDE_INT word, and doesn't overflow, use size_int since it caches
+ constants. */
+ if (is_sizetype
+ && ((hi == 0 && (HOST_WIDE_INT) low >= 0)
+ || (hi == -1 && (HOST_WIDE_INT) low < 0))
+ && overflow == 0 && ! TREE_OVERFLOW (arg1) && ! TREE_OVERFLOW (arg2))
+ return size_int_type_wide (low, type);
else
{
t = build_int_2 (low, hi);
}
TREE_OVERFLOW (t)
- = ((notrunc ? (!uns || forsize) && overflow
- : force_fit_type (t, (!uns || forsize) && overflow) && ! no_overflow)
+ = ((notrunc
+ ? (!uns || is_sizetype) && overflow
+ : (force_fit_type (t, (!uns || is_sizetype) && overflow)
+ && ! no_overflow))
| TREE_OVERFLOW (arg1)
| TREE_OVERFLOW (arg2));
/* If we're doing a size calculation, unsigned arithmetic does overflow.
So check if force_fit_type truncated the value. */
- if (forsize
+ if (is_sizetype
&& ! TREE_OVERFLOW (t)
&& (TREE_INT_CST_HIGH (t) != hi
|| TREE_INT_CST_LOW (t) != low))
struct cb_args *args = (struct cb_args *) data;
REAL_VALUE_TYPE value;
-#ifdef REAL_ARITHMETIC
REAL_ARITHMETIC (value, args->code, args->d1, args->d2);
-#else
- switch (args->code)
- {
- case PLUS_EXPR:
- value = args->d1 + args->d2;
- break;
-
- case MINUS_EXPR:
- value = args->d1 - args->d2;
- break;
-
- case MULT_EXPR:
- value = args->d1 * args->d2;
- break;
-
- case RDIV_EXPR:
-#ifndef REAL_INFINITY
- if (args->d2 == 0)
- abort ();
-#endif
-
- value = args->d1 / args->d2;
- break;
-
- case MIN_EXPR:
- value = MIN (args->d1, args->d2);
- break;
-
- case MAX_EXPR:
- value = MAX (args->d1, args->d2);
- break;
-
- default:
- abort ();
- }
-#endif /* no REAL_ARITHMETIC */
args->t
= build_real (args->type,
static tree
const_binop (code, arg1, arg2, notrunc)
enum tree_code code;
- register tree arg1, arg2;
+ tree arg1, arg2;
int notrunc;
{
STRIP_NOPS (arg1);
STRIP_NOPS (arg2);
if (TREE_CODE (arg1) == INTEGER_CST)
- return int_const_binop (code, arg1, arg2, notrunc, 0);
+ return int_const_binop (code, arg1, arg2, notrunc);
-#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
if (TREE_CODE (arg1) == REAL_CST)
{
REAL_VALUE_TYPE d1;
args.code = code;
if (do_float_handler (const_binop_1, (PTR) &args))
- /* Receive output from const_binop_1. */
+ /* Receive output from const_binop_1. */
t = args.t;
else
{
- /* We got an exception from const_binop_1. */
+ /* We got an exception from const_binop_1. */
t = copy_node (arg1);
overflow = 1;
}
| TREE_CONSTANT_OVERFLOW (arg2);
return t;
}
-#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
if (TREE_CODE (arg1) == COMPLEX_CST)
{
- register tree type = TREE_TYPE (arg1);
- register tree r1 = TREE_REALPART (arg1);
- register tree i1 = TREE_IMAGPART (arg1);
- register tree r2 = TREE_REALPART (arg2);
- register tree i2 = TREE_IMAGPART (arg2);
- register tree t;
+ tree type = TREE_TYPE (arg1);
+ tree r1 = TREE_REALPART (arg1);
+ tree i1 = TREE_IMAGPART (arg1);
+ tree r2 = TREE_REALPART (arg2);
+ tree i2 = TREE_IMAGPART (arg2);
+ tree t;
switch (code)
{
case RDIV_EXPR:
{
- register tree magsquared
+ tree magsquared
= const_binop (PLUS_EXPR,
const_binop (MULT_EXPR, r2, r2, notrunc),
const_binop (MULT_EXPR, i2, i2, notrunc),
}
return 0;
}
+
+/* These are the hash table functions for the hash table of INTEGER_CST
+ nodes of a sizetype. */
+
+/* Return the hash code for X, an INTEGER_CST.  */
+
+static hashval_t
+size_htab_hash (x)
+ const void *x;
+{
+ tree t = (tree) x;
+
+ /* Mix both words of the value, the identity of the type, and the
+ overflow flag, so that any two nodes that size_htab_eq treats as
+ distinct are likely to hash differently. The type pointer is
+ shifted right to discard alignment zero bits before XORing. */
+ return (TREE_INT_CST_HIGH (t) ^ TREE_INT_CST_LOW (t)
+ ^ (hashval_t) ((long) TREE_TYPE (t) >> 3)
+ ^ (TREE_OVERFLOW (t) << 20));
+}
+
+/* Return non-zero if the value represented by *X (an INTEGER_CST tree node)
+   is the same as that represented by *Y, also an INTEGER_CST tree node.  */
+
+static int
+size_htab_eq (x, y)
+ const void *x;
+ const void *y;
+{
+ tree xt = (tree) x;
+ tree yt = (tree) y;
+
+ /* Two cached size constants are interchangeable only when value,
+ type and overflow status all agree; comparing TREE_TYPE by
+ pointer is correct because types are shared nodes. */
+ return (TREE_INT_CST_HIGH (xt) == TREE_INT_CST_HIGH (yt)
+ && TREE_INT_CST_LOW (xt) == TREE_INT_CST_LOW (yt)
+ && TREE_TYPE (xt) == TREE_TYPE (yt)
+ && TREE_OVERFLOW (xt) == TREE_OVERFLOW (yt));
+}
\f
/* Return an INTEGER_CST with value whose low-order HOST_BITS_PER_WIDE_INT
bits are given by NUMBER and of the sizetype represented by KIND. */
HOST_WIDE_INT number;
tree type;
{
- /* Type-size nodes already made for small sizes. */
- static tree size_table[2048 + 1];
- static int init_p = 0;
- tree t;
+ static htab_t size_htab = 0;
+ static tree new_const = 0;
+ PTR *slot;
- if (ggc_p && ! init_p)
+ if (size_htab == 0)
{
- ggc_add_tree_root ((tree *) size_table,
- sizeof size_table / sizeof (tree));
- init_p = 1;
+ size_htab = htab_create (1024, size_htab_hash, size_htab_eq, NULL);
+ ggc_add_deletable_htab (size_htab, NULL, NULL);
+ new_const = make_node (INTEGER_CST);
+ ggc_add_tree_root (&new_const, 1);
}
- /* If this is a positive number that fits in the table we use to hold
- cached entries, see if it is already in the table and put it there
- if not. */
- if (number >= 0 && number < (int) (sizeof size_table / sizeof size_table[0]))
+ /* Adjust NEW_CONST to be the constant we want. If it's already in the
+ hash table, we return the value from the hash table. Otherwise, we
+ place that in the hash table and make a new node for the next time. */
+ TREE_INT_CST_LOW (new_const) = number;
+ TREE_INT_CST_HIGH (new_const) = number < 0 ? -1 : 0;
+ TREE_TYPE (new_const) = type;
+ TREE_OVERFLOW (new_const) = TREE_CONSTANT_OVERFLOW (new_const)
+ = force_fit_type (new_const, 0);
+
+ slot = htab_find_slot (size_htab, new_const, INSERT);
+ if (*slot == 0)
{
- if (size_table[number] != 0)
- for (t = size_table[number]; t != 0; t = TREE_CHAIN (t))
- if (TREE_TYPE (t) == type)
- return t;
-
- if (! ggc_p)
- {
- /* Make this a permanent node. */
- push_obstacks_nochange ();
- end_temporary_allocation ();
- }
-
- t = build_int_2 (number, 0);
- TREE_TYPE (t) = type;
- TREE_CHAIN (t) = size_table[number];
- size_table[number] = t;
-
- if (! ggc_p)
- pop_obstacks ();
+ tree t = new_const;
+ *slot = (PTR) new_const;
+ new_const = make_node (INTEGER_CST);
return t;
}
-
- t = build_int_2 (number, number < 0 ? -1 : 0);
- TREE_TYPE (t) = type;
- TREE_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (t) = force_fit_type (t, 0);
- return t;
+ else
+ return (tree) *slot;
}
/* Combine operands OP1 and OP2 with arithmetic operation CODE. CODE
return arg1;
/* Handle general case of two integer constants. */
- return int_const_binop (code, arg0, arg1, 0, 1);
+ return int_const_binop (code, arg0, arg1, 0);
}
if (arg0 == error_mark_node || arg1 == error_mark_node)
/* This structure is used to communicate arguments to fold_convert_1. */
struct fc_args
{
- tree arg1; /* Input: value to convert. */
- tree type; /* Input: type to convert value to. */
- tree t; /* Ouput: result of conversion. */
+ tree arg1; /* Input: value to convert. */
+ tree type; /* Input: type to convert value to. */
+ tree t; /* Output: result of conversion. */
};
/* Function to convert floating-point constants, protected by floating
static tree
fold_convert (t, arg1)
- register tree t;
- register tree arg1;
+ tree t;
+ tree arg1;
{
- register tree type = TREE_TYPE (t);
+ tree type = TREE_TYPE (t);
int overflow = 0;
if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))
/* If we are trying to make a sizetype for a small integer, use
size_int to pick up cached types to reduce duplicate nodes. */
- if (TREE_CODE (type) == INTEGER_CST && TYPE_IS_SIZETYPE (type)
+ if (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
+ && !TREE_CONSTANT_OVERFLOW (arg1)
&& compare_tree_int (arg1, 10000) < 0)
return size_int_type_wide (TREE_INT_CST_LOW (arg1), type);
TREE_CONSTANT_OVERFLOW (t)
= TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
}
-#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
else if (TREE_CODE (arg1) == REAL_CST)
{
/* Don't initialize these, use assignments.
/* See if X will be in range after truncation towards 0.
To compensate for truncation, move the bounds away from 0,
but reject if X exactly equals the adjusted bounds. */
-#ifdef REAL_ARITHMETIC
REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
if (!no_upper_bound)
REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
-#else
- l--;
- if (!no_upper_bound)
- u++;
-#endif
/* If X is a NaN, use zero instead and show we have an overflow.
Otherwise, range check. */
if (REAL_VALUE_ISNAN (x))
&& REAL_VALUES_LESS (x, u)))
overflow = 1;
-#ifndef REAL_ARITHMETIC
- {
- HOST_WIDE_INT low, high;
- HOST_WIDE_INT half_word
- = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
-
- if (x < 0)
- x = -x;
-
- high = (HOST_WIDE_INT) (x / half_word / half_word);
- x -= (REAL_VALUE_TYPE) high * half_word * half_word;
- if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
- {
- low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
- low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
- }
- else
- low = (HOST_WIDE_INT) x;
- if (TREE_REAL_CST (arg1) < 0)
- neg_double (low, high, &low, &high);
- t = build_int_2 (low, high);
- }
-#else
{
HOST_WIDE_INT low, high;
REAL_VALUE_TO_INT (&low, &high, x);
t = build_int_2 (low, high);
}
-#endif
TREE_TYPE (t) = type;
TREE_OVERFLOW (t)
= TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
TREE_CONSTANT_OVERFLOW (t)
= TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
}
-#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
TREE_TYPE (t) = type;
}
else if (TREE_CODE (type) == REAL_TYPE)
{
-#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
if (TREE_CODE (arg1) == INTEGER_CST)
return build_real_from_int_cst (type, arg1);
-#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
if (TREE_CODE (arg1) == REAL_CST)
{
struct fc_args args;
&& REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0),
TREE_REAL_CST (arg1)));
+ case VECTOR_CST:
+ {
+ tree v1, v2;
+
+ if (TREE_CONSTANT_OVERFLOW (arg0)
+ || TREE_CONSTANT_OVERFLOW (arg1))
+ return 0;
+
+ v1 = TREE_VECTOR_CST_ELTS (arg0);
+ v2 = TREE_VECTOR_CST_ELTS (arg1);
+ while (v1 && v2)
+ {
+ if (!operand_equal_p (v1, v2, only_const))
+ return 0;
+ v1 = TREE_CHAIN (v1);
+ v2 = TREE_CHAIN (v2);
+ }
+
+ return 1;
+ }
+
case COMPLEX_CST:
return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1),
only_const)
case 'r':
/* If either of the pointer (or reference) expressions we are dereferencing
- contain a side effect, these cannot be equal. */
+ contain a side effect, these cannot be equal. */
if (TREE_SIDE_EFFECTS (arg0)
|| TREE_SIDE_EFFECTS (arg1))
return 0;
case COMPONENT_REF:
case ARRAY_REF:
+ case ARRAY_RANGE_REF:
return (operand_equal_p (TREE_OPERAND (arg0, 0),
TREE_OPERAND (arg1, 0), 0)
&& operand_equal_p (TREE_OPERAND (arg0, 1),
if (TREE_CODE_CLASS (code) == '<')
{
if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
- && !flag_fast_math && code != NE_EXPR && code != EQ_EXPR)
+ && !flag_unsafe_math_optimizations
+ && code != NE_EXPR
+ && code != EQ_EXPR)
return build1 (TRUTH_NOT_EXPR, type, arg);
else
return build (invert_tree_comparison (code), type,
enum machine_mode lmode, rmode, nmode;
int lunsignedp, runsignedp;
int lvolatilep = 0, rvolatilep = 0;
- unsigned int alignment;
tree linner, rinner = NULL_TREE;
tree mask;
tree offset;
do anything if the inner expression is a PLACEHOLDER_EXPR since we
then will no longer be able to replace it. */
linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
- &lunsignedp, &lvolatilep, &alignment);
+ &lunsignedp, &lvolatilep);
if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
|| offset != 0 || TREE_CODE (linner) == PLACEHOLDER_EXPR)
return 0;
if (!const_p)
{
/* If this is not a constant, we can only do something if bit positions,
- sizes, and signedness are the same. */
+ sizes, and signedness are the same. */
rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode,
- &runsignedp, &rvolatilep, &alignment);
+ &runsignedp, &rvolatilep);
if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
|| lunsignedp != runsignedp || offset != 0
convert (unsigned_type, rhs),
size_int (lbitsize), 0)))
{
- warning ("comparison is always %d due to width of bitfield",
+ warning ("comparison is always %d due to width of bit-field",
code == NE_EXPR);
return convert (compare_type,
(code == NE_EXPR
size_int (lbitsize - 1), 0);
if (! integer_zerop (tem) && ! integer_all_onesp (tem))
{
- warning ("comparison is always %d due to width of bitfield",
+ warning ("comparison is always %d due to width of bit-field",
code == NE_EXPR);
return convert (compare_type,
(code == NE_EXPR
tree mask, inner, offset;
tree unsigned_type;
unsigned int precision;
- unsigned int alignment;
/* All the optimizations using this function assume integer fields.
There are problems with FP fields since the type_for_size call
}
inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
- punsignedp, pvolatilep, &alignment);
+ punsignedp, pvolatilep);
if ((inner == exp && and_mask == 0)
|| *pbitsize < 0 || offset != 0
|| TREE_CODE (inner) == PLACEHOLDER_EXPR)
the same. But, this is computer arithmetic, where numbers are finite.
We can therefore make the transformation of any unbounded range with
the value Z, Z being greater than any representable number. This permits
- us to treat unbounded ranges as equal. */
+ us to treat unbounded ranges as equal. */
sgn0 = arg0 != 0 ? 0 : (upper0_p ? 1 : -1);
sgn1 = arg1 != 0 ? 0 : (upper1_p ? 1 : -1);
switch (code)
short-circuited branch and the underlying object on both sides
is the same, make a non-short-circuit operation. */
else if (BRANCH_COST >= 2
+ && lhs != 0 && rhs != 0
&& (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
|| TREE_CODE (exp) == TRUTH_ORIF_EXPR)
&& operand_equal_p (lhs, rhs, 0))
|| TREE_CODE_CLASS (TREE_CODE (op0)) == '2'
|| TREE_CODE_CLASS (TREE_CODE (op0)) == 'e')
&& TREE_UNSIGNED (TREE_TYPE (op0))
- && ! TYPE_IS_SIZETYPE (TREE_TYPE (op0))
+ && ! (TREE_CODE (TREE_TYPE (op0)) == INTEGER_TYPE
+ && TYPE_IS_SIZETYPE (TREE_TYPE (op0)))
&& (GET_MODE_SIZE (TYPE_MODE (ctype))
> GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (op0)))))
break;
if (SAVE_EXPR_RTL (t) == 0 && ! TREE_SIDE_EFFECTS (TREE_OPERAND (t, 0))
&& 0 != (t1 = extract_muldiv (TREE_OPERAND (t, 0), c, code,
wide_type)))
- return save_expr (t1);
+ {
+ t1 = save_expr (t1);
+ if (SAVE_EXPR_PERSISTENT_P (t) && TREE_CODE (t1) == SAVE_EXPR)
+ SAVE_EXPR_PERSISTENT_P (t1) = 1;
+ if (is_pending_size (t))
+ put_pending_size (t1);
+ return t1;
+ }
break;
case LSHIFT_EXPR: case RSHIFT_EXPR:
constant. */
t1 = extract_muldiv (op0, c, code, wide_type);
t2 = extract_muldiv (op1, c, code, wide_type);
- if (t1 != 0 && t2 != 0)
+ if (t1 != 0 && t2 != 0
+ && (code == MULT_EXPR
+ /* If not multiplication, we can only do this if either operand
+ is divisible by c. */
+ || multiple_of_p (ctype, op0, c)
+ || multiple_of_p (ctype, op1, c)))
return fold (build (tcode, ctype, convert (ctype, t1),
convert (ctype, t2)));
{
if (code == CEIL_DIV_EXPR)
code = FLOOR_DIV_EXPR;
- else if (code == CEIL_MOD_EXPR)
- code = FLOOR_MOD_EXPR;
else if (code == FLOOR_DIV_EXPR)
code = CEIL_DIV_EXPR;
- else if (code == FLOOR_MOD_EXPR)
- code = CEIL_MOD_EXPR;
- else if (code != MULT_EXPR)
+ else if (code != MULT_EXPR
+ && code != CEIL_MOD_EXPR && code != FLOOR_MOD_EXPR)
break;
}
the operation since it will change the result if the original
computation overflowed. */
if (TREE_UNSIGNED (ctype)
- && ! TYPE_IS_SIZETYPE (ctype)
+ && ! (TREE_CODE (ctype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (ctype))
&& ctype != type)
break;
/* The last case is if we are a multiply. In that case, we can
apply the distributive law to commute the multiply and addition
- if the multiplication of the constants doesn't overflow. */
+ if the multiplication of the constants doesn't overflow. */
if (code == MULT_EXPR)
return fold (build (tcode, ctype, fold (build (code, ctype,
convert (ctype, op0),
&& integer_zerop (const_binop (TRUNC_MOD_EXPR, op1, c, 0)))
return omit_one_operand (type, integer_zero_node, op0);
- /* ... fall through ... */
+ /* ... fall through ... */
case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR: case EXACT_DIV_EXPR:
multiple of the other, in which case we replace this with either an
operation or CODE or TCODE.
- If we have an unsigned type that is not a sizetype, we canot do
+ If we have an unsigned type that is not a sizetype, we cannot do
this since it will change the result if the original computation
overflowed. */
if ((! TREE_UNSIGNED (ctype)
- || TYPE_IS_SIZETYPE (ctype))
+ || (TREE_CODE (ctype) == INTEGER_TYPE && TYPE_IS_SIZETYPE (ctype)))
&& ((code == MULT_EXPR && tcode == EXACT_DIV_EXPR)
|| (tcode == MULT_EXPR
&& code != TRUNC_MOD_EXPR && code != CEIL_MOD_EXPR
tree expr;
int lim;
{
- int true, false;
+ int ctrue, cfalse;
if (TREE_CODE (expr) != COND_EXPR)
return 0;
else if (lim <= 0)
return 0;
- true = count_cond (TREE_OPERAND (expr, 1), lim - 1);
- false = count_cond (TREE_OPERAND (expr, 2), lim - 1 - true);
- return MIN (lim, 1 + true + false);
+ ctrue = count_cond (TREE_OPERAND (expr, 1), lim - 1);
+ cfalse = count_cond (TREE_OPERAND (expr, 2), lim - 1 - ctrue);
+ return MIN (lim, 1 + ctrue + cfalse);
}
+
+/* Transform `a + (b ? x : y)' into `b ? (a + x) : (a + y)'.
+   Transform `a + (x < y)' into `(x < y) ? (a + 1) : (a + 0)'.  Here
+ CODE corresponds to the `+', COND to the `(b ? x : y)' or `(x < y)'
+ expression, and ARG to `a'. If COND_FIRST_P is non-zero, then the
+ COND is the first argument to CODE; otherwise (as in the example
+ given here), it is the second argument. TYPE is the type of the
+ original expression. */
+
+static tree
+fold_binary_op_with_conditional_arg (code, type, cond, arg, cond_first_p)
+ enum tree_code code;
+ tree type;
+ tree cond;
+ tree arg;
+ int cond_first_p;
+{
+ tree test, true_value, false_value;
+ tree lhs = NULL_TREE;
+ tree rhs = NULL_TREE;
+ /* In the end, we'll produce a COND_EXPR. Both arms of the
+ conditional expression will be binary operations. The left-hand
+ side of the expression to be executed if the condition is true
+ will be pointed to by TRUE_LHS. Similarly, the right-hand side
+ of the expression to be executed if the condition is true will be
+ pointed to by TRUE_RHS. FALSE_LHS and FALSE_RHS are analogous --
+ but apply to the expression to be executed if the conditional is
+ false. */
+ tree *true_lhs;
+ tree *true_rhs;
+ tree *false_lhs;
+ tree *false_rhs;
+ /* These are the codes to use for the left-hand side and right-hand
+ side of the COND_EXPR. Normally, they are the same as CODE. */
+ enum tree_code lhs_code = code;
+ enum tree_code rhs_code = code;
+ /* And these are the types of the expressions. */
+ tree lhs_type = type;
+ tree rhs_type = type;
+
+ if (cond_first_p)
+ {
+ true_rhs = false_rhs = &arg;
+ true_lhs = &true_value;
+ false_lhs = &false_value;
+ }
+ else
+ {
+ true_lhs = false_lhs = &arg;
+ true_rhs = &true_value;
+ false_rhs = &false_value;
+ }
+
+ if (TREE_CODE (cond) == COND_EXPR)
+ {
+ test = TREE_OPERAND (cond, 0);
+ true_value = TREE_OPERAND (cond, 1);
+ false_value = TREE_OPERAND (cond, 2);
+      /* If this operand throws an exception, then it does not make
+ sense to try to perform a logical or arithmetic operation
+ involving it. Instead of building `a + throw 3' for example,
+ we simply build `a, throw 3'. */
+ if (VOID_TYPE_P (TREE_TYPE (true_value)))
+ {
+ lhs_code = COMPOUND_EXPR;
+ if (!cond_first_p)
+ lhs_type = void_type_node;
+ }
+ if (VOID_TYPE_P (TREE_TYPE (false_value)))
+ {
+ rhs_code = COMPOUND_EXPR;
+ if (!cond_first_p)
+ rhs_type = void_type_node;
+ }
+ }
+ else
+ {
+ tree testtype = TREE_TYPE (cond);
+ test = cond;
+ true_value = convert (testtype, integer_one_node);
+ false_value = convert (testtype, integer_zero_node);
+ }
+
+ /* If ARG is complex we want to make sure we only evaluate
+ it once. Though this is only required if it is volatile, it
+ might be more efficient even if it is not. However, if we
+ succeed in folding one part to a constant, we do not need
+ to make this SAVE_EXPR. Since we do this optimization
+ primarily to see if we do end up with constant and this
+ SAVE_EXPR interferes with later optimizations, suppressing
+ it when we can is important.
+
+ If we are not in a function, we can't make a SAVE_EXPR, so don't
+ try to do so. Don't try to see if the result is a constant
+ if an arm is a COND_EXPR since we get exponential behavior
+ in that case. */
+
+ if (TREE_CODE (arg) != SAVE_EXPR && ! TREE_CONSTANT (arg)
+ && global_bindings_p () == 0
+ && ((TREE_CODE (arg) != VAR_DECL
+ && TREE_CODE (arg) != PARM_DECL)
+ || TREE_SIDE_EFFECTS (arg)))
+ {
+ if (TREE_CODE (true_value) != COND_EXPR)
+ lhs = fold (build (lhs_code, lhs_type, *true_lhs, *true_rhs));
+
+ if (TREE_CODE (false_value) != COND_EXPR)
+ rhs = fold (build (rhs_code, rhs_type, *false_lhs, *false_rhs));
+
+ if ((lhs == 0 || ! TREE_CONSTANT (lhs))
+ && (rhs == 0 || !TREE_CONSTANT (rhs)))
+ arg = save_expr (arg), lhs = rhs = 0;
+ }
+
+ if (lhs == 0)
+ lhs = fold (build (lhs_code, lhs_type, *true_lhs, *true_rhs));
+ if (rhs == 0)
+ rhs = fold (build (rhs_code, rhs_type, *false_lhs, *false_rhs));
+
+ test = fold (build (COND_EXPR, type, test, lhs, rhs));
+
+ if (TREE_CODE (arg) == SAVE_EXPR)
+ return build (COMPOUND_EXPR, type,
+ convert (void_type_node, arg),
+ strip_compound_expr (test, arg));
+ else
+ return convert (type, test);
+}
+
\f
/* Perform constant folding and related simplification of EXPR.
The related simplifications include x*1 => x, x*0 => 0, etc.,
fold (expr)
tree expr;
{
- register tree t = expr;
+ tree t = expr;
tree t1 = NULL_TREE;
tree tem;
tree type = TREE_TYPE (expr);
- register tree arg0 = NULL_TREE, arg1 = NULL_TREE;
- register enum tree_code code = TREE_CODE (t);
- register int kind;
+ tree arg0 = NULL_TREE, arg1 = NULL_TREE;
+ enum tree_code code = TREE_CODE (t);
+ int kind = TREE_CODE_CLASS (code);
int invert;
/* WINS will be nonzero when the switch is done
if all operands are constant. */
/* Don't try to process an RTL_EXPR since its operands aren't trees.
Likewise for a SAVE_EXPR that's already been evaluated. */
- if (code == RTL_EXPR || (code == SAVE_EXPR && SAVE_EXPR_RTL (t)) != 0)
+ if (code == RTL_EXPR || (code == SAVE_EXPR && SAVE_EXPR_RTL (t) != 0))
return t;
- /* Return right away if already constant. */
- if (TREE_CONSTANT (t))
- {
- if (code == CONST_DECL)
- return DECL_INITIAL (t);
- return t;
- }
+ /* Return right away if a constant. */
+ if (kind == 'c')
+ return t;
#ifdef MAX_INTEGER_COMPUTATION_MODE
check_max_integer_computation_mode (expr);
#endif
- kind = TREE_CODE_CLASS (code);
if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
{
tree subop;
subop = arg0;
if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
-#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
&& TREE_CODE (subop) != REAL_CST
-#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
)
/* Note that TREE_CONSTANT isn't enough:
static var addresses are constant but we can't
}
else if (IS_EXPR_CODE_CLASS (kind) || kind == 'r')
{
- register int len = TREE_CODE_LENGTH (code);
- register int i;
+ int len = first_rtl_op (code);
+ int i;
for (i = 0; i < len; i++)
{
tree op = TREE_OPERAND (t, i);
subop = op;
if (TREE_CODE (subop) != INTEGER_CST
-#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
- && TREE_CODE (subop) != REAL_CST
-#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
- )
+ && TREE_CODE (subop) != REAL_CST)
/* Note that TREE_CONSTANT isn't enough:
static var addresses are constant but we can't
do arithmetic on them. */
&& (! TREE_SIDE_EFFECTS (arg0)
|| (global_bindings_p () == 0
&& ! contains_placeholder_p (arg0))))
- {
- tree test, true_value, false_value;
- tree lhs = 0, rhs = 0;
-
- if (TREE_CODE (arg1) == COND_EXPR)
- {
- test = TREE_OPERAND (arg1, 0);
- true_value = TREE_OPERAND (arg1, 1);
- false_value = TREE_OPERAND (arg1, 2);
- }
- else
- {
- tree testtype = TREE_TYPE (arg1);
- test = arg1;
- true_value = convert (testtype, integer_one_node);
- false_value = convert (testtype, integer_zero_node);
- }
-
- /* If ARG0 is complex we want to make sure we only evaluate
- it once. Though this is only required if it is volatile, it
- might be more efficient even if it is not. However, if we
- succeed in folding one part to a constant, we do not need
- to make this SAVE_EXPR. Since we do this optimization
- primarily to see if we do end up with constant and this
- SAVE_EXPR interferes with later optimizations, suppressing
- it when we can is important.
-
- If we are not in a function, we can't make a SAVE_EXPR, so don't
- try to do so. Don't try to see if the result is a constant
- if an arm is a COND_EXPR since we get exponential behavior
- in that case. */
-
- if (TREE_CODE (arg0) != SAVE_EXPR && ! TREE_CONSTANT (arg0)
- && global_bindings_p () == 0
- && ((TREE_CODE (arg0) != VAR_DECL
- && TREE_CODE (arg0) != PARM_DECL)
- || TREE_SIDE_EFFECTS (arg0)))
- {
- if (TREE_CODE (true_value) != COND_EXPR)
- lhs = fold (build (code, type, arg0, true_value));
-
- if (TREE_CODE (false_value) != COND_EXPR)
- rhs = fold (build (code, type, arg0, false_value));
-
- if ((lhs == 0 || ! TREE_CONSTANT (lhs))
- && (rhs == 0 || !TREE_CONSTANT (rhs)))
- arg0 = save_expr (arg0), lhs = rhs = 0;
- }
-
- if (lhs == 0)
- lhs = fold (build (code, type, arg0, true_value));
- if (rhs == 0)
- rhs = fold (build (code, type, arg0, false_value));
-
- test = fold (build (COND_EXPR, type, test, lhs, rhs));
-
- if (TREE_CODE (arg0) == SAVE_EXPR)
- return build (COMPOUND_EXPR, type,
- convert (void_type_node, arg0),
- strip_compound_expr (test, arg0));
- else
- return convert (type, test);
- }
-
+ return
+ fold_binary_op_with_conditional_arg (code, type, arg1, arg0,
+ /*cond_first_p=*/0);
else if (TREE_CODE (arg0) == COMPOUND_EXPR)
return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
&& (! TREE_SIDE_EFFECTS (arg1)
|| (global_bindings_p () == 0
&& ! contains_placeholder_p (arg1))))
- {
- tree test, true_value, false_value;
- tree lhs = 0, rhs = 0;
-
- if (TREE_CODE (arg0) == COND_EXPR)
- {
- test = TREE_OPERAND (arg0, 0);
- true_value = TREE_OPERAND (arg0, 1);
- false_value = TREE_OPERAND (arg0, 2);
- }
- else
- {
- tree testtype = TREE_TYPE (arg0);
- test = arg0;
- true_value = convert (testtype, integer_one_node);
- false_value = convert (testtype, integer_zero_node);
- }
-
- if (TREE_CODE (arg1) != SAVE_EXPR && ! TREE_CONSTANT (arg0)
- && global_bindings_p () == 0
- && ((TREE_CODE (arg1) != VAR_DECL
- && TREE_CODE (arg1) != PARM_DECL)
- || TREE_SIDE_EFFECTS (arg1)))
- {
- if (TREE_CODE (true_value) != COND_EXPR)
- lhs = fold (build (code, type, true_value, arg1));
-
- if (TREE_CODE (false_value) != COND_EXPR)
- rhs = fold (build (code, type, false_value, arg1));
-
- if ((lhs == 0 || ! TREE_CONSTANT (lhs))
- && (rhs == 0 || !TREE_CONSTANT (rhs)))
- arg1 = save_expr (arg1), lhs = rhs = 0;
- }
-
- if (lhs == 0)
- lhs = fold (build (code, type, true_value, arg1));
-
- if (rhs == 0)
- rhs = fold (build (code, type, false_value, arg1));
-
- test = fold (build (COND_EXPR, type, test, lhs, rhs));
- if (TREE_CODE (arg1) == SAVE_EXPR)
- return build (COMPOUND_EXPR, type,
- convert (void_type_node, arg1),
- strip_compound_expr (test, arg1));
- else
- return convert (type, test);
- }
+ return
+ fold_binary_op_with_conditional_arg (code, type, arg0, arg1,
+ /*cond_first_p=*/1);
}
else if (TREE_CODE_CLASS (code) == '<'
&& TREE_CODE (arg0) == COMPOUND_EXPR)
{
case INTEGER_CST:
case REAL_CST:
+ case VECTOR_CST:
case STRING_CST:
case COMPLEX_CST:
case CONSTRUCTOR:
handled below, if we are converting something to its own
type via an object of identical or wider precision, neither
conversion is needed. */
- if (inside_type == final_type
+ if (TYPE_MAIN_VARIANT (inside_type) == TYPE_MAIN_VARIANT (final_type)
&& ((inter_int && final_int) || (inter_float && final_float))
&& inter_prec >= final_prec)
- return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
+ return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
/* Likewise, if the intermediate and final types are either both
float or both integer, we don't need the middle conversion if
}
return fold_convert (t, arg0);
+ case VIEW_CONVERT_EXPR:
+ if (TREE_CODE (TREE_OPERAND (t, 0)) == VIEW_CONVERT_EXPR)
+ return build1 (VIEW_CONVERT_EXPR, type,
+ TREE_OPERAND (TREE_OPERAND (t, 0), 0));
+ return t;
+
#if 0 /* This loses on &"foo"[0]. */
case ARRAY_REF:
{
/* Convert - (a - b) to (b - a) for non-floating-point. */
else if (TREE_CODE (arg0) == MINUS_EXPR
- && (! FLOAT_TYPE_P (type) || flag_fast_math))
+ && (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations))
return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
TREE_OPERAND (arg0, 0));
{
if (TREE_CODE (arg0) == INTEGER_CST)
{
- if (! TREE_UNSIGNED (type)
- && TREE_INT_CST_HIGH (arg0) < 0)
+ /* If the value is unsigned, then the absolute value is
+ the same as the ordinary value. */
+ if (TREE_UNSIGNED (type))
+ return arg0;
+ /* Similarly, if the value is non-negative. */
+ else if (INT_CST_LT (integer_minus_one_node, arg0))
+ return arg0;
+ /* If the value is negative, then the absolute value is
+ its negation. */
+ else
{
unsigned HOST_WIDE_INT low;
HOST_WIDE_INT high;
TREE_OPERAND (arg0, 0),
negate_expr (TREE_OPERAND (arg0, 1)));
else if (TREE_CODE (arg0) == COMPLEX_CST)
- return build_complex (type, TREE_OPERAND (arg0, 0),
- negate_expr (TREE_OPERAND (arg0, 1)));
+ return build_complex (type, TREE_REALPART (arg0),
+ negate_expr (TREE_IMAGPART (arg0)));
else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
return fold (build (TREE_CODE (arg0), type,
fold (build1 (CONJ_EXPR, type,
}
/* In IEEE floating point, x+0 may not equal x. */
else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
- || flag_fast_math)
+ || flag_unsafe_math_optimizations)
&& real_zerop (arg1))
return non_lvalue (convert (type, arg0));
/* x+(-0) equals x, even for IEEE. */
/* (A << B) + (A >> (Z - B)) if A is unsigned and Z is the size of A
is a rotate of A by B bits. */
{
- register enum tree_code code0, code1;
+ enum tree_code code0, code1;
code0 = TREE_CODE (arg0);
code1 = TREE_CODE (arg1);
if (((code0 == RSHIFT_EXPR && code1 == LSHIFT_EXPR)
TREE_OPERAND (arg1, 0), 0)
&& TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
{
- register tree tree01, tree11;
- register enum tree_code code01, code11;
+ tree tree01, tree11;
+ enum tree_code code01, code11;
tree01 = TREE_OPERAND (arg0, 1);
tree11 = TREE_OPERAND (arg1, 1);
parentheses. Rather than remember where the parentheses were, we
don't associate floats at all. It shouldn't matter much. However,
associating multiplications is only very slightly inaccurate, so do
- that if -ffast-math is specified. */
+ that if -funsafe-math-optimizations is specified. */
if (! wins
&& (! FLOAT_TYPE_P (type)
- || (flag_fast_math && code != MULT_EXPR)))
+ || (flag_unsafe_math_optimizations && code == MULT_EXPR)))
{
tree var0, con0, lit0, var1, con1, lit1;
associate each group together, the constants with literals,
then the result with variables. This increases the chances of
literals being recombined later and of generating relocatable
- expressions for the sum of a constant and literal. */
+ expressions for the sum of a constant and literal. */
var0 = split_tree (arg0, code, &con0, &lit0, 0);
var1 = split_tree (arg1, code, &con1, &lit1, code == MINUS_EXPR);
}
binary:
-#if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
- if (TREE_CODE (arg1) == REAL_CST)
- return t;
-#endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
if (wins)
t1 = const_binop (code, arg0, arg1, 0);
if (t1 != NULL_TREE)
if (! FLOAT_TYPE_P (type))
{
if (! wins && integer_zerop (arg0))
- return convert (type, negate_expr (arg1));
+ return negate_expr (convert (type, arg1));
if (integer_zerop (arg1))
return non_lvalue (convert (type, arg0));
}
else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
- || flag_fast_math)
+ || flag_unsafe_math_optimizations)
{
/* Except with IEEE floating point, 0-x equals -x. */
if (! wins && real_zerop (arg0))
- return convert (type, negate_expr (arg1));
+ return negate_expr (convert (type, arg1));
/* Except with IEEE floating point, x-0 equals x. */
if (real_zerop (arg1))
return non_lvalue (convert (type, arg0));
Also note that operand_equal_p is always false if an operand
is volatile. */
- if ((! FLOAT_TYPE_P (type) || flag_fast_math)
+ if ((! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
&& operand_equal_p (arg0, arg1, 0))
return convert (type, integer_zero_node);
{
/* x*0 is 0, except for IEEE floating point. */
if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
- || flag_fast_math)
+ || flag_unsafe_math_optimizations)
&& real_zerop (arg1))
return omit_one_operand (type, arg1, arg0);
/* In IEEE floating point, x*1 is not equivalent to x for snans.
case RDIV_EXPR:
/* In most cases, do nothing with a divide by zero. */
-#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
#ifndef REAL_INFINITY
if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
return t;
#endif
-#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
/* (-A) / (-B) -> A / B */
if (TREE_CODE (arg0) == NEGATE_EXPR && TREE_CODE (arg1) == NEGATE_EXPR)
/* If ARG1 is a constant, we can convert this to a multiply by the
reciprocal. This does not have the same rounding properties,
- so only do this if -ffast-math. We can actually always safely
- do it if ARG1 is a power of two, but it's hard to tell if it is
- or not in a portable manner. */
+ so only do this if -funsafe-math-optimizations. We can actually
+ always safely do it if ARG1 is a power of two, but it's hard to
+ tell if it is or not in a portable manner. */
if (TREE_CODE (arg1) == REAL_CST)
{
- if (flag_fast_math
+ if (flag_unsafe_math_optimizations
&& 0 != (tem = const_binop (code, build_real (type, dconst1),
arg1, 0)))
return fold (build (MULT_EXPR, type, arg0, tem));
- /* Find the reciprocal if optimizing and the result is exact. */
+ /* Find the reciprocal if optimizing and the result is exact. */
else if (optimize)
{
REAL_VALUE_TYPE r;
}
}
}
+ /* Convert A/B/C to A/(B*C). */
+ if (flag_unsafe_math_optimizations
+ && TREE_CODE (arg0) == RDIV_EXPR)
+ {
+ return fold (build (RDIV_EXPR, type, TREE_OPERAND (arg0, 0),
+ build (MULT_EXPR, type, TREE_OPERAND (arg0, 1),
+ arg1)));
+ }
+ /* Convert A/(B/C) to (A/B)*C. */
+ if (flag_unsafe_math_optimizations
+ && TREE_CODE (arg1) == RDIV_EXPR)
+ {
+ return fold (build (MULT_EXPR, type,
+ build (RDIV_EXPR, type, arg0,
+ TREE_OPERAND (arg1, 0)),
+ TREE_OPERAND (arg1, 1)));
+ }
goto binary;
case TRUNC_DIV_EXPR:
/* If either arg is constant true, drop it. */
if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
return non_lvalue (convert (type, arg1));
- if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
+ if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1)
+ /* Preserve sequence points. */
+ && (code != TRUTH_ANDIF_EXPR || ! TREE_SIDE_EFFECTS (arg0)))
return non_lvalue (convert (type, arg0));
/* If second arg is constant zero, result is zero, but first arg
must be evaluated. */
truth and/or operations and the transformation will still be
valid. Also note that we only care about order for the
ANDIF and ORIF operators. If B contains side effects, this
- might change the truth-value of A. */
+ might change the truth-value of A. */
if (TREE_CODE (arg0) == TREE_CODE (arg1)
&& (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
|| TREE_CODE (arg0) == TRUTH_ORIF_EXPR
/* If either arg is constant zero, drop it. */
if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
return non_lvalue (convert (type, arg1));
- if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
+ if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1)
+ /* Preserve sequence points. */
+ && (code != TRUTH_ORIF_EXPR || ! TREE_SIDE_EFFECTS (arg0)))
return non_lvalue (convert (type, arg0));
/* If second arg is constant true, result is true, but we must
evaluate first arg. */
case EQ_EXPR:
case GE_EXPR:
case LE_EXPR:
- if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
+ if (! FLOAT_TYPE_P (TREE_TYPE (arg0)))
return constant_boolean_node (1, type);
code = EQ_EXPR;
TREE_SET_CODE (t, code);
case NE_EXPR:
/* For NE, we can only do this simplification if integer. */
- if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
+ if (FLOAT_TYPE_P (TREE_TYPE (arg0)))
break;
/* ... fall through ... */
case GT_EXPR:
}
else if (TREE_INT_CST_HIGH (arg1) == -1
- && (- TREE_INT_CST_LOW (arg1)
+ && (TREE_INT_CST_LOW (arg1)
== ((unsigned HOST_WIDE_INT) 1 << (width - 1)))
&& ! TREE_UNSIGNED (TREE_TYPE (arg1)))
switch (TREE_CODE (t))
}
else if (TREE_INT_CST_HIGH (arg1) == 0
- && (TREE_INT_CST_LOW (arg1)
- == ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1)
- && TREE_UNSIGNED (TREE_TYPE (arg1)))
-
+ && (TREE_INT_CST_LOW (arg1)
+ == ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1)
+ && TREE_UNSIGNED (TREE_TYPE (arg1))
+ /* signed_type does not work on pointer types. */
+ && INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
switch (TREE_CODE (t))
{
case LE_EXPR:
default:
break;
}
+
+ else if (TREE_INT_CST_HIGH (arg1) == 0
+ && (TREE_INT_CST_LOW (arg1)
+ == ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1)
+ && TREE_UNSIGNED (TREE_TYPE (arg1)))
+ switch (TREE_CODE (t))
+ {
+ case GT_EXPR:
+ return omit_one_operand (type,
+ convert (type, integer_zero_node),
+ arg0);
+ case GE_EXPR:
+ TREE_SET_CODE (t, EQ_EXPR);
+ break;
+
+ case LE_EXPR:
+ return omit_one_operand (type,
+ convert (type, integer_one_node),
+ arg0);
+ case LT_EXPR:
+ TREE_SET_CODE (t, NE_EXPR);
+ break;
+
+ default:
+ break;
+ }
}
}
fold (build (code, type, imag0, imag1))));
}
+ /* Optimize comparisons of strlen vs zero to a compare of the
+ first character of the string vs zero. To wit,
+ strlen(ptr) == 0 => *ptr == 0
+ strlen(ptr) != 0 => *ptr != 0
+ Other cases should reduce to one of these two (or a constant)
+ due to the return value of strlen being unsigned. */
+ if ((code == EQ_EXPR || code == NE_EXPR)
+ && integer_zerop (arg1)
+ && TREE_CODE (arg0) == CALL_EXPR
+ && TREE_CODE (TREE_OPERAND (arg0, 0)) == ADDR_EXPR)
+ {
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0);
+ tree arglist;
+
+ if (TREE_CODE (fndecl) == FUNCTION_DECL
+ && DECL_BUILT_IN (fndecl)
+ && DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD
+ && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_STRLEN
+ && (arglist = TREE_OPERAND (arg0, 1))
+ && TREE_CODE (TREE_TYPE (TREE_VALUE (arglist))) == POINTER_TYPE
+ && ! TREE_CHAIN (arglist))
+ return fold (build (code, type,
+ build1 (INDIRECT_REF, char_type_node,
+ TREE_VALUE(arglist)),
+ integer_zero_node));
+ }
+
/* From here on, the only cases we handle are when the result is
known to be a constant.
if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
&& (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
|| ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
- || flag_fast_math)
+ || flag_unsafe_math_optimizations)
&& operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
arg1, TREE_OPERAND (arg0, 1)))
{
STRIP_NOPS (arg2);
- /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
+ /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or -abs (A),
depending on the comparison operation. */
if ((FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 1)))
? real_zerop (TREE_OPERAND (arg0, 1))
{
case EQ_EXPR:
return
- pedantic_non_lvalue (convert (type, negate_expr (arg1)));
+ pedantic_non_lvalue
+ (convert (type,
+ negate_expr
+ (convert (TREE_TYPE (TREE_OPERAND (t, 1)),
+ arg1))));
+
case NE_EXPR:
return pedantic_non_lvalue (convert (type, arg1));
case GE_EXPR:
return t;
}
+ case CALL_EXPR:
+ /* Check for a built-in function. */
+ if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
+ && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (expr, 0), 0))
+ == FUNCTION_DECL)
+ && DECL_BUILT_IN (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
+ {
+ tree tmp = fold_builtin (expr);
+ if (tmp)
+ return tmp;
+ }
+ return t;
+
default:
return t;
} /* switch (code) */
return (multiple_of_p (type, TREE_OPERAND (top, 0), bottom)
&& multiple_of_p (type, TREE_OPERAND (top, 1), bottom));
+ case LSHIFT_EXPR:
+ if (TREE_CODE (TREE_OPERAND (top, 1)) == INTEGER_CST)
+ {
+ tree op1, t1;
+
+ op1 = TREE_OPERAND (top, 1);
+ /* const_binop may not detect overflow correctly,
+ so check for it explicitly here. */
+ if (TYPE_PRECISION (TREE_TYPE (size_one_node))
+ > TREE_INT_CST_LOW (op1)
+ && TREE_INT_CST_HIGH (op1) == 0
+ && 0 != (t1 = convert (type,
+ const_binop (LSHIFT_EXPR, size_one_node,
+ op1, 0)))
+ && ! TREE_OVERFLOW (t1))
+ return multiple_of_p (type, t1, bottom);
+ }
+ return 0;
+
case NOP_EXPR:
/* Can't handle conversions from non-integral or wider integral type. */
if ((TREE_CODE (TREE_TYPE (TREE_OPERAND (top, 0))) != INTEGER_TYPE)
< TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (top, 0)))))
return 0;
- /* .. fall through ... */
+ /* .. fall through ... */
case SAVE_EXPR:
return multiple_of_p (type, TREE_OPERAND (top, 0), bottom);
case INTEGER_CST:
- if ((TREE_CODE (bottom) != INTEGER_CST)
- || (tree_int_cst_sgn (top) < 0)
- || (tree_int_cst_sgn (bottom) < 0))
+ if (TREE_CODE (bottom) != INTEGER_CST
+ || (TREE_UNSIGNED (type)
+ && (tree_int_cst_sgn (top) < 0
+ || tree_int_cst_sgn (bottom) < 0)))
return 0;
return integer_zerop (const_binop (TRUNC_MOD_EXPR,
top, bottom, 0));
{
switch (TREE_CODE (t))
{
+ case ABS_EXPR:
+ case FFS_EXPR:
+ return 1;
case INTEGER_CST:
return tree_int_cst_sgn (t) >= 0;
+ case TRUNC_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 0))
+ && tree_expr_nonnegative_p (TREE_OPERAND (t, 1));
+ case TRUNC_MOD_EXPR:
+ case CEIL_MOD_EXPR:
+ case FLOOR_MOD_EXPR:
+ case ROUND_MOD_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 0));
case COND_EXPR:
return tree_expr_nonnegative_p (TREE_OPERAND (t, 1))
&& tree_expr_nonnegative_p (TREE_OPERAND (t, 2));
+ case COMPOUND_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 1));
+ case MIN_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 0))
+ && tree_expr_nonnegative_p (TREE_OPERAND (t, 1));
+ case MAX_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 0))
+ || tree_expr_nonnegative_p (TREE_OPERAND (t, 1));
+ case MODIFY_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 1));
case BIND_EXPR:
return tree_expr_nonnegative_p (TREE_OPERAND (t, 1));
+ case SAVE_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 0));
+ case NON_LVALUE_EXPR:
+ return tree_expr_nonnegative_p (TREE_OPERAND (t, 0));
case RTL_EXPR:
return rtl_expr_nonnegative_p (RTL_EXPR_RTL (t));
default:
- /* We don't know sign of `t', so be safe and return false. */
- return 0;
+ if (truth_value_p (TREE_CODE (t)))
+ /* Truth values evaluate to 0 or 1, which is nonnegative. */
+ return 1;
+ else
+ /* We don't know sign of `t', so be conservative and return false. */
+ return 0;
}
}
return CONST_DOUBLE_HIGH (r) >= 0;
return 0;
+ case CONST_VECTOR:
+ {
+ int units, i;
+ rtx elt;
+
+ units = CONST_VECTOR_NUNITS (r);
+
+ for (i = 0; i < units; ++i)
+ {
+ elt = CONST_VECTOR_ELT (r, i);
+ if (!rtl_expr_nonnegative_p (elt))
+ return 0;
+ }
+
+ return 1;
+ }
+
case SYMBOL_REF:
case LABEL_REF:
/* These are always nonnegative. */