/* Fold a constant sub-tree into a single node for C-compiler
- Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
+ Copyright (C) 1987, 88, 92, 93, 94, 95, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
-
-/*@@ Fix lossage on folding division of big integers. */
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
/*@@ This file should be rewritten to use an arbitrary precision
@@ representation for "struct tree_int_cst" and "struct tree_real_cst". */
/* Handle floating overflow for `const_binop'. */
static jmp_buf float_error;
-static void encode PROTO((short *, HOST_WIDE_INT, HOST_WIDE_INT));
-static void decode PROTO((short *, HOST_WIDE_INT *, HOST_WIDE_INT *));
-static int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
+static void encode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT, HOST_WIDE_INT));
+static void decode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *, HOST_WIDE_INT *));
+int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
HOST_WIDE_INT, HOST_WIDE_INT,
HOST_WIDE_INT, HOST_WIDE_INT *,
HOST_WIDE_INT *, HOST_WIDE_INT *,
HOST_WIDE_INT *));
static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
static tree eval_subst PROTO((tree, tree, tree, tree, tree));
static tree omit_one_operand PROTO((tree, tree, tree));
+static tree pedantic_omit_one_operand PROTO((tree, tree, tree));
static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
tree, tree));
static tree decode_field_reference PROTO((tree, int *, int *,
enum machine_mode *, int *,
- int *, tree *));
+ int *, tree *, tree *));
static int all_ones_mask_p PROTO((tree, int));
static int simple_operand_p PROTO((tree));
static tree range_test PROTO((enum tree_code, tree, enum tree_code,
enum tree_code, tree, tree, tree));
+static tree unextend PROTO((tree, int, int, tree));
static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
+static tree strip_compound_expr PROTO((tree, tree));
#ifndef BRANCH_COST
#define BRANCH_COST 1
#endif
-/* Yield nonzero if a signed left shift of A by B bits overflows. */
-#define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))
-
/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
Then this yields nonzero if overflow occurred during the addition.
Overflow occurs if A and B have the same sign, but A and SUM differ in
sign. */
#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
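/* For example, in 32 bits 0x7fffffff + 1 gives SUM == 0x80000000:
   A and B agree in sign but A and SUM differ, so ~((a) ^ (b)) & ((a) ^ (sum))
   has its sign bit set and the macro yields nonzero.  */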
\f
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
- We do that by representing the two-word integer as MAX_SHORTS shorts,
- with only 8 bits stored in each short, as a positive number. */
+ We do that by representing the two-word integer in 4 words, with only
+ HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number. */
+
+#define LOWPART(x) \
+ ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
+#define HIGHPART(x) \
+ ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
+#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
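+/* A worked example, assuming a 32-bit HOST_WIDE_INT: for
+   x == 0x12345678, LOWPART (x) == 0x5678, HIGHPART (x) == 0x1234 and
+   BASE == 0x10000, so x == HIGHPART (x) * BASE + LOWPART (x); each
+   HOST_WIDE_INT thus contributes two base-65536 digits.  */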
-/* Unpack a two-word integer into MAX_SHORTS shorts.
+/* Unpack a two-word integer into 4 words.
LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
- SHORTS points to the array of shorts. */
+ WORDS points to the array of HOST_WIDE_INTs. */
static void
-encode (shorts, low, hi)
- short *shorts;
+encode (words, low, hi)
+ HOST_WIDE_INT *words;
HOST_WIDE_INT low, hi;
{
- register int i;
-
- for (i = 0; i < MAX_SHORTS / 2; i++)
- {
- shorts[i] = (low >> (i * 8)) & 0xff;
- shorts[i + MAX_SHORTS / 2] = (hi >> (i * 8) & 0xff);
- }
+ words[0] = LOWPART (low);
+ words[1] = HIGHPART (low);
+ words[2] = LOWPART (hi);
+ words[3] = HIGHPART (hi);
}
-/* Pack an array of MAX_SHORTS shorts into a two-word integer.
- SHORTS points to the array of shorts.
+/* Pack an array of 4 words into a two-word integer.
+ WORDS points to the array of words.
The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
static void
-decode (shorts, low, hi)
- short *shorts;
+decode (words, low, hi)
+ HOST_WIDE_INT *words;
HOST_WIDE_INT *low, *hi;
{
- register int i;
- HOST_WIDE_INT lv = 0, hv = 0;
-
- for (i = 0; i < MAX_SHORTS / 2; i++)
- {
- lv |= (HOST_WIDE_INT) shorts[i] << (i * 8);
- hv |= (HOST_WIDE_INT) shorts[i + MAX_SHORTS / 2] << (i * 8);
- }
-
- *low = lv, *hi = hv;
+ *low = words[0] | words[1] * BASE;
+ *hi = words[2] | words[3] * BASE;
}
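/* A minimal round-trip sketch (illustrative only, again assuming a
   32-bit HOST_WIDE_INT):

	HOST_WIDE_INT w[4], lo, hi;
	encode (w, 0x89abcdef, 0x0123);
	decode (w, &lo, &hi);

   leaves w[] == { 0xcdef, 0x89ab, 0x0123, 0 } after the encode and
   restores lo == 0x89abcdef, hi == 0x0123.  */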
\f
/* Make the integer constant T valid for its type
/* Unsigned types do not suffer sign extension or overflow. */
if (TREE_UNSIGNED (TREE_TYPE (t)))
- return 0;
+ return overflow;
/* If the value's sign bit is set, extend the sign. */
if (prec != 2 * HOST_BITS_PER_WIDE_INT
/* Add two doubleword integers with doubleword result.
Each argument is given as two `HOST_WIDE_INT' pieces.
One argument is L1 and H1; the other, L2 and H2.
- The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
- We use the 8-shorts representation internally. */
+ The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
int
add_double (l1, h1, l2, h2, lv, hv)
HOST_WIDE_INT l1, h1, l2, h2;
HOST_WIDE_INT *lv, *hv;
{
- short arg1[MAX_SHORTS];
- short arg2[MAX_SHORTS];
- register int carry = 0;
- register int i;
+ HOST_WIDE_INT l, h;
- encode (arg1, l1, h1);
- encode (arg2, l2, h2);
+ l = l1 + l2;
+ h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);
- for (i = 0; i < MAX_SHORTS; i++)
- {
- carry += arg1[i] + arg2[i];
- arg1[i] = carry & 0xff;
- carry >>= 8;
- }
-
- decode (arg1, lv, hv);
- return overflow_sum_sign (h1, h2, *hv);
+ *lv = l;
+ *hv = h;
+ return overflow_sum_sign (h1, h2, h);
}
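/* The carry test relies on unsigned wraparound: L == L1 + L2 is
   smaller than L1 exactly when the low-word addition wrapped.
   E.g. in 32 bits, 0xffffffff + 2 wraps to 1 < 0xffffffff, so 1 is
   carried into the high word.  */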
/* Negate a doubleword integer with doubleword result.
Return nonzero if the operation overflows, assuming it's signed.
The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
- The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
- We use the 8-shorts representation internally. */
+ The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
int
neg_double (l1, h1, lv, hv)
Return nonzero if the operation overflows, assuming it's signed.
Each argument is given as two `HOST_WIDE_INT' pieces.
One argument is L1 and H1; the other, L2 and H2.
- The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
- We use the 8-shorts representation internally. */
+ The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
int
mul_double (l1, h1, l2, h2, lv, hv)
HOST_WIDE_INT l1, h1, l2, h2;
HOST_WIDE_INT *lv, *hv;
{
- short arg1[MAX_SHORTS];
- short arg2[MAX_SHORTS];
- short prod[MAX_SHORTS * 2];
- register int carry = 0;
+ HOST_WIDE_INT arg1[4];
+ HOST_WIDE_INT arg2[4];
+ HOST_WIDE_INT prod[4 * 2];
+ register unsigned HOST_WIDE_INT carry;
register int i, j, k;
HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
- /* These cases are used extensively, arising from pointer combinations. */
- if (h2 == 0)
- {
- if (l2 == 2)
- {
- int overflow = left_shift_overflows (h1, 1);
- unsigned HOST_WIDE_INT temp = l1 + l1;
- *hv = (h1 << 1) + (temp < l1);
- *lv = temp;
- return overflow;
- }
- if (l2 == 4)
- {
- int overflow = left_shift_overflows (h1, 2);
- unsigned HOST_WIDE_INT temp = l1 + l1;
- h1 = (h1 << 2) + ((temp < l1) << 1);
- l1 = temp;
- temp += temp;
- h1 += (temp < l1);
- *lv = temp;
- *hv = h1;
- return overflow;
- }
- if (l2 == 8)
- {
- int overflow = left_shift_overflows (h1, 3);
- unsigned HOST_WIDE_INT temp = l1 + l1;
- h1 = (h1 << 3) + ((temp < l1) << 2);
- l1 = temp;
- temp += temp;
- h1 += (temp < l1) << 1;
- l1 = temp;
- temp += temp;
- h1 += (temp < l1);
- *lv = temp;
- *hv = h1;
- return overflow;
- }
- }
-
encode (arg1, l1, h1);
encode (arg2, l2, h2);
- bzero (prod, sizeof prod);
+ bzero ((char *) prod, sizeof prod);
- for (i = 0; i < MAX_SHORTS; i++)
- for (j = 0; j < MAX_SHORTS; j++)
- {
- k = i + j;
- carry = arg1[i] * arg2[j];
- while (carry)
- {
- carry += prod[k];
- prod[k] = carry & 0xff;
- carry >>= 8;
- k++;
- }
- }
+ for (i = 0; i < 4; i++)
+ {
+ carry = 0;
+ for (j = 0; j < 4; j++)
+ {
+ k = i + j;
+ /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
+ carry += arg1[i] * arg2[j];
+ /* Since prod[k] <= 0xFFFF, this sum <= 0xFFFFFFFF. */
+ carry += prod[k];
+ prod[k] = LOWPART (carry);
+ carry = HIGHPART (carry);
+ }
+ prod[i + 4] = carry;
+ }
- decode (prod, lv, hv); /* This ignores
- prod[MAX_SHORTS] -> prod[MAX_SHORTS*2-1] */
+ decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */
/* Check for overflow by calculating the top half of the answer in full;
it should agree with the low half's sign bit. */
- decode (prod+MAX_SHORTS, &toplow, &tophigh);
+ decode (prod+4, &toplow, &tophigh);
if (h1 < 0)
{
neg_double (l2, h2, &neglow, &neghigh);
HOST_WIDE_INT *lv, *hv;
int arith;
{
- short arg1[MAX_SHORTS];
- register int i;
- register int carry;
-
if (count < 0)
{
rshift_double (l1, h1, - count, prec, lv, hv, arith);
return;
}
+
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ count %= prec;
+#endif
- encode (arg1, l1, h1);
-
- if (count > prec)
- count = prec;
-
- while (count > 0)
+ if (count >= HOST_BITS_PER_WIDE_INT)
{
- carry = 0;
- for (i = 0; i < MAX_SHORTS; i++)
- {
- carry += arg1[i] << 1;
- arg1[i] = carry & 0xff;
- carry >>= 8;
- }
- count--;
+ *hv = (unsigned HOST_WIDE_INT) l1 << count - HOST_BITS_PER_WIDE_INT;
+ *lv = 0;
+ }
+ else
+ {
+ *hv = (((unsigned HOST_WIDE_INT) h1 << count)
+ | ((unsigned HOST_WIDE_INT) l1 >> HOST_BITS_PER_WIDE_INT - count - 1 >> 1));
+ *lv = (unsigned HOST_WIDE_INT) l1 << count;
}
-
- decode (arg1, lv, hv);
}
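/* The low word is merged into the high word by two shifts,
   ">> HOST_BITS_PER_WIDE_INT - count - 1 >> 1", rather than a single
   ">> HOST_BITS_PER_WIDE_INT - count", because with COUNT == 0 the
   single shift would be by the full word width, which is undefined in
   C; the split form never shifts by more than the width minus one and
   correctly contributes zero bits in that case.  */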
/* Shift the doubleword integer in L1, H1 right by COUNT places
HOST_WIDE_INT *lv, *hv;
int arith;
{
- short arg1[MAX_SHORTS];
- register int i;
- register int carry;
-
- encode (arg1, l1, h1);
-
- if (count > prec)
- count = prec;
+ unsigned HOST_WIDE_INT signmask;
+ signmask = (arith
+ ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
+ : 0);
+
+#ifdef SHIFT_COUNT_TRUNCATED
+ if (SHIFT_COUNT_TRUNCATED)
+ count %= prec;
+#endif
- while (count > 0)
+ if (count >= HOST_BITS_PER_WIDE_INT)
{
- carry = arith && arg1[7] >> 7;
- for (i = MAX_SHORTS - 1; i >= 0; i--)
- {
- carry <<= 8;
- carry += arg1[i];
- arg1[i] = (carry >> 1) & 0xff;
- }
- count--;
+ *hv = signmask;
+ *lv = ((signmask << 2 * HOST_BITS_PER_WIDE_INT - count - 1 << 1)
+ | ((unsigned HOST_WIDE_INT) h1 >> count - HOST_BITS_PER_WIDE_INT));
+ }
+ else
+ {
+ *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
+ | ((unsigned HOST_WIDE_INT) h1 << HOST_BITS_PER_WIDE_INT - count - 1 << 1));
+ *hv = ((signmask << HOST_BITS_PER_WIDE_INT - count)
+ | ((unsigned HOST_WIDE_INT) h1 >> count));
}
-
- decode (arg1, lv, hv);
}
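/* SIGNMASK is all ones only for an arithmetic shift of a negative
   value, and ORing it in replicates the sign through the vacated
   bits.  E.g. with 32-bit words, rshift_double (0, -1, 4, 64, &lo,
   &hi, 1) leaves hi == -1 and lo == 0xf0000000, the four bits
   shifted out of the high word.  */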
\f
-/* Rotate the doubldword integer in L1, H1 left by COUNT places
+/* Rotate the doubleword integer in L1, H1 left by COUNT places
keeping only PREC bits of result.
Rotate right if COUNT is negative.
Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
int prec;
HOST_WIDE_INT *lv, *hv;
{
- short arg1[MAX_SHORTS];
- register int i;
- register int carry;
+ HOST_WIDE_INT s1l, s1h, s2l, s2h;
+ count %= prec;
if (count < 0)
- {
- rrotate_double (l1, h1, - count, prec, lv, hv);
- return;
- }
-
- encode (arg1, l1, h1);
-
- if (count > prec)
- count = prec;
+ count += prec;
- carry = arg1[MAX_SHORTS - 1] >> 7;
- while (count > 0)
- {
- for (i = 0; i < MAX_SHORTS; i++)
- {
- carry += arg1[i] << 1;
- arg1[i] = carry & 0xff;
- carry >>= 8;
- }
- count--;
- }
-
- decode (arg1, lv, hv);
+ lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
+ rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
+ *lv = s1l | s2l;
+ *hv = s1h | s2h;
}
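/* A rotate is thus just the OR of two complementary shifts: for
   PREC == 64 and COUNT == 8 the result is (X << 8) | (X >> 56), and
   the normalization above maps a negative (right-rotate) COUNT onto
   the equivalent left rotate.  */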
/* Rotate the doubleword integer in L1, H1 left by COUNT places
int prec;
HOST_WIDE_INT *lv, *hv;
{
- short arg1[MAX_SHORTS];
- register int i;
- register int carry;
-
- encode (arg1, l1, h1);
+ HOST_WIDE_INT s1l, s1h, s2l, s2h;
- if (count > prec)
- count = prec;
-
- carry = arg1[0] & 1;
- while (count > 0)
- {
- for (i = MAX_SHORTS - 1; i >= 0; i--)
- {
- carry <<= 8;
- carry += arg1[i];
- arg1[i] = (carry >> 1) & 0xff;
- }
- count--;
- }
+ count %= prec;
+ if (count < 0)
+ count += prec;
- decode (arg1, lv, hv);
+ rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
+ lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
+ *lv = s1l | s2l;
+ *hv = s1h | s2h;
}
\f
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
Return nonzero if the operation overflows.
UNS nonzero says do unsigned division. */
-static int
+int
div_and_round_double (code, uns,
lnum_orig, hnum_orig, lden_orig, hden_orig,
lquo, hquo, lrem, hrem)
HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
{
int quo_neg = 0;
- short num[MAX_SHORTS + 1]; /* extra element for scaling. */
- short den[MAX_SHORTS], quo[MAX_SHORTS];
- register int i, j, work;
- register int carry = 0;
+ HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
+ HOST_WIDE_INT den[4], quo[4];
+ register int i, j;
+ unsigned HOST_WIDE_INT work;
+ register unsigned HOST_WIDE_INT carry = 0;
HOST_WIDE_INT lnum = lnum_orig;
HOST_WIDE_INT hnum = hnum_orig;
HOST_WIDE_INT lden = lden_orig;
goto finish_up;
}
- bzero (quo, sizeof quo);
+ bzero ((char *) quo, sizeof quo);
- bzero (num, sizeof num); /* to zero 9th element */
- bzero (den, sizeof den);
+ bzero ((char *) num, sizeof num); /* to zero the extra (5th) element */
+ bzero ((char *) den, sizeof den);
encode (num, lnum, hnum);
encode (den, lden, hden);
- /* This code requires more than just hden == 0.
- We also have to require that we don't need more than three bytes
- to hold CARRY. If we ever did need four bytes to hold it, we
- would lose part of it when computing WORK on the next round. */
- if (hden == 0 && (((unsigned HOST_WIDE_INT) lden << 8) >> 8) == lden)
- { /* simpler algorithm */
+ /* Special code for when the divisor < BASE. */
+ if (hden == 0 && lden < BASE)
+ {
/* hnum != 0 already checked. */
- for (i = MAX_SHORTS - 1; i >= 0; i--)
+ for (i = 4 - 1; i >= 0; i--)
{
- work = num[i] + (carry << 8);
+ work = num[i] + carry * BASE;
quo[i] = work / (unsigned HOST_WIDE_INT) lden;
carry = work % (unsigned HOST_WIDE_INT) lden;
}
}
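/* This is ordinary short division in base BASE: each round divides
   the remainder brought in from the left, carry * BASE + num[i], by
   the single-digit divisor, just as 9724 / 7 is worked one decimal
   digit at a time.  */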
- else { /* full double precision,
- with thanks to Don Knuth's
- "Seminumerical Algorithms". */
-#define BASE 256
- int quo_est, scale, num_hi_sig, den_hi_sig, quo_hi_sig;
+ else
+ {
+ /* Full double precision division,
+ with thanks to Don Knuth's "Seminumerical Algorithms". */
+ int num_hi_sig, den_hi_sig;
+ unsigned HOST_WIDE_INT quo_est, scale;
/* Find the highest non-zero divisor digit. */
- for (i = MAX_SHORTS - 1; ; i--)
+ for (i = 4 - 1; ; i--)
if (den[i] != 0) {
den_hi_sig = i;
break;
}
- for (i = MAX_SHORTS - 1; ; i--)
- if (num[i] != 0) {
- num_hi_sig = i;
- break;
- }
- quo_hi_sig = num_hi_sig - den_hi_sig + 1;
/* Ensure that the first digit of the divisor is at least BASE/2.
This is required by the quotient digit estimation algorithm. */
scale = BASE / (den[den_hi_sig] + 1);
if (scale > 1) { /* scale divisor and dividend */
carry = 0;
- for (i = 0; i <= MAX_SHORTS - 1; i++) {
+ for (i = 0; i <= 4 - 1; i++) {
work = (num[i] * scale) + carry;
- num[i] = work & 0xff;
- carry = work >> 8;
- if (num[i] != 0) num_hi_sig = i;
- }
+ num[i] = LOWPART (work);
+ carry = HIGHPART (work);
+ }
+ num[4] = carry;
carry = 0;
- for (i = 0; i <= MAX_SHORTS - 1; i++) {
+ for (i = 0; i <= 4 - 1; i++) {
work = (den[i] * scale) + carry;
- den[i] = work & 0xff;
- carry = work >> 8;
+ den[i] = LOWPART (work);
+ carry = HIGHPART (work);
if (den[i] != 0) den_hi_sig = i;
}
}
+ num_hi_sig = 4;
+
/* Main loop */
- for (i = quo_hi_sig; i > 0; i--) {
+ for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
/* guess the next quotient digit, quo_est, by dividing the first
two remaining dividend digits by the high order divisor digit.
quo_est is never low and is at most 2 high. */
+ unsigned HOST_WIDE_INT tmp;
- int num_hi; /* index of highest remaining dividend digit */
-
- num_hi = i + den_hi_sig;
-
- work = (num[num_hi] * BASE) + (num_hi > 0 ? num[num_hi - 1] : 0);
- if (num[num_hi] != den[den_hi_sig]) {
+ num_hi_sig = i + den_hi_sig + 1;
+ work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
+ if (num[num_hi_sig] != den[den_hi_sig])
quo_est = work / den[den_hi_sig];
- }
- else {
+ else
quo_est = BASE - 1;
- }
/* refine quo_est so it's usually correct, and at most one high. */
- while ((den[den_hi_sig - 1] * quo_est)
- > (((work - (quo_est * den[den_hi_sig])) * BASE)
- + ((num_hi - 1) > 0 ? num[num_hi - 2] : 0)))
+ tmp = work - quo_est * den[den_hi_sig];
+ if (tmp < BASE
+ && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
quo_est--;
/* Try QUO_EST as the quotient digit, by multiplying the
divisor by QUO_EST and subtracting from the remaining dividend.
Keep in mind that QUO_EST is the Ith digit. */
carry = 0;
-
for (j = 0; j <= den_hi_sig; j++)
{
- int digit;
-
- work = num[i + j - 1] - (quo_est * den[j]) + carry;
- digit = work & 0xff;
- carry = work >> 8;
- if (digit < 0)
- {
- digit += BASE;
- carry--;
- }
- num[i + j - 1] = digit;
+ work = quo_est * den[j] + carry;
+ carry = HIGHPART (work);
+ work = num[i + j] - LOWPART (work);
+ num[i + j] = LOWPART (work);
+ carry += HIGHPART (work) != 0;
}
/* if quo_est was high by one, then num[i] went negative and
we need to correct things. */
- if (num[num_hi] < 0)
+ if (num[num_hi_sig] < carry)
{
quo_est--;
carry = 0; /* add divisor back in */
for (j = 0; j <= den_hi_sig; j++)
{
- work = num[i + j - 1] + den[j] + carry;
- if (work > BASE)
- {
- work -= BASE;
- carry = 1;
- }
- else
- {
- carry = 0;
- }
- num[i + j - 1] = work;
+ work = num[i + j] + den[j] + carry;
+ carry = HIGHPART (work);
+ num[i + j] = LOWPART (work);
}
- num [num_hi] += carry;
+ num [num_hi_sig] += carry;
}
/* store the quotient digit. */
- quo[i - 1] = quo_est;
+ quo[i] = quo_est;
}
}
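/* The full case is Knuth's classical long-division algorithm
   ("Seminumerical Algorithms", section 4.3.1): scaling makes the
   leading divisor digit at least BASE/2, so each estimate QUO_EST is
   never low and at most two too high; the refinement step catches
   one excess and the add-back branch handles the remaining rare
   case.  */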
register tree arg1, arg2;
int notrunc;
{
+ STRIP_NOPS (arg1);
+ STRIP_NOPS (arg2);
+
if (TREE_CODE (arg1) == INTEGER_CST)
{
register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
break;
case MULT_EXPR:
- /* Optimize simple cases. */
- if (int1h == 0)
- {
- unsigned HOST_WIDE_INT temp;
-
- switch (int1l)
- {
- case 0:
- t = build_int_2 (0, 0);
- goto got_it;
- case 1:
- t = build_int_2 (int2l, int2h);
- goto got_it;
- case 2:
- overflow = left_shift_overflows (int2h, 1);
- temp = int2l + int2l;
- int2h = (int2h << 1) + (temp < int2l);
- t = build_int_2 (temp, int2h);
- goto got_it;
-#if 0 /* This code can lose carries. */
- case 3:
- temp = int2l + int2l + int2l;
- int2h = int2h * 3 + (temp < int2l);
- t = build_int_2 (temp, int2h);
- goto got_it;
-#endif
- case 4:
- overflow = left_shift_overflows (int2h, 2);
- temp = int2l + int2l;
- int2h = (int2h << 2) + ((temp < int2l) << 1);
- int2l = temp;
- temp += temp;
- int2h += (temp < int2l);
- t = build_int_2 (temp, int2h);
- goto got_it;
- case 8:
- overflow = left_shift_overflows (int2h, 3);
- temp = int2l + int2l;
- int2h = (int2h << 3) + ((temp < int2l) << 2);
- int2l = temp;
- temp += temp;
- int2h += (temp < int2l) << 1;
- int2l = temp;
- temp += temp;
- int2h += (temp < int2l);
- t = build_int_2 (temp, int2h);
- goto got_it;
- default:
- break;
- }
- }
-
- if (int2h == 0)
- {
- if (int2l == 0)
- {
- t = build_int_2 (0, 0);
- break;
- }
- if (int2l == 1)
- {
- t = build_int_2 (int1l, int1h);
- break;
- }
- }
-
overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
t = build_int_2 (low, hi);
break;
- got_it:
TREE_TYPE (t) = TREE_TYPE (arg1);
TREE_OVERFLOW (t)
- = ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
+ = ((notrunc ? !uns && overflow : force_fit_type (t, overflow && !uns))
| TREE_OVERFLOW (arg1)
| TREE_OVERFLOW (arg2));
TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
if (TREE_CODE (arg1) == REAL_CST)
{
- REAL_VALUE_TYPE d1 = TREE_REAL_CST (arg1);
- REAL_VALUE_TYPE d2 = TREE_REAL_CST (arg2);
+ REAL_VALUE_TYPE d1;
+ REAL_VALUE_TYPE d2;
int overflow = 0;
REAL_VALUE_TYPE value;
tree t;
- if (setjmp (float_error))
+ d1 = TREE_REAL_CST (arg1);
+ d2 = TREE_REAL_CST (arg2);
+
+ /* If either operand is a NaN, just return it. Otherwise, set up
+ for floating-point trap; we return an overflow. */
+ if (REAL_VALUE_ISNAN (d1))
+ return arg1;
+ else if (REAL_VALUE_ISNAN (d2))
+ return arg2;
+ else if (setjmp (float_error))
{
t = copy_node (arg1);
overflow = 1;
tree
size_int (number)
- unsigned int number;
+ unsigned HOST_WIDE_INT number;
{
register tree t;
/* Type-size nodes already made for small sizes. */
&& TREE_INT_CST_HIGH (arg0) == 0)
return arg1;
/* Handle general case of two integer constants. */
- return const_binop (code, arg0, arg1, 1);
+ return const_binop (code, arg0, arg1, 0);
}
if (arg0 == error_mark_node || arg1 == error_mark_node)
{
if (TREE_CODE (arg1) == INTEGER_CST)
{
+ /* If we would build a constant wider than GCC supports,
+ leave the conversion unfolded. */
+ if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
+ return t;
+
/* Given an integer constant, make new constant with new type,
appropriately sign-extended or truncated. */
t = build_int_2 (TREE_INT_CST_LOW (arg1),
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
else if (TREE_CODE (arg1) == REAL_CST)
{
- REAL_VALUE_TYPE l, x, u;
+ /* Don't initialize these, use assignments.
+ Initialized local aggregates don't work on old compilers. */
+ REAL_VALUE_TYPE x;
+ REAL_VALUE_TYPE l;
+ REAL_VALUE_TYPE u;
+ tree type1 = TREE_TYPE (arg1);
- l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
x = TREE_REAL_CST (arg1);
- u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
-
+ l = real_value_from_int_cst (type1, TYPE_MIN_VALUE (type));
+ u = real_value_from_int_cst (type1, TYPE_MAX_VALUE (type));
/* See if X will be in range after truncation towards 0.
To compensate for truncation, move the bounds away from 0,
but reject if X exactly equals the adjusted bounds. */
l--;
u++;
#endif
- if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
+ /* If X is a NaN, use zero instead and show we have an overflow.
+ Otherwise, range check. */
+ if (REAL_VALUE_ISNAN (x))
+ overflow = 1, x = dconst0;
+ else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
overflow = 1;
#ifndef REAL_ARITHMETIC
{
- REAL_VALUE_TYPE d;
HOST_WIDE_INT low, high;
HOST_WIDE_INT half_word
= (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
- d = TREE_REAL_CST (arg1);
- if (d < 0)
- d = -d;
+ if (x < 0)
+ x = -x;
- high = (HOST_WIDE_INT) (d / half_word / half_word);
- d -= (REAL_VALUE_TYPE) high * half_word * half_word;
- if (d >= (REAL_VALUE_TYPE) half_word * half_word / 2)
+ high = (HOST_WIDE_INT) (x / half_word / half_word);
+ x -= (REAL_VALUE_TYPE) high * half_word * half_word;
+ if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
{
- low = d - (REAL_VALUE_TYPE) half_word * half_word / 2;
+ low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
}
else
- low = (HOST_WIDE_INT) d;
+ low = (HOST_WIDE_INT) x;
if (TREE_REAL_CST (arg1) < 0)
neg_double (low, high, &low, &high);
t = build_int_2 (low, high);
#else
{
HOST_WIDE_INT low, high;
- REAL_VALUE_TO_INT (&low, &high, (TREE_REAL_CST (arg1)));
+ REAL_VALUE_TO_INT (&low, &high, x);
t = build_int_2 (low, high);
}
#endif
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
if (TREE_CODE (arg1) == REAL_CST)
{
- if (setjmp (float_error))
+ if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
+ {
+ t = arg1;
+ TREE_TYPE (arg1) = type;
+ return t;
+ }
+ else if (setjmp (float_error))
{
overflow = 1;
t = copy_node (arg1);
return result;
}
+/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
+ Zero means allow extended lvalues. */
+
+int pedantic_lvalues;
+
/* When pedantic, return an expr equal to X but certainly not valid as a
pedantic lvalue. Otherwise, return X. */
pedantic_non_lvalue (x)
tree x;
{
- if (pedantic)
+ if (pedantic_lvalues)
return non_lvalue (x);
else
return x;
/* Detect when real constants are equal. */
if (TREE_CODE (arg0) == TREE_CODE (arg1)
&& TREE_CODE (arg0) == REAL_CST)
- return !bcmp (&TREE_REAL_CST (arg0), &TREE_REAL_CST (arg1),
+ return !bcmp ((char *) &TREE_REAL_CST (arg0),
+ (char *) &TREE_REAL_CST (arg1),
sizeof (REAL_VALUE_TYPE));
if (only_const)
if (operand_equal_p (arg0, arg1, 0))
return 1;
- if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
+ if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
+ || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
return 0;
/* Duplicate what shorten_compare does to ARG1 and see if that gives the
return non_lvalue (t);
}
+
+/* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
+
+static tree
+pedantic_omit_one_operand (type, result, omitted)
+ tree type, result, omitted;
+{
+ tree t = convert (type, result);
+
+ if (TREE_SIDE_EFFECTS (omitted))
+ return build (COMPOUND_EXPR, type, omitted, t);
+
+ return pedantic_non_lvalue (t);
+}
+
+
\f
/* Return a simplified tree node for the truth-negation of ARG. This
never alters ARG itself. We assume that ARG is an operation that
case SAVE_EXPR:
return build1 (TRUTH_NOT_EXPR, type, arg);
+
+ case CLEANUP_POINT_EXPR:
+ return build1 (CLEANUP_POINT_EXPR, type,
+ invert_truthvalue (TREE_OPERAND (arg, 0)));
}
if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
abort ();
return 0;
}
-#if BYTES_BIG_ENDIAN
- lbitpos = lnbitsize - lbitsize - lbitpos;
-#endif
+ if (BYTES_BIG_ENDIAN)
+ lbitpos = lnbitsize - lbitsize - lbitpos;
/* Make the mask to be used against the extracted field. */
mask = build_int_2 (~0, ~0);
*PMASK is set to the mask used. This is either contained in a
BIT_AND_EXPR or derived from the width of the field.
+ *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.
+
Return 0 if this is not a component reference or is one that we can't
do anything with. */
static tree
decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
- pvolatilep, pmask)
+ pvolatilep, pmask, pand_mask)
tree exp;
int *pbitsize, *pbitpos;
enum machine_mode *pmode;
int *punsignedp, *pvolatilep;
tree *pmask;
+ tree *pand_mask;
{
- tree mask = 0;
- tree inner;
- tree offset;
+ tree and_mask = 0;
+ tree mask, inner, offset;
+ tree unsigned_type;
+ int precision;
/* All the optimizations using this function assume integer fields.
There are problems with FP fields since the type_for_size call
if (TREE_CODE (exp) == BIT_AND_EXPR)
{
- mask = TREE_OPERAND (exp, 1);
+ and_mask = TREE_OPERAND (exp, 1);
exp = TREE_OPERAND (exp, 0);
- STRIP_NOPS (exp); STRIP_NOPS (mask);
- if (TREE_CODE (mask) != INTEGER_CST)
+ STRIP_NOPS (exp); STRIP_NOPS (and_mask);
+ if (TREE_CODE (and_mask) != INTEGER_CST)
return 0;
}
- if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
- && TREE_CODE (exp) != BIT_FIELD_REF)
- return 0;
inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
punsignedp, pvolatilep);
- if (inner == exp || *pbitsize < 0 || offset != 0)
+ if ((inner == exp && and_mask == 0)
+ || *pbitsize < 0 || offset != 0)
return 0;
- if (mask == 0)
- {
- tree unsigned_type = type_for_size (*pbitsize, 1);
- int precision = TYPE_PRECISION (unsigned_type);
-
- mask = build_int_2 (~0, ~0);
- TREE_TYPE (mask) = unsigned_type;
- force_fit_type (mask, 0);
- mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
- mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
- }
+ /* Compute the mask to access the bitfield. */
+ unsigned_type = type_for_size (*pbitsize, 1);
+ precision = TYPE_PRECISION (unsigned_type);
+
+ mask = build_int_2 (~0, ~0);
+ TREE_TYPE (mask) = unsigned_type;
+ force_fit_type (mask, 0);
+ mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
+ mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
+
+ /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
+ if (and_mask != 0)
+ mask = fold (build (BIT_AND_EXPR, unsigned_type,
+ convert (unsigned_type, and_mask), mask));
*pmask = mask;
+ *pand_mask = and_mask;
return inner;
}
TREE_TYPE (tmask) = signed_type (type);
force_fit_type (tmask, 0);
return
- operand_equal_p (mask,
- const_binop (RSHIFT_EXPR,
- const_binop (LSHIFT_EXPR, tmask,
- size_int (precision - size), 0),
- size_int (precision - size), 0),
- 0);
+ tree_int_cst_equal (mask,
+ const_binop (RSHIFT_EXPR,
+ const_binop (LSHIFT_EXPR, tmask,
+ size_int (precision - size),
+ 0),
+ size_int (precision - size), 0));
}
/* Subroutine for fold_truthop: determine if an operand is simple enough
const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
}
\f
+/* Subroutine for fold_truthop: C is an INTEGER_CST interpreted as a P
+ bit value. Arrange things so the extra bits will be set to zero if and
+ only if C is sign-extended to its full width. If MASK is nonzero,
+ it is an INTEGER_CST that should be AND'ed with the extra bits. */
+
+static tree
+unextend (c, p, unsignedp, mask)
+ tree c;
+ int p;
+ int unsignedp;
+ tree mask;
+{
+ tree type = TREE_TYPE (c);
+ int modesize = GET_MODE_BITSIZE (TYPE_MODE (type));
+ tree temp;
+
+ if (p == modesize || unsignedp)
+ return c;
+
+ if (TREE_UNSIGNED (type))
+ c = convert (signed_type (type), c);
+
+ /* We work by getting just the sign bit into the low-order bit, then
+ into the high-order bit, then sign-extend. We then XOR that value
+ with C. */
+ temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1), 0);
+ temp = const_binop (BIT_AND_EXPR, temp, size_int (1), 0);
+ temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1), 0);
+ temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1), 0);
+ if (mask != 0)
+ temp = const_binop (BIT_AND_EXPR, temp, convert (TREE_TYPE (c), mask), 0);
+
+ return convert (type, const_binop (BIT_XOR_EXPR, c, temp, 0));
+}
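+/* A worked example, assuming a 32-bit mode, P == 8 and MASK == 0:
+   for C == 0x80 the steps above compute TEMP == 0xffffff00, so the
+   result is 0x80 ^ 0xffffff00 == 0xffffff80; starting instead from
+   the sign-extended C == 0xffffff80 gives back 0x00000080.  The
+   extra bits thus end up zero exactly when C was sign-extended.  */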
+\f
/* Find ways of folding logical expressions of LHS and RHS:
Try to merge two comparisons to the same innermost item.
Look for range tests like "ch >= '0' && ch <= '9'".
enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
enum machine_mode lnmode, rnmode;
tree ll_mask, lr_mask, rl_mask, rr_mask;
+ tree ll_and_mask, lr_and_mask, rl_and_mask, rr_and_mask;
tree l_const, r_const;
tree type, result;
int first_bit, end_bit;
volatilep = 0;
ll_inner = decode_field_reference (ll_arg,
&ll_bitsize, &ll_bitpos, &ll_mode,
- &ll_unsignedp, &volatilep, &ll_mask);
+ &ll_unsignedp, &volatilep, &ll_mask,
+ &ll_and_mask);
lr_inner = decode_field_reference (lr_arg,
&lr_bitsize, &lr_bitpos, &lr_mode,
- &lr_unsignedp, &volatilep, &lr_mask);
+ &lr_unsignedp, &volatilep, &lr_mask,
+ &lr_and_mask);
rl_inner = decode_field_reference (rl_arg,
&rl_bitsize, &rl_bitpos, &rl_mode,
- &rl_unsignedp, &volatilep, &rl_mask);
+ &rl_unsignedp, &volatilep, &rl_mask,
+ &rl_and_mask);
rr_inner = decode_field_reference (rr_arg,
&rr_bitsize, &rr_bitpos, &rr_mode,
- &rr_unsignedp, &volatilep, &rr_mask);
+ &rr_unsignedp, &volatilep, &rr_mask,
+ &rr_and_mask);
/* The inner operation on the lhs of each
comparison must be the same if we are to be able to do anything.
type = type_for_size (lnbitsize, 1);
xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
-#if BYTES_BIG_ENDIAN
- xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
- xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
-#endif
+ if (BYTES_BIG_ENDIAN)
+ {
+ xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
+ xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
+ }
ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
size_int (xll_bitpos), 0);
rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
size_int (xrl_bitpos), 0);
- /* Make sure the constants are interpreted as unsigned, so we
- don't have sign bits outside the range of their type. */
-
if (l_const)
{
- l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
- l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
- size_int (xll_bitpos), 0);
+ l_const = convert (type, l_const);
+ l_const = unextend (l_const, ll_bitsize, ll_unsignedp, ll_and_mask);
+ l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
+ if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
+ fold (build1 (BIT_NOT_EXPR,
+ type, ll_mask)),
+ 0)))
+ {
+ warning ("comparison is always %s",
+ wanted_code == NE_EXPR ? "one" : "zero");
+
+ return convert (truth_type,
+ wanted_code == NE_EXPR
+ ? integer_one_node : integer_zero_node);
+ }
}
if (r_const)
{
- r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
- r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
- size_int (xrl_bitpos), 0);
+ r_const = convert (type, r_const);
+ r_const = unextend (r_const, rl_bitsize, rl_unsignedp, rl_and_mask);
+ r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos), 0);
+ if (! integer_zerop (const_binop (BIT_AND_EXPR, r_const,
+ fold (build1 (BIT_NOT_EXPR,
+ type, rl_mask)),
+ 0)))
+ {
+ warning ("comparison is always %s",
+ wanted_code == NE_EXPR ? "one" : "zero");
+
+ return convert (truth_type,
+ wanted_code == NE_EXPR
+ ? integer_one_node : integer_zero_node);
+ }
}
/* If the right sides are not constant, do the same for them. Also,
rnbitpos = first_bit & ~ (rnbitsize - 1);
xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
-#if BYTES_BIG_ENDIAN
- xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
- xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
-#endif
+ if (BYTES_BIG_ENDIAN)
+ {
+ xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
+ xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
+ }
lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
size_int (xlr_bitpos), 0);
const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
}
\f
+/* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
+ S, a SAVE_EXPR, return the expression actually being evaluated. Note
+ that we may sometimes modify the tree. */
+
+static tree
+strip_compound_expr (t, s)
+ tree t;
+ tree s;
+{
+ tree type = TREE_TYPE (t);
+ enum tree_code code = TREE_CODE (t);
+
+ /* See if this is the COMPOUND_EXPR we want to eliminate. */
+ if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
+ && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
+ return TREE_OPERAND (t, 1);
+
+ /* See if this is a COND_EXPR or a simple arithmetic operator. We
+ don't bother handling any other types. */
+ else if (code == COND_EXPR)
+ {
+ TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
+ TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
+ TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
+ }
+ else if (TREE_CODE_CLASS (code) == '1')
+ TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
+ else if (TREE_CODE_CLASS (code) == '<'
+ || TREE_CODE_CLASS (code) == '2')
+ {
+ TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
+ TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
+ }
+
+ return t;
+}
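+/* The shape eliminated is the "((void) S, VALUE)" compound built
+   when folding around a SAVE_EXPR; since the enclosing
+   COMPOUND_EXPR already evaluates S first, copies nested inside the
+   folded result are redundant and VALUE alone can remain.  */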
+\f
/* Perform constant folding and related simplification of EXPR.
The related simplifications include x*1 => x, x*0 => 0, etc.,
and application of the associative law.
fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
/* If this was a conversion, and all we did was to move it
- inside the COND_EXPR, bring it back out. Then return so we
- don't get into an infinite recursion loop taking the conversion
- out and then back in. */
+ inside the COND_EXPR, bring it back out. But leave it if
+ it is a conversion from integer to integer and the
+ result precision is no wider than a word since such a
+ conversion is cheap and may be optimized away by combine,
+ while it couldn't if it were outside the COND_EXPR. Then return
+ so we don't get into an infinite recursion loop taking the
+ conversion out and then back in. */
if ((code == NOP_EXPR || code == CONVERT_EXPR
|| code == NON_LVALUE_EXPR)
&& TREE_CODE (TREE_OPERAND (t, 1)) == code
&& TREE_CODE (TREE_OPERAND (t, 2)) == code
&& (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
- == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0))))
+ == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0)))
+ && ! (INTEGRAL_TYPE_P (TREE_TYPE (t))
+ && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)))
+ && TYPE_PRECISION (TREE_TYPE (t)) <= BITS_PER_WORD))
t = build1 (code, type,
build (COND_EXPR,
TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
}
else
{
+ tree testtype = TREE_TYPE (arg1);
test = arg1;
- true_value = integer_one_node;
- false_value = integer_zero_node;
+ true_value = convert (testtype, integer_one_node);
+ false_value = convert (testtype, integer_zero_node);
}
/* If ARG0 is complex we want to make sure we only evaluate
succeed in folding one part to a constant, we do not need
to make this SAVE_EXPR. Since we do this optimization
primarily to see if we do end up with a constant and this
- SAVE_EXPR interfers with later optimizations, suppressing
+ SAVE_EXPR interferes with later optimizations, suppressing
it when we can is important. */
- if ((TREE_CODE (arg0) != VAR_DECL && TREE_CODE (arg0) != PARM_DECL)
- || TREE_SIDE_EFFECTS (arg0))
+ if (TREE_CODE (arg0) != SAVE_EXPR
+ && ((TREE_CODE (arg0) != VAR_DECL
+ && TREE_CODE (arg0) != PARM_DECL)
+ || TREE_SIDE_EFFECTS (arg0)))
{
tree lhs = fold (build (code, type, arg0, true_value));
tree rhs = fold (build (code, type, arg0, false_value));
fold (build (code, type, arg0, false_value))));
if (TREE_CODE (arg0) == SAVE_EXPR)
return build (COMPOUND_EXPR, type,
- convert (void_type_node, arg0), test);
+ convert (void_type_node, arg0),
+ strip_compound_expr (test, arg0));
else
return convert (type, test);
}
}
else
{
+ tree testtype = TREE_TYPE (arg0);
test = arg0;
- true_value = integer_one_node;
- false_value = integer_zero_node;
+ true_value = convert (testtype, integer_one_node);
+ false_value = convert (testtype, integer_zero_node);
}
- if ((TREE_CODE (arg1) != VAR_DECL && TREE_CODE (arg1) != PARM_DECL)
- || TREE_SIDE_EFFECTS (arg1))
+ if (TREE_CODE (arg1) != SAVE_EXPR
+ && ((TREE_CODE (arg1) != VAR_DECL
+ && TREE_CODE (arg1) != PARM_DECL)
+ || TREE_SIDE_EFFECTS (arg1)))
{
tree lhs = fold (build (code, type, true_value, arg1));
tree rhs = fold (build (code, type, false_value, arg1));
- if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
+ if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs)
+ || TREE_CONSTANT (arg1))
return fold (build (COND_EXPR, type, test, lhs, rhs));
arg1 = save_expr (arg1);
fold (build (code, type, false_value, arg1))));
if (TREE_CODE (arg1) == SAVE_EXPR)
return build (COMPOUND_EXPR, type,
- convert (void_type_node, arg1), test);
+ convert (void_type_node, arg1),
+ strip_compound_expr (test, arg1));
else
return convert (type, test);
}
case FIX_TRUNC_EXPR:
/* Other kinds of FIX are not handled properly by fold_convert. */
- /* In addition to the cases of two conversions in a row
- handled below, if we are converting something to its own
- type via an object of identical or wider precision, neither
- conversion is needed. */
- if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
- || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
- && TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == TREE_TYPE (t)
- && ((INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
- && INTEGRAL_TYPE_P (TREE_TYPE (t)))
- || (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
- && FLOAT_TYPE_P (TREE_TYPE (t))))
- && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
- >= TYPE_PRECISION (TREE_TYPE (t))))
- return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
-
- /* Two conversions in a row are not needed unless:
- - the intermediate type is narrower than both initial and final, or
- - the intermediate type and innermost type differ in signedness,
- and the outermost type is wider than the intermediate, or
- - the initial type is a pointer type and the precisions of the
- intermediate and final types differ, or
- - the final type is a pointer type and the precisions of the
- initial and intermediate types differ. */
- if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
- || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
- && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
- > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
- ||
- TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
- > TYPE_PRECISION (TREE_TYPE (t)))
- && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
- == INTEGER_TYPE)
- && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
- == INTEGER_TYPE)
- && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
- != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
- && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
- < TYPE_PRECISION (TREE_TYPE (t))))
- && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
- && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
- > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
- ==
- (TREE_UNSIGNED (TREE_TYPE (t))
- && (TYPE_PRECISION (TREE_TYPE (t))
- > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
- && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
- == POINTER_TYPE)
- && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
- != TYPE_PRECISION (TREE_TYPE (t))))
- && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
- && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
- != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
- return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));
+ if (TREE_TYPE (TREE_OPERAND (t, 0)) == TREE_TYPE (t))
+ return TREE_OPERAND (t, 0);
+
+ /* Handle cases of two conversions in a row. */
+ if (TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
+ || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
+ {
+ tree inside_type = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0));
+ tree inter_type = TREE_TYPE (TREE_OPERAND (t, 0));
+ tree final_type = TREE_TYPE (t);
+ int inside_int = INTEGRAL_TYPE_P (inside_type);
+ int inside_ptr = POINTER_TYPE_P (inside_type);
+ int inside_float = FLOAT_TYPE_P (inside_type);
+ int inside_prec = TYPE_PRECISION (inside_type);
+ int inside_unsignedp = TREE_UNSIGNED (inside_type);
+ int inter_int = INTEGRAL_TYPE_P (inter_type);
+ int inter_ptr = POINTER_TYPE_P (inter_type);
+ int inter_float = FLOAT_TYPE_P (inter_type);
+ int inter_prec = TYPE_PRECISION (inter_type);
+ int inter_unsignedp = TREE_UNSIGNED (inter_type);
+ int final_int = INTEGRAL_TYPE_P (final_type);
+ int final_ptr = POINTER_TYPE_P (final_type);
+ int final_float = FLOAT_TYPE_P (final_type);
+ int final_prec = TYPE_PRECISION (final_type);
+ int final_unsignedp = TREE_UNSIGNED (final_type);
+
+ /* In addition to the cases of two conversions in a row
+ handled below, if we are converting something to its own
+ type via an object of identical or wider precision, neither
+ conversion is needed. */
+ if (inside_type == final_type
+ && ((inter_int && final_int) || (inter_float && final_float))
+ && inter_prec >= final_prec)
+ return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
+
+ /* Likewise, if the intermediate and final types are either both
+ float or both integer, we don't need the middle conversion if
+ it is at least as wide as the initial type and doesn't change the signedness
+ (for integers). Avoid this if the final type is a pointer
+ since then we sometimes need the inner conversion. Likewise if
+ the outer has a precision not equal to the size of its mode. */
+ if ((((inter_int || inter_ptr) && (inside_int || inside_ptr))
+ || (inter_float && inside_float))
+ && inter_prec >= inside_prec
+ && (inter_float || inter_unsignedp == inside_unsignedp)
+ && ! (final_prec != GET_MODE_BITSIZE (TYPE_MODE (final_type))
+ && TYPE_MODE (final_type) == TYPE_MODE (inter_type))
+ && ! final_ptr)
+ return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
+
+ /* Two conversions in a row are not needed unless:
+ - some conversion is floating-point (overstrict for now), or
+ - the intermediate type is narrower than both initial and
+ final, or
+ - the intermediate type and innermost type differ in signedness,
+ and the outermost type is wider than the intermediate, or
+ - the initial type is a pointer type and the precisions of the
+ intermediate and final types differ, or
+ - the final type is a pointer type and the precisions of the
+ initial and intermediate types differ. */
+ if (! inside_float && ! inter_float && ! final_float
+ && (inter_prec > inside_prec || inter_prec > final_prec)
+ && ! (inside_int && inter_int
+ && inter_unsignedp != inside_unsignedp
+ && inter_prec < final_prec)
+ && ((inter_unsignedp && inter_prec > inside_prec)
+ == (final_unsignedp && final_prec > inter_prec))
+ && ! (inside_ptr && inter_prec != final_prec)
+ && ! (final_ptr && inside_prec != inter_prec)
+ && ! (final_prec != GET_MODE_BITSIZE (TYPE_MODE (final_type))
+ && TYPE_MODE (final_type) == TYPE_MODE (inter_type))
+ && ! final_ptr)
+ return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
+ }
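+ /* Concrete instances: (int) (long) I for an int I drops both
+ casts (own type via a wider one); (short) (long) C for a char C
+ becomes (short) C, the wider middle conversion being harmless;
+ but (int) (char) I must keep the narrowing intermediate, since
+ it truncates the value. */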
if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
&& TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
return t;
#endif /* 0 */
+ case COMPONENT_REF:
+ if (TREE_CODE (arg0) == CONSTRUCTOR)
+ {
+ tree m = purpose_member (arg1, CONSTRUCTOR_ELTS (arg0));
+ if (m)
+ t = TREE_VALUE (m);
+ }
+ return t;
+
case RANGE_EXPR:
TREE_CONSTANT (t) = wins;
return t;
TREE_TYPE (t) = type;
TREE_OVERFLOW (t)
= (TREE_OVERFLOW (arg0)
- | force_fit_type (t, overflow));
+ | force_fit_type (t, overflow && !TREE_UNSIGNED (type)));
TREE_CONSTANT_OVERFLOW (t)
= TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
}
associate:
/* In most languages, can't associate operations on floats
through parentheses. Rather than remember where the parentheses
- were, we don't associate floats at all. It shouldn't matter much. */
- if (FLOAT_TYPE_P (type))
+ were, we don't associate floats at all. It shouldn't matter much.
+ However, associating multiplications is only very slightly
+ inaccurate, so do that if -ffast-math is specified. */
+ if (FLOAT_TYPE_P (type)
+ && ! (flag_fast_math && code == MULT_EXPR))
goto binary;
+
/* The varsign == -1 cases happen only for addition and subtraction.
It says that the arg that was split was really CON minus VAR.
The rest of the code applies to all associative operations. */
return t;
/* Otherwise return (CON +- ARG1) - VAR. */
- TREE_SET_CODE (t, MINUS_EXPR);
- TREE_OPERAND (t, 1) = var;
- TREE_OPERAND (t, 0)
- = fold (build (code, TREE_TYPE (t), con, arg1));
+ t = build (MINUS_EXPR, type,
+ fold (build (code, type, con, arg1)), var);
}
else
{
return t;
/* Otherwise return VAR +- (ARG1 +- CON). */
- TREE_OPERAND (t, 1) = tem
- = fold (build (code, TREE_TYPE (t), arg1, con));
- TREE_OPERAND (t, 0) = var;
+ tem = fold (build (code, type, arg1, con));
+ t = build (code, type, var, tem);
+
if (integer_zerop (tem)
&& (code == PLUS_EXPR || code == MINUS_EXPR))
return convert (type, var);
convert (TREE_TYPE (t), con)));
}
- TREE_OPERAND (t, 0)
- = fold (build (code, TREE_TYPE (t), arg0, con));
- TREE_OPERAND (t, 1) = var;
+ t = build (TREE_CODE (t), type,
+ fold (build (code, TREE_TYPE (t), arg0, con)), var);
+
if (integer_zerop (TREE_OPERAND (t, 0))
&& TREE_CODE (t) == PLUS_EXPR)
return convert (TREE_TYPE (t), var);
Also note that operand_equal_p is always false if an operand
is volatile. */
- if (operand_equal_p (arg0, arg1,
- FLOAT_TYPE_P (type) && ! flag_fast_math))
+ if ((! FLOAT_TYPE_P (type) || flag_fast_math)
+ && operand_equal_p (arg0, arg1, 0))
return convert (type, integer_zero_node);
goto associate;
case BIT_IOR_EXPR:
bit_ior:
+ {
+ register enum tree_code code0, code1;
+
if (integer_all_onesp (arg1))
return omit_one_operand (type, arg1, arg0);
if (integer_zerop (arg1))
if (t1 != NULL_TREE)
return t1;
- /* (a << C1) | (a >> C2) if A is unsigned and C1+C2 is the size of A
+ /* (A << C1) | (A >> C2) if A is unsigned and C1+C2 is the size of A
is a rotate of A by C1 bits. */
+ /* (A << B) | (A >> (Z - B)) if A is unsigned and Z is the size of A
+ is a rotate of A by B bits. */
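+ /* E.g. for unsigned 32-bit A, both (A << 3) | (A >> 29) and the
+ variable-count idiom (A << B) | (A >> (32 - B)) fold to a single
+ LROTATE_EXPR. */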
- if ((TREE_CODE (arg0) == RSHIFT_EXPR
- || TREE_CODE (arg0) == LSHIFT_EXPR)
- && (TREE_CODE (arg1) == RSHIFT_EXPR
- || TREE_CODE (arg1) == LSHIFT_EXPR)
- && TREE_CODE (arg0) != TREE_CODE (arg1)
+ code0 = TREE_CODE (arg0);
+ code1 = TREE_CODE (arg1);
+ if (((code0 == RSHIFT_EXPR && code1 == LSHIFT_EXPR)
+ || (code1 == RSHIFT_EXPR && code0 == LSHIFT_EXPR))
&& operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
- && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
- && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
- && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
- && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
- && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
- && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
- + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
+ && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
+ {
+ register tree tree01, tree11;
+ register enum tree_code code01, code11;
+
+ tree01 = TREE_OPERAND (arg0, 1);
+ tree11 = TREE_OPERAND (arg1, 1);
+ code01 = TREE_CODE (tree01);
+ code11 = TREE_CODE (tree11);
+ if (code01 == INTEGER_CST
+ && code11 == INTEGER_CST
+ && TREE_INT_CST_HIGH (tree01) == 0
+ && TREE_INT_CST_HIGH (tree11) == 0
+ && ((TREE_INT_CST_LOW (tree01) + TREE_INT_CST_LOW (tree11))
== TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
- return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
- TREE_CODE (arg0) == LSHIFT_EXPR
- ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
+ return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
+ code0 == LSHIFT_EXPR ? tree01 : tree11);
+ else if (code11 == MINUS_EXPR
+ && TREE_CODE (TREE_OPERAND (tree11, 0)) == INTEGER_CST
+ && TREE_INT_CST_HIGH (TREE_OPERAND (tree11, 0)) == 0
+ && TREE_INT_CST_LOW (TREE_OPERAND (tree11, 0))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))
+ && operand_equal_p (tree01, TREE_OPERAND (tree11, 1), 0))
+ return build (code0 == LSHIFT_EXPR ? LROTATE_EXPR : RROTATE_EXPR,
+ type, TREE_OPERAND (arg0, 0), tree01);
+ else if (code01 == MINUS_EXPR
+ && TREE_CODE (TREE_OPERAND (tree01, 0)) == INTEGER_CST
+ && TREE_INT_CST_HIGH (TREE_OPERAND (tree01, 0)) == 0
+ && TREE_INT_CST_LOW (TREE_OPERAND (tree01, 0))
+ == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))
+ && operand_equal_p (tree11, TREE_OPERAND (tree01, 1), 0))
+ return build (code0 != LSHIFT_EXPR ? LROTATE_EXPR : RROTATE_EXPR,
+ type, TREE_OPERAND (arg0, 0), tree11);
+ }
goto associate;
+ }
case BIT_XOR_EXPR:
if (integer_zerop (arg1))
}
goto binary;
+ case RDIV_EXPR:
+ /* In most cases, do nothing with a divide by zero. */
+#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
+#ifndef REAL_INFINITY
+ if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
+ return t;
+#endif
+#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
+
+ /* In IEEE floating point, x/1 is not equivalent to x for snans.
+ However, ANSI says we can drop signals, so we can do this anyway. */
+ if (real_onep (arg1))
+ return non_lvalue (convert (type, arg0));
+
+ /* If ARG1 is a constant, we can convert this to a multiply by the
+ reciprocal. This does not have the same rounding properties,
+ so only do this if -ffast-math. We can actually always safely
+ do it if ARG1 is a power of two, but it's hard to tell if it is
+ or not in a portable manner. */
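+ /* E.g. with -ffast-math, X / 5.0 becomes X * 0.2, which may round
+ differently; X / 2.0 => X * 0.5 happens to be exact either way. */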
+ if (TREE_CODE (arg1) == REAL_CST && flag_fast_math
+ && 0 != (tem = const_binop (code, build_real (type, dconst1),
+ arg1, 0)))
+ return fold (build (MULT_EXPR, type, arg0, tem));
+
+ goto binary;
+
case TRUNC_DIV_EXPR:
case ROUND_DIV_EXPR:
case FLOOR_DIV_EXPR:
case CEIL_DIV_EXPR:
case EXACT_DIV_EXPR:
- case RDIV_EXPR:
if (integer_onep (arg1))
return non_lvalue (convert (type, arg0));
if (integer_zerop (arg1))
return t;
+ /* If we have ((a / C1) / C2) where both divisions are the same type, try
+ to simplify. First see if C1 * C2 overflows or not. */
+ if (TREE_CODE (arg0) == code && TREE_CODE (arg1) == INTEGER_CST
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
+ {
+ tree new_divisor;
+
+ new_divisor = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 1), arg1, 0);
+ tem = const_binop (FLOOR_DIV_EXPR, new_divisor, arg1, 0);
+
+ if (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_LOW (tem)
+ && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_HIGH (tem))
+ {
+ /* If no overflow, divide by C1*C2. */
+ return fold (build (code, type, TREE_OPERAND (arg0, 0), new_divisor));
+ }
+ }
+
/* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
expressions, which often appear in the offsets or sizes of
tree c2 = integer_zero_node;
tree xarg0 = arg0;
- if (TREE_CODE (xarg0) == SAVE_EXPR)
+ if (TREE_CODE (xarg0) == SAVE_EXPR && SAVE_EXPR_RTL (xarg0) == 0)
have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
STRIP_NOPS (xarg0);
xarg0 = TREE_OPERAND (xarg0, 0);
}
- if (TREE_CODE (xarg0) == SAVE_EXPR)
+ if (TREE_CODE (xarg0) == SAVE_EXPR && SAVE_EXPR_RTL (xarg0) == 0)
have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
STRIP_NOPS (xarg0);
}
}
-#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
-#ifndef REAL_INFINITY
- if (TREE_CODE (arg1) == REAL_CST
- && real_zerop (arg1))
- return t;
-#endif
-#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
-
goto binary;
case CEIL_MOD_EXPR:
return non_lvalue (convert (type, arg0));
/* Since negative shift count is not well-defined,
don't try to compute it in the compiler. */
- if (tree_int_cst_sgn (arg1) < 0)
+ if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0)
return t;
+ /* Rewrite an LROTATE_EXPR by a constant into an
+ RROTATE_EXPR by a new constant. */
+ if (code == LROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST)
+ {
+ TREE_SET_CODE (t, RROTATE_EXPR);
+ code = RROTATE_EXPR;
+ TREE_OPERAND (t, 1) = arg1
+ = const_binop
+ (MINUS_EXPR,
+ convert (TREE_TYPE (arg1),
+ build_int_2 (GET_MODE_BITSIZE (TYPE_MODE (type)), 0)),
+ arg1, 0);
+ if (tree_int_cst_sgn (arg1) < 0)
+ return t;
+ }
+
+ /* If we have a rotate of a bit operation with the rotate count and
+ the second operand of the bit operation both constant,
+ permute the two operations. */
+ if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
+ && (TREE_CODE (arg0) == BIT_AND_EXPR
+ || TREE_CODE (arg0) == BIT_ANDTC_EXPR
+ || TREE_CODE (arg0) == BIT_IOR_EXPR
+ || TREE_CODE (arg0) == BIT_XOR_EXPR)
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
+ return fold (build (TREE_CODE (arg0), type,
+ fold (build (code, type,
+ TREE_OPERAND (arg0, 0), arg1)),
+ fold (build (code, type,
+ TREE_OPERAND (arg0, 1), arg1))));
+
+ /* Two consecutive rotates adding up to the width of the mode can
+ be ignored. */
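+ /* E.g. rotating a 32-bit value right by 10 and then by 22 yields
+ the original value, so the pair folds away. */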
+ if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
+ && TREE_CODE (arg0) == RROTATE_EXPR
+ && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
+ && TREE_INT_CST_HIGH (arg1) == 0
+ && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
+ && ((TREE_INT_CST_LOW (arg1)
+ + TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)))
+ == GET_MODE_BITSIZE (TYPE_MODE (type))))
+ return TREE_OPERAND (arg0, 0);
+
goto binary;
case MIN_EXPR:
and its values must be 0 or 1.
("true" is a fixed value perhaps depending on the language,
but we don't handle values other than 1 correctly yet.) */
- return invert_truthvalue (arg0);
+ tem = invert_truthvalue (arg0);
+ /* Avoid infinite recursion. */
+ if (TREE_CODE (tem) == TRUTH_NOT_EXPR)
+ return t;
+ return convert (type, tem);
case TRUTH_ANDIF_EXPR:
/* Note that the operands of this must be ints
and the other one. */
{
tree constop = 0, varop;
- tree *constoploc;
+ int constopnum = -1;
if (TREE_CONSTANT (arg1))
- constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
+ constopnum = 1, constop = arg1, varop = arg0;
if (TREE_CONSTANT (arg0))
- constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
+ constopnum = 0, constop = arg0, varop = arg1;
if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
{
= fold (build (PLUS_EXPR, TREE_TYPE (varop),
constop, TREE_OPERAND (varop, 1)));
TREE_SET_CODE (varop, PREINCREMENT_EXPR);
- *constoploc = newconst;
+
+ t = build (code, type, TREE_OPERAND (t, 0),
+ TREE_OPERAND (t, 1));
+ TREE_OPERAND (t, constopnum) = newconst;
return t;
}
}
= fold (build (MINUS_EXPR, TREE_TYPE (varop),
constop, TREE_OPERAND (varop, 1)));
TREE_SET_CODE (varop, PREDECREMENT_EXPR);
- *constoploc = newconst;
+ t = build (code, type, TREE_OPERAND (t, 0),
+ TREE_OPERAND (t, 1));
+ TREE_OPERAND (t, constopnum) = newconst;
return t;
}
}
{
case GE_EXPR:
code = GT_EXPR;
- TREE_SET_CODE (t, code);
arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
- TREE_OPERAND (t, 1) = arg1;
+ t = build (code, type, TREE_OPERAND (t, 0), arg1);
break;
case LT_EXPR:
code = LE_EXPR;
- TREE_SET_CODE (t, code);
arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
- TREE_OPERAND (t, 1) = arg1;
+ t = build (code, type, TREE_OPERAND (t, 0), arg1);
+ break;
}
}
return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
arg0, integer_zero_node);
+ /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
+ and similarly for >= into !=. */
+ if ((code == LT_EXPR || code == GE_EXPR)
+ && TREE_UNSIGNED (TREE_TYPE (arg0))
+ && TREE_CODE (arg1) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (arg1, 0)))
+ return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
+ build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
+ TREE_OPERAND (arg1, 1)),
+ convert (TREE_TYPE (arg0), integer_zero_node));
+
+ else if ((code == LT_EXPR || code == GE_EXPR)
+ && TREE_UNSIGNED (TREE_TYPE (arg0))
+ && (TREE_CODE (arg1) == NOP_EXPR
+ || TREE_CODE (arg1) == CONVERT_EXPR)
+ && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
+ && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
+ return
+ build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
+ convert (TREE_TYPE (arg0),
+ build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
+ TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
+ convert (TREE_TYPE (arg0), integer_zero_node));
+
/* Simplify comparison of something with itself. (For IEEE
floating-point, we can only do some of these simplifications.) */
if (operand_equal_p (arg0, arg1, 0))
return pedantic_non_lvalue
(TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
- return pedantic_non_lvalue (omit_one_operand (type, arg1, arg0));
+ return pedantic_omit_one_operand (type, arg1, arg0);
/* If the second operand is zero, invert the comparison and swap
the second and third operands. Likewise if the second operand
if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
{
- arg0 = TREE_OPERAND (t, 0) = tem;
- TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
- TREE_OPERAND (t, 2) = arg1;
- arg1 = TREE_OPERAND (t, 1);
+ t = build (code, type, tem,
+ TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
+ arg0 = tem;
+ arg1 = TREE_OPERAND (t, 2);
+ STRIP_NOPS (arg1);
}
}
tree arg2 = TREE_OPERAND (t, 2);
enum tree_code comp_code = TREE_CODE (arg0);
+ STRIP_NOPS (arg2);
+
/* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
depending on the comparison operation. */
- if (integer_zerop (TREE_OPERAND (arg0, 1))
+ if ((FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 1)))
+ ? real_zerop (TREE_OPERAND (arg0, 1))
+ : integer_zerop (TREE_OPERAND (arg0, 1)))
&& TREE_CODE (arg2) == NEGATE_EXPR
&& operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
switch (comp_code)
case GE_EXPR:
case GT_EXPR:
return pedantic_non_lvalue
- (fold (build1 (ABS_EXPR, type, arg1)));
+ (convert (type, fold (build1 (ABS_EXPR,
+ TREE_TYPE (arg1), arg1))));
case LE_EXPR:
case LT_EXPR:
return pedantic_non_lvalue
(fold (build1 (NEGATE_EXPR, type,
- fold (build1 (ABS_EXPR, type, arg1)))));
+ convert (type,
+ fold (build1 (ABS_EXPR,
+ TREE_TYPE (arg1),
+ arg1))))));
}
/* If this is A != 0 ? A : 0, this is simply A. For ==, it is
if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
arg2, TREE_OPERAND (arg0, 0)))
- switch (comp_code)
- {
- case EQ_EXPR:
- return pedantic_non_lvalue (convert (type, arg2));
- case NE_EXPR:
- return pedantic_non_lvalue (convert (type, arg1));
- case LE_EXPR:
- case LT_EXPR:
- return pedantic_non_lvalue
- (fold (build (MIN_EXPR, type, arg1, arg2)));
- case GE_EXPR:
- case GT_EXPR:
- return pedantic_non_lvalue
- (fold (build (MAX_EXPR, type, arg1, arg2)));
- }
+ {
+ tree comp_op0 = TREE_OPERAND (arg0, 0);
+ tree comp_op1 = TREE_OPERAND (arg0, 1);
+ tree comp_type = TREE_TYPE (comp_op0);
+
+ switch (comp_code)
+ {
+ case EQ_EXPR:
+ return pedantic_non_lvalue (convert (type, arg2));
+ case NE_EXPR:
+ return pedantic_non_lvalue (convert (type, arg1));
+ case LE_EXPR:
+ case LT_EXPR:
+ return pedantic_non_lvalue
+ (convert (type, (fold (build (MIN_EXPR, comp_type,
+ comp_op0, comp_op1)))));
+ case GE_EXPR:
+ case GT_EXPR:
+ return pedantic_non_lvalue
+ (convert (type, fold (build (MAX_EXPR, comp_type,
+ comp_op0, comp_op1))));
+ }
+ }
/* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
we might still be able to simplify this. For example,
{
case EQ_EXPR:
/* We can replace A with C1 in this case. */
- arg1 = TREE_OPERAND (t, 1)
- = convert (type, TREE_OPERAND (arg0, 1));
+ arg1 = convert (type, TREE_OPERAND (arg0, 1));
+ t = build (code, type, TREE_OPERAND (t, 0), arg1,
+ TREE_OPERAND (t, 2));
break;
case LT_EXPR:
}
}
+ /* If the second operand is simpler than the third, swap them
+ since that produces better jump optimization results. */
+ if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
+ || TREE_CODE (arg1) == SAVE_EXPR)
+ && ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
+ || TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
+ || TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
+ {
+ /* See if this can be inverted. If it can't, possibly because
+ it was a floating-point inequality comparison, don't do
+ anything. */
+ tem = invert_truthvalue (arg0);
+
+ if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
+ {
+ t = build (code, type, tem,
+ TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
+ arg0 = tem;
+ arg1 = TREE_OPERAND (t, 2);
+ STRIP_NOPS (arg1);
+ }
+ }
+
/* Convert A ? 1 : 0 to simply A. */
if (integer_onep (TREE_OPERAND (t, 1))
&& integer_zerop (TREE_OPERAND (t, 2))
&& type == TREE_TYPE (arg0))
return pedantic_non_lvalue (arg0);
-
/* Look for expressions of the form A & 2 ? 2 : 0. The result of this
operation is simply A & 2. */
TREE_OPERAND (arg0, 1)))));
return t;
+ /* Pull arithmetic ops out of the CLEANUP_POINT_EXPR where
+ appropriate. */
+ case CLEANUP_POINT_EXPR:
+ if (! TREE_SIDE_EFFECTS (arg0))
+ return convert (type, arg0);
+
+ {
+ enum tree_code code0 = TREE_CODE (arg0);
+ int kind0 = TREE_CODE_CLASS (code0);
+ tree arg00 = TREE_OPERAND (arg0, 0);
+ tree arg01;
+
+ if (kind0 == '1' || code0 == TRUTH_NOT_EXPR)
+ return fold (build1 (code0, type,
+ fold (build1 (CLEANUP_POINT_EXPR,
+ TREE_TYPE (arg00), arg00))));
+
+ if (kind0 == '<' || kind0 == '2'
+ || code0 == TRUTH_ANDIF_EXPR || code0 == TRUTH_ORIF_EXPR
+ || code0 == TRUTH_AND_EXPR || code0 == TRUTH_OR_EXPR
+ || code0 == TRUTH_XOR_EXPR)
+ {
+ arg01 = TREE_OPERAND (arg0, 1);
+
+ if (! TREE_SIDE_EFFECTS (arg00))
+ return fold (build (code0, type, arg00,
+ fold (build1 (CLEANUP_POINT_EXPR,
+ TREE_TYPE (arg01), arg01))));
+
+ if (! TREE_SIDE_EFFECTS (arg01))
+ return fold (build (code0, type,
+ fold (build1 (CLEANUP_POINT_EXPR,
+ TREE_TYPE (arg00), arg00)),
+ arg01));
+ }
+
+ return t;
+ }
+
default:
return t;
} /* switch (code) */