/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 88, 92, 93, 94, 1995 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */
/* The entry points in this file are fold, size_int and size_binop.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.  */

#include <stdio.h>
#include <setjmp.h>
#include "config.h"
#include "flags.h"
#include "tree.h"
/* Handle floating overflow for `const_binop'.  */
static jmp_buf float_error;
static void encode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT, HOST_WIDE_INT));
static void decode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *, HOST_WIDE_INT *));
int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
                                HOST_WIDE_INT, HOST_WIDE_INT,
                                HOST_WIDE_INT, HOST_WIDE_INT *,
                                HOST_WIDE_INT *, HOST_WIDE_INT *,
                                HOST_WIDE_INT *));
static int split_tree PROTO((tree, enum tree_code, tree *, tree *, int *));
static tree const_binop PROTO((enum tree_code, tree, tree, int));
static tree fold_convert PROTO((tree, tree));
static enum tree_code invert_tree_comparison PROTO((enum tree_code));
static enum tree_code swap_tree_comparison PROTO((enum tree_code));
static int truth_value_p PROTO((enum tree_code));
static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
static tree eval_subst PROTO((tree, tree, tree, tree, tree));
static tree omit_one_operand PROTO((tree, tree, tree));
static tree pedantic_omit_one_operand PROTO((tree, tree, tree));
static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
                                              tree, tree));
static tree decode_field_reference PROTO((tree, int *, int *,
                                          enum machine_mode *, int *,
                                          int *, tree *));
static int all_ones_mask_p PROTO((tree, int));
static int simple_operand_p PROTO((tree));
static tree range_test PROTO((enum tree_code, tree, enum tree_code,
                              enum tree_code, tree, tree, tree));
static tree unextend PROTO((tree, int, int));
static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
static tree strip_compound_expr PROTO((tree, tree));
/* Yield nonzero if a signed left shift of A by B bits overflows.  */
#define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))

/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
   Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
   Then this yields nonzero if overflow occurred during the addition.
   Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
   Use `^' to test whether signs differ, and `< 0' to isolate the sign.  */
#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
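
/* For illustration (not in the original source): a minimal sketch of
   overflow_sum_sign on 32-bit ints, assuming the wrapping 2's complement
   arithmetic the comment above describes.  INT_MAX + 1 wraps to INT_MIN,
   so the operands agree in sign while the sum does not:

        int a = 2147483647, b = 1;
        int sum = a + b;                        (wraps to -2147483648)
        overflow_sum_sign (a, b, sum)           (nonzero: overflow)
        overflow_sum_sign (a, -1, a - 1)        (zero: signs differ, no
                                                 overflow is possible)  */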
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
/* Unpack a two-word integer into 4 words.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   WORDS points to the array of HOST_WIDE_INTs.  */

static void
encode (words, low, hi)
     HOST_WIDE_INT *words;
     HOST_WIDE_INT low, hi;
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}
/* Pack an array of 4 words into a two-word integer.
   WORDS points to the array of words.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (words, low, hi)
     HOST_WIDE_INT *words;
     HOST_WIDE_INT *low, *hi;
{
  *low = words[0] | words[1] * BASE;
  *hi = words[2] | words[3] * BASE;
}
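
/* For illustration (not in the original source): assuming 32-bit
   HOST_WIDE_INT, BASE is 1 << 16 and encode/decode round-trip exactly:

        HOST_WIDE_INT w[4], lo, hi;
        encode (w, 0x12345678, 0x0BADCAFE);
                (w is now {0x5678, 0x1234, 0xCAFE, 0x0BAD})
        decode (w, &lo, &hi);
                (lo == 0x12345678, hi == 0x0BADCAFE again)  */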
/* Make the integer constant T valid for its type
   by setting to 0 or 1 all the bits in the constant
   that don't belong in the type.
   Yield 1 if a signed overflow occurs, 0 otherwise.
   If OVERFLOW is nonzero, a signed overflow has already occurred
   in calculating T, so propagate it.

   Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
   if it exists.  */

int
force_fit_type (t, overflow)
     tree t;
     int overflow;
{
  HOST_WIDE_INT low, high;
  register int prec;

  if (TREE_CODE (t) == REAL_CST)
    {
#ifdef CHECK_FLOAT_VALUE
      CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
                         overflow);
#endif
      return overflow;
    }

  else if (TREE_CODE (t) != INTEGER_CST)
    return overflow;

  low = TREE_INT_CST_LOW (t);
  high = TREE_INT_CST_HIGH (t);

  if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
    prec = POINTER_SIZE;
  else
    prec = TYPE_PRECISION (TREE_TYPE (t));

  /* First clear all bits that are beyond the type's precision.  */

  if (prec == 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      TREE_INT_CST_HIGH (t)
        &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
    }
  else
    {
      TREE_INT_CST_HIGH (t) = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
        TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  /* Unsigned types do not suffer sign extension or overflow.  */
  if (TREE_UNSIGNED (TREE_TYPE (t)))
    return overflow;

  /* If the value's sign bit is set, extend the sign.  */
  if (prec != 2 * HOST_BITS_PER_WIDE_INT
      && (prec > HOST_BITS_PER_WIDE_INT
          ? (TREE_INT_CST_HIGH (t)
             & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
          : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
    {
      /* Value is negative:
         set to 1 all the bits that are outside this type's precision.  */
      if (prec > HOST_BITS_PER_WIDE_INT)
        {
          TREE_INT_CST_HIGH (t)
            |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
        }
      else
        {
          TREE_INT_CST_HIGH (t) = -1;
          if (prec < HOST_BITS_PER_WIDE_INT)
            TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
        }
    }

  /* Yield nonzero if signed overflow occurred.  */
  return
    ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
     != 0);
}
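
/* For illustration (not in the original source), assuming a signed 8-bit
   type such as signed_char_type_node: a constant 0x17F has bits beyond
   the 8-bit precision, so they are cleared, leaving 0x7F, and because
   the stored value changed the function yields 1 (signed overflow).
   0x7F already fits and yields 0, while 0xFF sign-extends to -1 and
   likewise yields 1, since 255 is not representable:

        t = build_int_2 (0x17F, 0);
        TREE_TYPE (t) = signed_char_type_node;
        force_fit_type (t, 0);          (t becomes 0x7F, returns 1)  */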
/* Add two doubleword integers with doubleword result.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
add_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT l, h;

  l = l1 + l2;
  h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);

  *lv = l;
  *hv = h;
  return overflow_sum_sign (h1, h2, h);
}
/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
neg_double (l1, h1, lv, hv)
     HOST_WIDE_INT l1, h1;
     HOST_WIDE_INT *lv, *hv;
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = - l1;
      *hv = ~ h1;
      return 0;
    }
}
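
/* For illustration (not in the original source): the only overflowing
   case is negating the most negative doubleword value.  With 32-bit
   pieces, neg_double (0, 0x80000000, &lo, &hi) leaves the value
   unchanged and returns 1, since h1 and *hv then both have the sign
   bit set.  */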
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
mul_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  register unsigned HOST_WIDE_INT carry;
  register int i, j, k;
  HOST_WIDE_INT toplow, tophigh, neglow, neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  bzero ((char *) prod, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
        {
          k = i + j;
          /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
          carry += arg1[i] * arg2[j];
          /* Since prod[k] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
          carry += prod[k];
          prod[k] = LOWPART (carry);
          carry = HIGHPART (carry);
        }
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);        /* This ignores prod[4] through prod[4*2-1] */

  /* Check for overflow by calculating the top half of the answer in full;
     it should agree with the low half's sign bit.  */
  decode (prod+4, &toplow, &tophigh);
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
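
/* For illustration (not in the original source), with 32-bit pieces:
   mul_double (0, 1, 0, 1, &lo, &hi) squares 2^32.  The low doubleword
   of the product is 0, but the upper half computed in prod[4..7] is 1,
   which disagrees with the result's sign bit, so the function returns
   nonzero (signed overflow).  */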
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  if (count < 0)
    {
      rshift_double (l1, h1, - count, prec, lv, hv, arith);
      return;
    }

  if (count >= prec)
    count = (unsigned HOST_WIDE_INT) count % prec;

  if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = (unsigned HOST_WIDE_INT) l1 << (count - HOST_BITS_PER_WIDE_INT);
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
             | ((unsigned HOST_WIDE_INT) l1
                >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
      *lv = (unsigned HOST_WIDE_INT) l1 << count;
    }
}
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  unsigned HOST_WIDE_INT signmask;
  signmask = (arith
              ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
              : 0);

  if (count >= prec)
    count = (unsigned HOST_WIDE_INT) count % prec;

  if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = signmask;
      *lv = ((signmask << (2 * HOST_BITS_PER_WIDE_INT - count - 1) << 1)
             | ((unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT)));
    }
  else
    {
      *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
             | ((unsigned HOST_WIDE_INT) h1
                << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
      *hv = ((signmask << (HOST_BITS_PER_WIDE_INT - count))
             | ((unsigned HOST_WIDE_INT) h1 >> count));
    }
}
/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT s1l, s1h, s2l, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
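
/* For illustration (not in the original source): a rotate is just the OR
   of two complementary logical shifts, e.g. with prec == 64 and
   count == 8 the result is (x << 8) | (x >> 56), computed here on the
   doubleword pieces.  */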
/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT s1l, s1h, s2l, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */

int
div_and_round_double (code, uns,
                      lnum_orig, hnum_orig, lden_orig, hden_orig,
                      lquo, hquo, lrem, hrem)
     enum tree_code code;
     int uns;
     HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
     HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
     HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];     /* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  register int i, j;
  unsigned HOST_WIDE_INT work;
  register int carry = 0;
  HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  if ((hden == 0) && (lden == 0))
    abort ();

  /* calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
        {
          quo_neg = ~ quo_neg;
          /* (minimum integer) / (-1) is the only overflow case.  */
          if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
            overflow = 1;
        }
      if (hden < 0)
        {
          quo_neg = ~ quo_neg;
          neg_double (lden, hden, &lden, &hden);
        }
    }

  if (hnum == 0 && hden == 0)
    {                           /* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
      goto finish_up;
    }

  if (hnum == 0)
    {                           /* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  bzero ((char *) quo, sizeof quo);

  bzero ((char *) num, sizeof num);     /* to zero 9th element */
  bzero ((char *) den, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < BASE)
    {
      /* hnum != 0 already checked.  */
      for (i = 4 - 1; i >= 0; i--)
        {
          work = num[i] + carry * BASE;
          quo[i] = work / (unsigned HOST_WIDE_INT) lden;
          carry = work % (unsigned HOST_WIDE_INT) lden;
        }
    }
  else
    {
      /* Full double precision division,
         with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int quo_est, scale, num_hi_sig, den_hi_sig;

      /* Find the highest non-zero divisor digit.  */
      for (i = 4 - 1; ; i--)
        if (den[i] != 0) {
          den_hi_sig = i;
          break;
        }

      /* Ensure that the first digit of the divisor is at least BASE/2.
         This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1) {          /* scale divisor and dividend */
        carry = 0;
        for (i = 0; i <= 4 - 1; i++) {
          work = (num[i] * scale) + carry;
          num[i] = LOWPART (work);
          carry = HIGHPART (work);
        } num[4] = carry;
        carry = 0;
        for (i = 0; i <= 4 - 1; i++) {
          work = (den[i] * scale) + carry;
          den[i] = LOWPART (work);
          carry = HIGHPART (work);
          if (den[i] != 0) den_hi_sig = i;
        }
      }

      num_hi_sig = 4;

      /* Main loop */
      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
        /* guess the next quotient digit, quo_est, by dividing the first
           two remaining dividend digits by the high order quotient digit.
           quo_est is never low and is at most 2 high.  */
        unsigned HOST_WIDE_INT tmp;

        num_hi_sig = i + den_hi_sig + 1;
        work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
        if (num[num_hi_sig] != den[den_hi_sig])
          quo_est = work / den[den_hi_sig];
        else
          quo_est = BASE - 1;

        /* refine quo_est so it's usually correct, and at most one high.  */
        tmp = work - quo_est * den[den_hi_sig];
        if (tmp < BASE
            && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
          quo_est--;

        /* Try QUO_EST as the quotient digit, by multiplying the
           divisor by QUO_EST and subtracting from the remaining dividend.
           Keep in mind that QUO_EST is the I - 1st digit.  */

        carry = 0;
        for (j = 0; j <= den_hi_sig; j++)
          {
            work = quo_est * den[j] + carry;
            carry = HIGHPART (work);
            work = num[i + j] - LOWPART (work);
            num[i + j] = LOWPART (work);
            carry += HIGHPART (work) != 0;
          }

        /* if quo_est was high by one, then num[i] went negative and
           we need to correct things.  */

        if (num[num_hi_sig] < carry)
          {
            quo_est--;
            carry = 0;          /* add divisor back in */
            for (j = 0; j <= den_hi_sig; j++)
              {
                work = num[i + j] + den[j] + carry;
                carry = HIGHPART (work);
                num[i + j] = LOWPART (work);
              }
            num [num_hi_sig] += carry;
          }

        /* store the quotient digit.  */
        quo[i] = quo_est;
      }
    }

  decode (quo, lquo, hquo);

 finish_up:
  /* if result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:        /* round toward zero */
    case EXACT_DIV_EXPR:        /* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:        /* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
        {
          /* quo = quo - 1;  */
          add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
                      lquo, hquo);
        }
      else return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:         /* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
        {
          add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                      lquo, hquo);
        }
      else return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:        /* round to closest integer */
      {
        HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
        HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;

        /* get absolute values */
        if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
        if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);

        /* if (2 * abs (lrem) >= abs (lden)) */
        mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
                    labs_rem, habs_rem, &ltwice, &htwice);
        if (((unsigned HOST_WIDE_INT) habs_den
             < (unsigned HOST_WIDE_INT) htwice)
            || (((unsigned HOST_WIDE_INT) habs_den
                 == (unsigned HOST_WIDE_INT) htwice)
                && ((unsigned HOST_WIDE_INT) labs_den
                    < (unsigned HOST_WIDE_INT) ltwice)))
          {
            if (*hquo < 0)
              /* quo = quo - 1;  */
              add_double (*lquo, *hquo,
                          (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
            else
              /* quo = quo + 1;  */
              add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                          lquo, hquo);
          }
        else return overflow;
      }
      break;

    default:
      abort ();
    }

  /* compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
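
/* For illustration (not in the original source): dividing -7 by 2 under
   the different codes gives

        TRUNC_DIV_EXPR  quo = -3, rem = -1      (round toward zero)
        FLOOR_DIV_EXPR  quo = -4, rem =  1      (round toward -infinity)
        CEIL_DIV_EXPR   quo = -3, rem = -1      (round toward +infinity)
        ROUND_DIV_EXPR  quo = -4, rem =  1      (round to closest)

   and in every case num == quo * den + rem holds.  */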
#ifndef REAL_ARITHMETIC
/* Effectively truncate a real value to represent the nearest possible value
   in a narrower mode.  The result is actually represented in the same data
   type as the argument, but its value is usually different.

   A trap may occur during the FP operations and it is the responsibility
   of the calling function to have a handler established.  */

REAL_VALUE_TYPE
real_value_truncate (mode, arg)
     enum machine_mode mode;
     REAL_VALUE_TYPE arg;
{
  return REAL_VALUE_TRUNCATE (mode, arg);
}
#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT

/* The IEEE 64-bit double format, used by the three checks below.
   (The original declared this union locally in each function; it is
   declared once here for brevity.)  */
union ieee_double
{
  REAL_VALUE_TYPE d;
  struct {
    unsigned sign : 1;
    unsigned exponent : 11;
    unsigned mantissa1 : 20;
    unsigned mantissa2;
  } big_endian;
  struct {
    unsigned mantissa2;
    unsigned mantissa1 : 20;
    unsigned exponent : 11;
    unsigned sign : 1;
  } little_endian;
};

/* Check for infinity in an IEEE double precision number.  */

int
target_isinf (x)
     REAL_VALUE_TYPE x;
{
  union ieee_double u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
              && u.big_endian.mantissa1 == 0
              && u.big_endian.mantissa2 == 0);
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
              && u.little_endian.mantissa1 == 0
              && u.little_endian.mantissa2 == 0);
    }
}

/* Check whether an IEEE double precision number is a NaN.  */

int
target_isnan (x)
     REAL_VALUE_TYPE x;
{
  union ieee_double u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
              && (u.big_endian.mantissa1 != 0
                  || u.big_endian.mantissa2 != 0));
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
              && (u.little_endian.mantissa1 != 0
                  || u.little_endian.mantissa2 != 0));
    }
}

/* Check for a negative IEEE double precision number.  */

int
target_negative (x)
     REAL_VALUE_TYPE x;
{
  union ieee_double u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return u.big_endian.sign;
    }
  else
    {
      u.d = x;
      return u.little_endian.sign;
    }
}
#else /* Target not IEEE */

/* Let's assume other float formats don't have infinity.
   (This can be overridden by redefining REAL_VALUE_ISINF.)  */

/* Let's assume other float formats don't have NaNs.
   (This can be overridden by redefining REAL_VALUE_ISNAN.)  */

/* Let's assume other float formats don't have minus zero.
   (This can be overridden by redefining REAL_VALUE_NEGATIVE.)  */

#endif /* Target not IEEE */
#endif /* no REAL_ARITHMETIC */
/* Split a tree IN into a constant and a variable part
   that could be combined with CODE to make IN.
   CODE must be a commutative arithmetic operation.
   Store the constant part into *CONP and the variable in *VARP.
   Return 1 if this was done; zero means the tree IN did not decompose
   this way.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
   Therefore, we must tell the caller whether the variable part
   was subtracted.  We do this by storing 1 or -1 into *VARSIGNP.
   The value stored is the coefficient for the variable term.
   The constant term we return should always be added;
   we negate it if necessary.  */

static int
split_tree (in, code, varp, conp, varsignp)
     tree in;
     enum tree_code code;
     tree *varp, *conp;
     int *varsignp;
{
  register tree outtype = TREE_TYPE (in);
  *varp = 0;
  *conp = 0;

  /* Strip any conversions that don't change the machine mode.  */
  while ((TREE_CODE (in) == NOP_EXPR
          || TREE_CODE (in) == CONVERT_EXPR)
         && (TYPE_MODE (TREE_TYPE (in))
             == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
    in = TREE_OPERAND (in, 0);

  if (TREE_CODE (in) == code
      || (! FLOAT_TYPE_P (TREE_TYPE (in))
          /* We can associate addition and subtraction together
             (even though the C standard doesn't say so)
             for integers because the value is not affected.
             For reals, the value might be affected, so we can't.  */
          && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
              || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
    {
      enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
      if (code == INTEGER_CST)
        {
          *conp = TREE_OPERAND (in, 0);
          *varp = TREE_OPERAND (in, 1);
          if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
              && TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
          return 1;
        }
      if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
        {
          *conp = TREE_OPERAND (in, 1);
          *varp = TREE_OPERAND (in, 0);
          *varsignp = 1;
          if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
              && TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          if (TREE_CODE (in) == MINUS_EXPR)
            {
              /* If operation is subtraction and constant is second,
                 must negate it to get an additive constant.
                 And this cannot be done unless it is a manifest constant.
                 It could also be the address of a static variable.
                 We cannot negate that, so give up.  */
              if (TREE_CODE (*conp) == INTEGER_CST)
                /* Subtracting from integer_zero_node loses for long long.  */
                *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
              else
                return 0;
            }
          return 1;
        }
      if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
        {
          *conp = TREE_OPERAND (in, 0);
          *varp = TREE_OPERAND (in, 1);
          if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
              && TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
          return 1;
        }
    }
  return 0;
}
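
/* For illustration (not in the original source): given IN = `x - 3' and
   CODE = PLUS_EXPR, split_tree stores `x' in *VARP, the constant -3 in
   *CONP (negated so it can always be added back), sets *VARSIGNP to 1,
   and returns 1.  Given IN = `3 - x' it instead sets *VARSIGNP to -1
   with *CONP = 3.  */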
/* Combine two constants ARG1 and ARG2 under operation CODE
   to produce a new constant.
   We assume ARG1 and ARG2 have the same data type,
   or at least are the same kind of constant and the same machine mode.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */

static tree
const_binop (code, arg1, arg2, notrunc)
     enum tree_code code;
     register tree arg1, arg2;
     int notrunc;
{
  if (TREE_CODE (arg1) == INTEGER_CST)
    {
      register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
      register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
      HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
      HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
      HOST_WIDE_INT low, hi;
      HOST_WIDE_INT garbagel, garbageh;
      register tree t;
      int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
      int overflow = 0;

      switch (code)
        {
        case BIT_IOR_EXPR:
          t = build_int_2 (int1l | int2l, int1h | int2h);
          break;

        case BIT_XOR_EXPR:
          t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
          break;

        case BIT_AND_EXPR:
          t = build_int_2 (int1l & int2l, int1h & int2h);
          break;

        case BIT_ANDTC_EXPR:
          t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
          break;

        case RSHIFT_EXPR:
          int2l = - int2l;
        case LSHIFT_EXPR:
          /* It's unclear from the C standard whether shifts can overflow.
             The following code ignores overflow; perhaps a C standard
             interpretation ruling is needed.  */
          lshift_double (int1l, int1h, int2l,
                         TYPE_PRECISION (TREE_TYPE (arg1)),
                         &low, &hi, !uns);
          t = build_int_2 (low, hi);
          TREE_TYPE (t) = TREE_TYPE (arg1);
          if (!notrunc)
            force_fit_type (t, 0);
          TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
          return t;
        case RROTATE_EXPR:
          int2l = - int2l;
        case LROTATE_EXPR:
          lrotate_double (int1l, int1h, int2l,
                          TYPE_PRECISION (TREE_TYPE (arg1)),
                          &low, &hi);
          t = build_int_2 (low, hi);
          break;

        case PLUS_EXPR:
          if (int1h == 0)
            {
              int2l += int1l;
              if ((unsigned HOST_WIDE_INT) int2l < int1l)
                {
                  hi = int2h;
                  int2h++;
                  overflow = int2h < hi;
                }
              t = build_int_2 (int2l, int2h);
              break;
            }
          if (int2h == 0)
            {
              int1l += int2l;
              if ((unsigned HOST_WIDE_INT) int1l < int2l)
                {
                  hi = int1h;
                  int1h++;
                  overflow = int1h < hi;
                }
              t = build_int_2 (int1l, int1h);
              break;
            }
          overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
          t = build_int_2 (low, hi);
          break;

        case MINUS_EXPR:
          if (int2h == 0 && int2l == 0)
            {
              t = build_int_2 (int1l, int1h);
              break;
            }
          neg_double (int2l, int2h, &low, &hi);
          add_double (int1l, int1h, low, hi, &low, &hi);
          overflow = overflow_sum_sign (hi, int2h, int1h);
          t = build_int_2 (low, hi);
          break;

        case MULT_EXPR:
          overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
          t = build_int_2 (low, hi);
          break;
        case TRUNC_DIV_EXPR:
        case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
        case EXACT_DIV_EXPR:
          /* This is a shortcut for a common special case.
             It reduces the number of tree nodes generated
             and saves time.  */
          if (int2h == 0 && int2l > 0
              && TREE_TYPE (arg1) == sizetype
              && int1h == 0 && int1l >= 0)
            {
              if (code == CEIL_DIV_EXPR)
                int1l += int2l - 1;
              return size_int (int1l / int2l);
            }

          /* ... fall through ...  */
        case ROUND_DIV_EXPR:
          if (int2h == 0 && int2l == 1)
            {
              t = build_int_2 (int1l, int1h);
              break;
            }
          if (int1l == int2l && int1h == int2h)
            {
              if ((int1l | int1h) == 0)
                abort ();
              t = build_int_2 (1, 0);
              break;
            }
          overflow = div_and_round_double (code, uns,
                                           int1l, int1h, int2l, int2h,
                                           &low, &hi, &garbagel, &garbageh);
          t = build_int_2 (low, hi);
          break;

        case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
        case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
          overflow = div_and_round_double (code, uns,
                                           int1l, int1h, int2l, int2h,
                                           &garbagel, &garbageh, &low, &hi);
          t = build_int_2 (low, hi);
          break;
        case MIN_EXPR:
        case MAX_EXPR:
          if (uns)
            low = (((unsigned HOST_WIDE_INT) int1h
                    < (unsigned HOST_WIDE_INT) int2h)
                   || (((unsigned HOST_WIDE_INT) int1h
                        == (unsigned HOST_WIDE_INT) int2h)
                       && ((unsigned HOST_WIDE_INT) int1l
                           < (unsigned HOST_WIDE_INT) int2l)));
          else
            low = ((int1h < int2h)
                   || ((int1h == int2h)
                       && ((unsigned HOST_WIDE_INT) int1l
                           < (unsigned HOST_WIDE_INT) int2l)));

          if (low == (code == MIN_EXPR))
            t = build_int_2 (int1l, int1h);
          else
            t = build_int_2 (int2l, int2h);
          break;

        default:
          abort ();
        }

      TREE_TYPE (t) = TREE_TYPE (arg1);
      TREE_OVERFLOW (t)
        = ((notrunc ? !uns && overflow : force_fit_type (t, overflow && !uns))
           | TREE_OVERFLOW (arg1)
           | TREE_OVERFLOW (arg2));
      TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
                                    | TREE_CONSTANT_OVERFLOW (arg1)
                                    | TREE_CONSTANT_OVERFLOW (arg2));
      return t;
    }
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (TREE_CODE (arg1) == REAL_CST)
    {
      REAL_VALUE_TYPE d1;
      REAL_VALUE_TYPE d2;
      int overflow = 0;
      REAL_VALUE_TYPE value;
      tree t;

      d1 = TREE_REAL_CST (arg1);
      d2 = TREE_REAL_CST (arg2);

      /* If either operand is a NaN, just return it.  Otherwise, set up
         for floating-point trap; we return an overflow.  */
      if (REAL_VALUE_ISNAN (d1))
        return arg1;
      else if (REAL_VALUE_ISNAN (d2))
        return arg2;
      else if (setjmp (float_error))
        {
          t = copy_node (arg1);
          overflow = 1;
          goto got_float;
        }

      set_float_handler (float_error);

#ifdef REAL_ARITHMETIC
      REAL_ARITHMETIC (value, code, d1, d2);
#else
      switch (code)
        {
        case PLUS_EXPR:
          value = d1 + d2;
          break;

        case MINUS_EXPR:
          value = d1 - d2;
          break;

        case MULT_EXPR:
          value = d1 * d2;
          break;

        case RDIV_EXPR:
#ifndef REAL_INFINITY
          if (d2 == 0)
            abort ();
#endif
          value = d1 / d2;
          break;

        case MIN_EXPR:
          value = MIN (d1, d2);
          break;

        case MAX_EXPR:
          value = MAX (d1, d2);
          break;

        default:
          abort ();
        }
#endif /* no REAL_ARITHMETIC */

      t = build_real (TREE_TYPE (arg1),
                      real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)),
                                           value));
    got_float:
      set_float_handler (NULL_PTR);

      TREE_OVERFLOW (t)
        = (force_fit_type (t, overflow)
           | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
      TREE_CONSTANT_OVERFLOW (t)
        = TREE_OVERFLOW (t)
          | TREE_CONSTANT_OVERFLOW (arg1)
          | TREE_CONSTANT_OVERFLOW (arg2);
      return t;
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  if (TREE_CODE (arg1) == COMPLEX_CST)
    {
      register tree r1 = TREE_REALPART (arg1);
      register tree i1 = TREE_IMAGPART (arg1);
      register tree r2 = TREE_REALPART (arg2);
      register tree i2 = TREE_IMAGPART (arg2);
      register tree t;

      switch (code)
        {
        case PLUS_EXPR:
          t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
                             const_binop (PLUS_EXPR, i1, i2, notrunc));
          break;

        case MINUS_EXPR:
          t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
                             const_binop (MINUS_EXPR, i1, i2, notrunc));
          break;

        case MULT_EXPR:
          t = build_complex (const_binop (MINUS_EXPR,
                                          const_binop (MULT_EXPR,
                                                       r1, r2, notrunc),
                                          const_binop (MULT_EXPR,
                                                       i1, i2, notrunc),
                                          notrunc),
                             const_binop (PLUS_EXPR,
                                          const_binop (MULT_EXPR,
                                                       r1, i2, notrunc),
                                          const_binop (MULT_EXPR,
                                                       i1, r2, notrunc),
                                          notrunc));
          break;

        case RDIV_EXPR:
          {
            register tree magsquared
              = const_binop (PLUS_EXPR,
                             const_binop (MULT_EXPR, r2, r2, notrunc),
                             const_binop (MULT_EXPR, i2, i2, notrunc),
                             notrunc);

            t = build_complex
              (const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
                            ? TRUNC_DIV_EXPR : RDIV_EXPR,
                            const_binop (PLUS_EXPR,
                                         const_binop (MULT_EXPR, r1, r2,
                                                      notrunc),
                                         const_binop (MULT_EXPR, i1, i2,
                                                      notrunc),
                                         notrunc),
                            magsquared, notrunc),
               const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
                            ? TRUNC_DIV_EXPR : RDIV_EXPR,
                            const_binop (MINUS_EXPR,
                                         const_binop (MULT_EXPR, i1, r2,
                                                      notrunc),
                                         const_binop (MULT_EXPR, r1, i2,
                                                      notrunc),
                                         notrunc),
                            magsquared, notrunc));
          }
          break;

        default:
          abort ();
        }
      TREE_TYPE (t) = TREE_TYPE (arg1);
      return t;
    }
  return 0;
}
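
/* For illustration (not in the original source): folding `6 * 7' arrives
   here as const_binop (MULT_EXPR, arg1, arg2, 0) with two INTEGER_CST
   operands; the result is a fresh INTEGER_CST node for 42 with
   TREE_OVERFLOW clear.  */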
/* Return an INTEGER_CST with value NUMBER and type from `sizetype'.  */

tree
size_int (number)
     unsigned HOST_WIDE_INT number;
{
  register tree t;
  /* Type-size nodes already made for small sizes.  */
  static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];

  if (number < 2*HOST_BITS_PER_WIDE_INT + 1
      && size_table[number] != 0)
    return size_table[number];
  if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
    {
      push_obstacks_nochange ();
      /* Make this a permanent node.  */
      end_temporary_allocation ();
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
      size_table[number] = t;
      pop_obstacks ();
    }
  else
    {
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
    }
  return t;
}
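
/* For illustration (not in the original source): size_int (4) yields the
   cached permanent node for 4 with TREE_TYPE == sizetype, so repeated
   requests for small sizes all share one node.  */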
/* Combine operands ARG0 and ARG1 with arithmetic operation CODE.
   CODE is a tree code.  Data type is taken from `sizetype'.
   If the operands are constant, so is the result.  */

tree
size_binop (code, arg0, arg1)
     enum tree_code code;
     tree arg0, arg1;
{
  /* Handle the special case of two integer constants faster.  */
  if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
    {
      /* And some specific cases even faster than that.  */
      if (code == PLUS_EXPR
          && TREE_INT_CST_LOW (arg0) == 0
          && TREE_INT_CST_HIGH (arg0) == 0)
        return arg1;
      if (code == MINUS_EXPR
          && TREE_INT_CST_LOW (arg1) == 0
          && TREE_INT_CST_HIGH (arg1) == 0)
        return arg0;
      if (code == MULT_EXPR
          && TREE_INT_CST_LOW (arg0) == 1
          && TREE_INT_CST_HIGH (arg0) == 0)
        return arg1;
      /* Handle general case of two integer constants.  */
      return const_binop (code, arg0, arg1, 1);
    }

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return error_mark_node;

  return fold (build (code, sizetype, arg0, arg1));
}
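
/* For illustration (not in the original source): layout code can compute
   offsets with, e.g.,

        size_binop (PLUS_EXPR, size_int (4), size_int (8))

   which folds immediately to an INTEGER_CST for 12 of type sizetype.  */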
/* Given T, a tree representing type conversion of ARG1, a constant,
   return a constant tree representing the result of conversion.  */

static tree
fold_convert (t, arg1)
     register tree t;
     register tree arg1;
{
  register tree type = TREE_TYPE (t);
  int overflow = 0;

  if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
        {
          /* If we would build a constant wider than GCC supports,
             leave the conversion unfolded.  */
          if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
            return t;

          /* Given an integer constant, make new constant with new type,
             appropriately sign-extended or truncated.  */
          t = build_int_2 (TREE_INT_CST_LOW (arg1),
                           TREE_INT_CST_HIGH (arg1));
          TREE_TYPE (t) = type;
          /* Indicate an overflow if (1) ARG1 already overflowed,
             or (2) force_fit_type indicates an overflow.
             Tell force_fit_type that an overflow has already occurred
             if ARG1 is a too-large unsigned value and T is signed.  */
          TREE_OVERFLOW (t)
            = (TREE_OVERFLOW (arg1)
               | force_fit_type (t,
                                 (TREE_INT_CST_HIGH (arg1) < 0
                                  & (TREE_UNSIGNED (type)
                                     < TREE_UNSIGNED (TREE_TYPE (arg1))))));
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
        }
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      else if (TREE_CODE (arg1) == REAL_CST)
        {
          /* Don't initialize these, use assignments.
             Initialized local aggregates don't work on old compilers.  */
          REAL_VALUE_TYPE x;
          REAL_VALUE_TYPE l;
          REAL_VALUE_TYPE u;

          x = TREE_REAL_CST (arg1);
          l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
          u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
          /* See if X will be in range after truncation towards 0.
             To compensate for truncation, move the bounds away from 0,
             but reject if X exactly equals the adjusted bounds.  */
#ifdef REAL_ARITHMETIC
          REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
          REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
#else
          l--;
          u++;
#endif
          /* If X is a NaN, use zero instead and show we have an overflow.
             Otherwise, range check.  */
          if (REAL_VALUE_ISNAN (x))
            overflow = 1, x = dconst0;
          else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
            overflow = 1;

#ifndef REAL_ARITHMETIC
          {
            HOST_WIDE_INT low, high;
            HOST_WIDE_INT half_word
              = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);

            if (x < 0)
              x = -x;

            high = (HOST_WIDE_INT) (x / half_word / half_word);
            x -= (REAL_VALUE_TYPE) high * half_word * half_word;
            if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
              {
                low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
                low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
              }
            else
              low = (HOST_WIDE_INT) x;
            if (TREE_REAL_CST (arg1) < 0)
              neg_double (low, high, &low, &high);
            t = build_int_2 (low, high);
          }
#else
          {
            HOST_WIDE_INT low, high;
            REAL_VALUE_TO_INT (&low, &high, x);
            t = build_int_2 (low, high);
          }
#endif
          TREE_TYPE (t) = type;
          TREE_OVERFLOW (t)
            = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
        }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      TREE_TYPE (t) = type;
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      if (TREE_CODE (arg1) == INTEGER_CST)
        return build_real_from_int_cst (type, arg1);
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      if (TREE_CODE (arg1) == REAL_CST)
        {
          if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
            return arg1;
          else if (setjmp (float_error))
            {
              overflow = 1;
              t = copy_node (arg1);
              goto got_it;
            }
          set_float_handler (float_error);

          t = build_real (type, real_value_truncate (TYPE_MODE (type),
                                                     TREE_REAL_CST (arg1)));
          set_float_handler (NULL_PTR);

        got_it:
          TREE_OVERFLOW (t)
            = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
          return t;
        }
    }
  TREE_CONSTANT (t) = 1;
  return t;
}
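
/* For illustration (not in the original source): converting the REAL_CST
   3.9 to `int' arrives here; truncation toward zero gives the
   INTEGER_CST 3, while converting 1e30 fails the range check above and
   produces a result with TREE_OVERFLOW set.  */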
/* Return an expr equal to X but certainly not valid as an lvalue.
   Also make sure it is not valid as a null pointer constant.  */

tree
non_lvalue (x)
     tree x;
{
  tree result;

  /* These things are certainly not lvalues.  */
  if (TREE_CODE (x) == NON_LVALUE_EXPR
      || TREE_CODE (x) == INTEGER_CST
      || TREE_CODE (x) == REAL_CST
      || TREE_CODE (x) == STRING_CST
      || TREE_CODE (x) == ADDR_EXPR)
    {
      if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
        {
          /* Use NOP_EXPR instead of NON_LVALUE_EXPR
             so convert_for_assignment won't strip it.
             This is so this 0 won't be treated as a null pointer constant.  */
          result = build1 (NOP_EXPR, TREE_TYPE (x), x);
          TREE_CONSTANT (result) = TREE_CONSTANT (x);
          return result;
        }
      return x;
    }

  result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
  TREE_CONSTANT (result) = TREE_CONSTANT (x);
  return result;
}

/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
   Zero means allow extended lvalues.  */

int pedantic_lvalues;
/* When pedantic, return an expr equal to X but certainly not valid as a
   pedantic lvalue.  Otherwise, return X.  */

tree
pedantic_non_lvalue (x)
     tree x;
{
  if (pedantic_lvalues)
    return non_lvalue (x);
  else
    return x;
}
/* Given a tree comparison code, return the code that is the logical inverse
   of the given code.  It is not safe to do this for floating-point
   comparisons, except for NE_EXPR and EQ_EXPR.  */

static enum tree_code
invert_tree_comparison (code)
     enum tree_code code;
{
  switch (code)
    {
    case EQ_EXPR: return NE_EXPR;
    case NE_EXPR: return EQ_EXPR;
    case GT_EXPR: return LE_EXPR;
    case GE_EXPR: return LT_EXPR;
    case LT_EXPR: return GE_EXPR;
    case LE_EXPR: return GT_EXPR;
    default: abort ();
    }
}

/* Similar, but return the comparison that results if the operands are
   swapped.  This is safe for floating-point.  */

static enum tree_code
swap_tree_comparison (code)
     enum tree_code code;
{
  switch (code)
    {
    case EQ_EXPR:
    case NE_EXPR: return code;
    case GT_EXPR: return LT_EXPR;
    case GE_EXPR: return LE_EXPR;
    case LT_EXPR: return GT_EXPR;
    case LE_EXPR: return GE_EXPR;
    default: abort ();
    }
}
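
/* For illustration (not in the original source): the inverse of LT_EXPR
   is GE_EXPR, while its swap is GT_EXPR.  Inverting is unsafe for IEEE
   floating point because !(a < b) is not a >= b when either operand is
   a NaN; swapping the operands is always safe.  */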
/* Return nonzero if CODE is a tree code that represents a truth value.  */

static int
truth_value_p (code)
     enum tree_code code;
{
  return (TREE_CODE_CLASS (code) == '<'
          || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
          || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
          || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
}
/* Return nonzero if two operands are necessarily equal.
   If ONLY_CONST is non-zero, only return non-zero for constants.
   This function tests whether the operands are indistinguishable;
   it does not test whether they are equal using C's == operation.
   The distinction is important for IEEE floating point, because
   (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
   (2) two NaNs may be indistinguishable, but NaN!=NaN.  */

int
operand_equal_p (arg0, arg1, only_const)
     tree arg0, arg1;
     int only_const;
{
  /* If both types don't have the same signedness, then we can't consider
     them equal.  We must check this before the STRIP_NOPS calls
     because they may change the signedness of the arguments.  */
  if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
    return 0;

  STRIP_NOPS (arg0);
  STRIP_NOPS (arg1);

  /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
     We don't care about side effects in that case because the SAVE_EXPR
     takes care of that for us.  */
  if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
    return ! only_const;

  if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
    return 0;

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == ADDR_EXPR
      && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
    return 1;

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == INTEGER_CST
      && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
      && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
    return 1;

  /* Detect when real constants are equal.  */
  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == REAL_CST)
    return !bcmp ((char *) &TREE_REAL_CST (arg0),
                  (char *) &TREE_REAL_CST (arg1),
                  sizeof (REAL_VALUE_TYPE));

  if (only_const)
    return 0;

  if (TREE_CODE (arg0) != TREE_CODE (arg1))
    return 0;

  /* This is needed for conversions and for COMPONENT_REF.
     Might as well play it safe and always test this.  */
  if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
    return 0;

  switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
    {
    case '1':
      /* Two conversions are equal only if signedness and modes match.  */
      if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
          && (TREE_UNSIGNED (TREE_TYPE (arg0))
              != TREE_UNSIGNED (TREE_TYPE (arg1))))
        return 0;

      return operand_equal_p (TREE_OPERAND (arg0, 0),
                              TREE_OPERAND (arg1, 0), 0);

    case '<':
    case '2':
      return (operand_equal_p (TREE_OPERAND (arg0, 0),
                               TREE_OPERAND (arg1, 0), 0)
              && operand_equal_p (TREE_OPERAND (arg0, 1),
                                  TREE_OPERAND (arg1, 1), 0));

    case 'r':
      switch (TREE_CODE (arg0))
        {
        case INDIRECT_REF:
          return operand_equal_p (TREE_OPERAND (arg0, 0),
                                  TREE_OPERAND (arg1, 0), 0);

        case COMPONENT_REF:
        case ARRAY_REF:
          return (operand_equal_p (TREE_OPERAND (arg0, 0),
                                   TREE_OPERAND (arg1, 0), 0)
                  && operand_equal_p (TREE_OPERAND (arg0, 1),
                                      TREE_OPERAND (arg1, 1), 0));

        case BIT_FIELD_REF:
          return (operand_equal_p (TREE_OPERAND (arg0, 0),
                                   TREE_OPERAND (arg1, 0), 0)
                  && operand_equal_p (TREE_OPERAND (arg0, 1),
                                      TREE_OPERAND (arg1, 1), 0)
                  && operand_equal_p (TREE_OPERAND (arg0, 2),
                                      TREE_OPERAND (arg1, 2), 0));
        }
      break;
    }

  return 0;
}
/* Similar to operand_equal_p, but see if ARG0 might have been made by
   shorten_compare from ARG1 when ARG1 was being compared with OTHER.

   When in doubt, return 0.  */

static int
operand_equal_for_comparison_p (arg0, arg1, other)
     tree arg0, arg1;
     tree other;
{
  int unsignedp1, unsignedpo;
  tree primarg1, primother;
  unsigned correct_width;

  if (operand_equal_p (arg0, arg1, 0))
    return 1;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
      || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
    return 0;

  /* Duplicate what shorten_compare does to ARG1 and see if that gives the
     actual comparison operand, ARG0.

     First throw away any conversions to wider types
     already present in the operands.  */

  primarg1 = get_narrower (arg1, &unsignedp1);
  primother = get_narrower (other, &unsignedpo);

  correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
  if (unsignedp1 == unsignedpo
      && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
      && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
    {
      tree type = TREE_TYPE (arg0);

      /* Make sure shorter operand is extended the right way
         to match the longer operand.  */
      primarg1 = convert (signed_or_unsigned_type (unsignedp1,
                                                   TREE_TYPE (primarg1)),
                          primarg1);

      if (operand_equal_p (arg0, convert (type, primarg1), 0))
        return 1;
    }

  return 0;
}
/* See if ARG is an expression that is either a comparison or is performing
   arithmetic on comparisons.  The comparisons must only be comparing
   two different values, which will be stored in *CVAL1 and *CVAL2; if
   they are non-zero it means that some operands have already been found.
   No variables may be used anywhere else in the expression except in the
   comparisons.  If SAVE_P is true it means we removed a SAVE_EXPR around
   the expression and save_expr needs to be called with CVAL1 and CVAL2.

   If this is true, return 1.  Otherwise, return zero.  */

static int
twoval_comparison_p (arg, cval1, cval2, save_p)
     tree arg;
     tree *cval1, *cval2;
     int *save_p;
{
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
           && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
               || code == COMPOUND_EXPR))
    class = '2';

  /* ??? Disable this since the SAVE_EXPR might already be in use outside
     the expression.  There may be no way to make this work, but it needs
     to be looked at again for 2.6.  */
#if 0
  else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
    {
      /* If we've already found a CVAL1 or CVAL2, this expression is
         too complex to handle.  */
      if (*cval1 || *cval2)
        return 0;

      class = '1';
      *save_p = 1;
    }
#endif

  switch (class)
    {
    case '1':
      return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);

    case '2':
      return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
              && twoval_comparison_p (TREE_OPERAND (arg, 1),
                                      cval1, cval2, save_p));

    case 'e':
      if (code == COND_EXPR)
        return (twoval_comparison_p (TREE_OPERAND (arg, 0),
                                     cval1, cval2, save_p)
                && twoval_comparison_p (TREE_OPERAND (arg, 1),
                                        cval1, cval2, save_p)
                && twoval_comparison_p (TREE_OPERAND (arg, 2),
                                        cval1, cval2, save_p));
      return 0;

    case '<':
      /* First see if we can handle the first operand, then the second.  For
         the second operand, we know *CVAL1 can't be zero.  It must be that
         one side of the comparison is each of the values; test for the
         case where this isn't true by failing if the two operands
         are the same.  */

      if (operand_equal_p (TREE_OPERAND (arg, 0),
                           TREE_OPERAND (arg, 1), 0))
        return 0;

      if (*cval1 == 0)
        *cval1 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
        ;
      else if (*cval2 == 0)
        *cval2 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
        ;
      else
        return 0;

      if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
        ;
      else if (*cval2 == 0)
        *cval2 = TREE_OPERAND (arg, 1);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
        ;
      else
        return 0;

      return 1;
    }

  return 0;
}
/* ARG is a tree that is known to contain just arithmetic operations and
   comparisons.  Evaluate the operations in the tree substituting NEW0 for
   any occurrence of OLD0 as an operand of a comparison and likewise for
   NEW1 and OLD1.  */

static tree
eval_subst (arg, old0, new0, old1, new1)
     tree arg;
     tree old0, new0, old1, new1;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
           && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
    class = '2';

  switch (class)
    {
    case '1':
      return fold (build1 (code, type,
                           eval_subst (TREE_OPERAND (arg, 0),
                                       old0, new0, old1, new1)));

    case '2':
      return fold (build (code, type,
                          eval_subst (TREE_OPERAND (arg, 0),
                                      old0, new0, old1, new1),
                          eval_subst (TREE_OPERAND (arg, 1),
                                      old0, new0, old1, new1)));

    case 'e':
      switch (code)
        {
        case SAVE_EXPR:
          return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);

        case COMPOUND_EXPR:
          return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);

        case COND_EXPR:
          return fold (build (code, type,
                              eval_subst (TREE_OPERAND (arg, 0),
                                          old0, new0, old1, new1),
                              eval_subst (TREE_OPERAND (arg, 1),
                                          old0, new0, old1, new1),
                              eval_subst (TREE_OPERAND (arg, 2),
                                          old0, new0, old1, new1)));
        }
      break;

    case '<':
      {
        tree arg0 = TREE_OPERAND (arg, 0);
        tree arg1 = TREE_OPERAND (arg, 1);

        /* We need to check both for exact equality and tree equality.  The
           former will be true if the operand has a side-effect.  In that
           case, we know the operand occurred exactly once.  */

        if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
          arg0 = new0;
        else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
          arg0 = new1;

        if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
          arg1 = new0;
        else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
          arg1 = new1;

        return fold (build (code, type, arg0, arg1));
      }
    }

  return arg;
}
/* Return a tree for the case when the result of an expression is RESULT
   converted to TYPE and OMITTED was previously an operand of the expression
   but is now not needed (e.g., we folded OMITTED * 0).

   If OMITTED has side effects, we must evaluate it.  Otherwise, just do
   the conversion of RESULT to TYPE.  */

static tree
omit_one_operand (type, result, omitted)
     tree type, result, omitted;
{
  tree t = convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build (COMPOUND_EXPR, type, omitted, t);

  return non_lvalue (t);
}
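
/* For illustration (not in the original source): folding `f () * 0' can
   use omit_one_operand (type, integer_zero_node, call), producing the
   COMPOUND_EXPR `(f (), 0)' so the call's side effects are preserved
   even though its value is not needed.  */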
/* Similar, but call pedantic_non_lvalue instead of non_lvalue.  */

static tree
pedantic_omit_one_operand (type, result, omitted)
     tree type, result, omitted;
{
  tree t = convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build (COMPOUND_EXPR, type, omitted, t);

  return pedantic_non_lvalue (t);
}
/* Return a simplified tree node for the truth-negation of ARG.  This
   never alters ARG itself.  We assume that ARG is an operation that
   returns a truth value (0 or 1).  */

tree
invert_truthvalue (arg)
     tree arg;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);

  if (code == ERROR_MARK)
    return arg;

  /* If this is a comparison, we can simply invert it, except for
     floating-point non-equality comparisons, in which case we just
     enclose a TRUTH_NOT_EXPR around what we have.  */

  if (TREE_CODE_CLASS (code) == '<')
    {
      if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
          && code != NE_EXPR && code != EQ_EXPR)
        return build1 (TRUTH_NOT_EXPR, type, arg);
      else
        return build (invert_tree_comparison (code), type,
                      TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
    }

  switch (code)
    {
    case INTEGER_CST:
      return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
                                         && TREE_INT_CST_HIGH (arg) == 0, 0));

    case TRUTH_AND_EXPR:
      return build (TRUTH_OR_EXPR, type,
                    invert_truthvalue (TREE_OPERAND (arg, 0)),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_OR_EXPR:
      return build (TRUTH_AND_EXPR, type,
                    invert_truthvalue (TREE_OPERAND (arg, 0)),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_XOR_EXPR:
      /* Here we can invert either operand.  We invert the first operand
         unless the second operand is a TRUTH_NOT_EXPR in which case our
         result is the XOR of the first operand with the inside of the
         negation of the second operand.  */

      if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
        return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
                      TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
      else
        return build (TRUTH_XOR_EXPR, type,
                      invert_truthvalue (TREE_OPERAND (arg, 0)),
                      TREE_OPERAND (arg, 1));

    case TRUTH_ANDIF_EXPR:
      return build (TRUTH_ORIF_EXPR, type,
                    invert_truthvalue (TREE_OPERAND (arg, 0)),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_ORIF_EXPR:
      return build (TRUTH_ANDIF_EXPR, type,
                    invert_truthvalue (TREE_OPERAND (arg, 0)),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_NOT_EXPR:
      return TREE_OPERAND (arg, 0);

    case COND_EXPR:
      return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
                    invert_truthvalue (TREE_OPERAND (arg, 1)),
                    invert_truthvalue (TREE_OPERAND (arg, 2)));

    case COMPOUND_EXPR:
      return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case NON_LVALUE_EXPR:
      return invert_truthvalue (TREE_OPERAND (arg, 0));

    case NOP_EXPR:
    case CONVERT_EXPR:
    case FLOAT_EXPR:
      return build1 (TREE_CODE (arg), type,
                     invert_truthvalue (TREE_OPERAND (arg, 0)));

    case BIT_AND_EXPR:
      if (!integer_onep (TREE_OPERAND (arg, 1)))
        break;
      return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));

    case SAVE_EXPR:
      return build1 (TRUTH_NOT_EXPR, type, arg);

    case CLEANUP_POINT_EXPR:
      return build1 (CLEANUP_POINT_EXPR, type,
                     invert_truthvalue (TREE_OPERAND (arg, 0)));
    }
  if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
    abort ();
  return build1 (TRUTH_NOT_EXPR, type, arg);
}
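
/* For illustration (not in the original source): for integer operands,
   invert_truthvalue rewrites `a < b && c' as `a >= b || !c', pushing the
   negation inward by De Morgan's laws instead of wrapping the whole
   expression in a TRUTH_NOT_EXPR.  */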
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
        (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.

   If this optimization cannot be done, 0 will be returned.  */

static tree
distribute_bit_expr (code, type, arg0, arg1)
     enum tree_code code;
     tree type;
     tree arg0, arg1;
{
  tree common;
  tree left, right;

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      || TREE_CODE (arg0) == code
      || (TREE_CODE (arg0) != BIT_AND_EXPR
          && TREE_CODE (arg0) != BIT_IOR_EXPR))
    return 0;

  if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 0);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 0);
    }
  else
    return 0;

  return fold (build (TREE_CODE (arg0), type, common,
                      fold (build (code, type, left, right))));
}
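
/* For illustration (not in the original source): (a | 3) & (a | 5)
   becomes a | (3 & 5), which the recursive fold reduces to a | 1,
   replacing three bit operations by one.  */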
/* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
   starting at BITPOS.  The field is unsigned if UNSIGNEDP is non-zero.  */

static tree
make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
     tree inner, type;
     int bitsize, bitpos;
     int unsignedp;
{
  tree result = build (BIT_FIELD_REF, type, inner,
                       size_int (bitsize), size_int (bitpos));

  TREE_UNSIGNED (result) = unsignedp;

  return result;
}
2174 /* Optimize a bit-field compare.
2176 There are two cases: First is a compare against a constant and the
2177 second is a comparison of two items where the fields are at the same
2178 bit position relative to the start of a chunk (byte, halfword, word)
2179 large enough to contain it. In these cases we can avoid the shift
2180 implicit in bitfield extractions.
2182 For constants, we emit a compare of the shifted constant with the
2183 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2184 compared. For two fields at the same position, we do the ANDs with the
2185 similar mask and compare the result of the ANDs.
2187 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2188 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2189 are the left and right operands of the comparison, respectively.
2191 If the optimization described above can be done, we return the resulting
2192 tree. Otherwise we return zero. */
2195 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2196 enum tree_code code;
2200 int lbitpos, lbitsize, rbitpos, rbitsize;
2201 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
2202 tree type = TREE_TYPE (lhs);
2203 tree signed_type, unsigned_type;
2204 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2205 enum machine_mode lmode, rmode, lnmode, rnmode;
2206 int lunsignedp, runsignedp;
2207 int lvolatilep = 0, rvolatilep = 0;
2208 tree linner, rinner;
2212 /* Get all the information about the extractions being done. If the bit size
2213 if the same as the size of the underlying object, we aren't doing an
2214 extraction at all and so can do nothing. */
2215 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2216 &lunsignedp, &lvolatilep);
2217 if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2223 /* If this is not a constant, we can only do something if bit positions,
2224 sizes, and signedness are the same. */
2225 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
2226 &rmode, &runsignedp, &rvolatilep);
2228 if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
2229 || lunsignedp != runsignedp || offset != 0)
2233 /* See if we can find a mode to refer to this field. We should be able to,
2234 but fail if we can't. */
2235 lnmode = get_best_mode (lbitsize, lbitpos,
2236 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
2238 if (lnmode == VOIDmode)
2241 /* Set signed and unsigned types of the precision of this mode for the
2243 signed_type = type_for_mode (lnmode, 0);
2244 unsigned_type = type_for_mode (lnmode, 1);
2248 rnmode = get_best_mode (rbitsize, rbitpos,
2249 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
2251 if (rnmode == VOIDmode)
2255 /* Compute the bit position and size for the new reference and our offset
2256 within it. If the new reference is the same size as the original, we
2257 won't optimize anything, so return zero. */
2258 lnbitsize = GET_MODE_BITSIZE (lnmode);
2259 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2260 lbitpos -= lnbitpos;
2261 if (lnbitsize == lbitsize)
2266 rnbitsize = GET_MODE_BITSIZE (rnmode);
2267 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2268 rbitpos -= rnbitpos;
2269 if (rnbitsize == rbitsize)
2273 if (BYTES_BIG_ENDIAN)
2274 lbitpos = lnbitsize - lbitsize - lbitpos;
2276 /* Make the mask to be used against the extracted field. */
2277 mask = build_int_2 (~0, ~0);
2278 TREE_TYPE (mask) = unsigned_type;
2279 force_fit_type (mask, 0);
2280 mask = convert (unsigned_type, mask);
2281 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2282 mask = const_binop (RSHIFT_EXPR, mask,
2283 size_int (lnbitsize - lbitsize - lbitpos), 0);
2286 /* If not comparing with constant, just rework the comparison
2288 return build (code, compare_type,
2289 build (BIT_AND_EXPR, unsigned_type,
2290 make_bit_field_ref (linner, unsigned_type,
2291 lnbitsize, lnbitpos, 1),
2293 build (BIT_AND_EXPR, unsigned_type,
2294 make_bit_field_ref (rinner, unsigned_type,
2295 rnbitsize, rnbitpos, 1),
2298 /* Otherwise, we are handling the constant case. See if the constant is too
2299 big for the field. Warn and return a tree of for 0 (false) if so. We do
2300 this not only for its own sake, but to avoid having to test for this
2301 error case below. If we didn't, we might generate wrong code.
2303 For unsigned fields, the constant shifted right by the field length should
2304 be all zero. For signed fields, the high-order bits should agree with
2309 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2310 convert (unsigned_type, rhs),
2311 size_int (lbitsize), 0)))
2313 warning ("comparison is always %s due to width of bitfield",
2314 code == NE_EXPR ? "one" : "zero");
2315 return convert (compare_type,
2317 ? integer_one_node : integer_zero_node));
2322 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2323 size_int (lbitsize - 1), 0);
2324 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2326 warning ("comparison is always %s due to width of bitfield",
2327 code == NE_EXPR ? "one" : "zero");
2328 return convert (compare_type,
2330 ? integer_one_node : integer_zero_node));
2334 /* Single-bit compares should always be against zero. */
2335 if (lbitsize == 1 && ! integer_zerop (rhs))
2337 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2338 rhs = convert (type, integer_zero_node);
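/* E.g. (illustrative): for a one-bit field b, "b == 1" is rewritten here
   as "b != 0", so only comparisons against zero reach the code below.  */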
2341 /* Make a new bitfield reference, shift the constant over the
2342 appropriate number of bits and mask it with the computed mask
2343 (in case this was a signed field). If we changed it, make a new one. */
2344 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2347 TREE_SIDE_EFFECTS (lhs) = 1;
2348 TREE_THIS_VOLATILE (lhs) = 1;
2351 rhs = fold (const_binop (BIT_AND_EXPR,
2352 const_binop (LSHIFT_EXPR,
2353 convert (unsigned_type, rhs),
2354 size_int (lbitpos), 0),
2357 return build (code, compare_type,
2358 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2362 /* Subroutine for fold_truthop: decode a field reference.
2364 If EXP is a comparison reference, we return the innermost reference.
2366 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2367 set to the starting bit number.
2369 If the innermost field can be completely contained in a mode-sized
2370 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2372 *PVOLATILEP is set to 1 if any expression encountered is volatile;
2373 otherwise it is not changed.
2375 *PUNSIGNEDP is set to the signedness of the field.
2377 *PMASK is set to the mask used. This is either contained in a
2378 BIT_AND_EXPR or derived from the width of the field.
2380 Return 0 if this is not a component reference or is one that we can't
2381 do anything with. */
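/* Illustrative example (hypothetical field layout): if EXP is "s.f & 3"
   where s.f is a 5-bit unsigned bitfield, the innermost reference returned
   is the underlying object, *PBITSIZE is 5, and *PMASK is the BIT_AND_EXPR
   constant 3 merged with the field's natural mask 0x1f, i.e. 3.  */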
2384 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2387 int *pbitsize, *pbitpos;
2388 enum machine_mode *pmode;
2389 int *punsignedp, *pvolatilep;
2393 tree mask, inner, offset;
2397 /* All the optimizations using this function assume integer fields.
2398 There are problems with FP fields since the type_for_size call
2399 below can fail for, e.g., XFmode. */
2400 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
2405 if (TREE_CODE (exp) == BIT_AND_EXPR)
2407 and_mask = TREE_OPERAND (exp, 1);
2408 exp = TREE_OPERAND (exp, 0);
2409 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
2410 if (TREE_CODE (and_mask) != INTEGER_CST)
2415 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2416 punsignedp, pvolatilep);
2417 if ((inner == exp && and_mask == 0)
2418 || *pbitsize < 0 || offset != 0)
2421 /* Compute the mask to access the bitfield. */
2422 unsigned_type = type_for_size (*pbitsize, 1);
2423 precision = TYPE_PRECISION (unsigned_type);
2425 mask = build_int_2 (~0, ~0);
2426 TREE_TYPE (mask) = unsigned_type;
2427 force_fit_type (mask, 0);
2428 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2429 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2431 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
2433 mask = fold (build (BIT_AND_EXPR, unsigned_type,
2434 convert (unsigned_type, and_mask), mask));
2440 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
2441 bit positions. */
2444 all_ones_mask_p (mask, size)
2448 tree type = TREE_TYPE (mask);
2449 int precision = TYPE_PRECISION (type);
2452 tmask = build_int_2 (~0, ~0);
2453 TREE_TYPE (tmask) = signed_type (type);
2454 force_fit_type (tmask, 0);
2456 tree_int_cst_equal (mask,
2457 const_binop (RSHIFT_EXPR,
2458 const_binop (LSHIFT_EXPR, tmask,
2459 size_int (precision - size),
2461 size_int (precision - size), 0));
2464 /* Subroutine for fold_truthop: determine if an operand is simple enough
2465 to be evaluated unconditionally. */
2468 simple_operand_p (exp)
2471 /* Strip any conversions that don't change the machine mode. */
2472 while ((TREE_CODE (exp) == NOP_EXPR
2473 || TREE_CODE (exp) == CONVERT_EXPR)
2474 && (TYPE_MODE (TREE_TYPE (exp))
2475 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2476 exp = TREE_OPERAND (exp, 0);
2478 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2479 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2480 && ! TREE_ADDRESSABLE (exp)
2481 && ! TREE_THIS_VOLATILE (exp)
2482 && ! DECL_NONLOCAL (exp)
2483 /* Don't regard global variables as simple. They may be
2484 allocated in ways unknown to the compiler (shared memory,
2485 #pragma weak, etc). */
2486 && ! TREE_PUBLIC (exp)
2487 && ! DECL_EXTERNAL (exp)
2488 /* Loading a static variable is unduly expensive, but global
2489 registers aren't expensive. */
2490 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2493 /* Subroutine for fold_truthop: try to optimize a range test.
2495 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2497 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2498 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2499 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of
2500 the result.
2502 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2503 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2504 larger than HI_CST (they may be equal).
2506 We return the simplified tree or 0 if no optimization is possible. */
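/* Worked example (illustrative): for "i >= 2 && i <= 9" we get
   lo_cst == 2, hi_cst == 9, and build "(unsigned) (i - 2) <= 7".  When
   i < 2 the unsigned subtraction wraps far past 7, so the single
   comparison covers both original tests.  */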
2509 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2510 enum tree_code jcode, lo_code, hi_code;
2511 tree type, var, lo_cst, hi_cst;
2514 enum tree_code rcode;
2516 /* See if this is a range test and normalize the constant terms. */
2518 if (jcode == TRUTH_AND_EXPR)
2523 /* See if we have VAR != CST && VAR != CST+1. */
2524 if (! (hi_code == NE_EXPR
2525 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2526 && tree_int_cst_equal (integer_one_node,
2527 const_binop (MINUS_EXPR,
2528 hi_cst, lo_cst, 0))))
2536 if (hi_code == LT_EXPR)
2537 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2538 else if (hi_code != LE_EXPR)
2541 if (lo_code == GT_EXPR)
2542 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2544 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
2557 /* See if we have VAR == CST || VAR == CST+1. */
2558 if (! (hi_code == EQ_EXPR
2559 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2560 && tree_int_cst_equal (integer_one_node,
2561 const_binop (MINUS_EXPR,
2562 hi_cst, lo_cst, 0))))
2570 if (hi_code == GE_EXPR)
2571 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2572 else if (hi_code != GT_EXPR)
2575 if (lo_code == LE_EXPR)
2576 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2578 /* We now have VAR < LO_CST || VAR > HI_CST. */
2587 /* When normalizing, it is possible to both increment the smaller constant
2588 and decrement the larger constant. See if they are still ordered. */
2589 if (tree_int_cst_lt (hi_cst, lo_cst))
2592 /* Fail if VAR isn't an integer. */
2593 utype = TREE_TYPE (var);
2594 if (! INTEGRAL_TYPE_P (utype))
2597 /* The range test is invalid if subtracting the two constants results
2598 in overflow. This can happen in traditional mode. */
2599 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2600 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2603 if (! TREE_UNSIGNED (utype))
2605 utype = unsigned_type (utype);
2606 var = convert (utype, var);
2607 lo_cst = convert (utype, lo_cst);
2608 hi_cst = convert (utype, hi_cst);
2611 return fold (convert (type,
2612 build (rcode, utype,
2613 build (MINUS_EXPR, utype, var, lo_cst),
2614 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2617 /* Subroutine for fold_truthop: C is an INTEGER_CST interpreted as a P
2618 bit value. Arrange things so the extra bits will be set to zero if and
2619 only if C is sign-extended to its full width. */
2622 unextend (c, p, unsignedp)
2627 tree type = TREE_TYPE (c);
2628 int modesize = GET_MODE_BITSIZE (TYPE_MODE (type));
2631 if (p == modesize || unsignedp)
2634 if (TREE_UNSIGNED (type))
2635 c = convert (signed_type (type), c);
2637 /* We work by getting just the sign bit into the low-order bit, then
2638 into the high-order bit, then sign-extend. We then XOR that value
2639 with C. */
2640 temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1), 0);
2641 temp = const_binop (BIT_AND_EXPR, temp, size_int (1), 0);
2642 temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1), 0);
2643 temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1), 0);
2644 return convert (type, const_binop (BIT_XOR_EXPR, c, temp, 0));
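/* Illustration (assumed 8-bit mode, P == 4): starting from C == 0xfa, the
   4-bit value 1010 already sign-extended, TEMP becomes 0xf0 and
   C ^ TEMP == 0x0a, i.e. the extra bits end up zero.  Starting instead
   from C == 0x0a, which is not sign-extended, the XOR yields 0xfa and the
   extra bits are set.  */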
2647 /* Find ways of folding logical expressions of LHS and RHS:
2648 Try to merge two comparisons to the same innermost item.
2649 Look for range tests like "ch >= '0' && ch <= '9'".
2650 Look for combinations of simple terms on machines with expensive branches
2651 and evaluate the RHS unconditionally.
2653 For example, if we have p->a == 2 && p->b == 4 and we can make an
2654 object large enough to span both A and B, we can do this with a comparison
2655 against the object ANDed with a mask.
2657 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2658 operations to do this with one comparison.
2660 We check for both normal comparisons and the BIT_AND_EXPRs made by
2661 this function and the one above.
2663 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2664 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2666 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
2667 two operands.
2669 We return the simplified tree or 0 if no optimization is possible. */
2672 fold_truthop (code, truth_type, lhs, rhs)
2673 enum tree_code code;
2674 tree truth_type, lhs, rhs;
2676 /* If this is the "or" of two comparisons, we can do something if
2677 the comparisons are NE_EXPR. If this is the "and", we can do something
2678 if the comparisons are EQ_EXPR. I.e.,
2679 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2681 WANTED_CODE is this operation code. For single bit fields, we can
2682 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2683 comparison for one-bit fields. */
2685 enum tree_code wanted_code;
2686 enum tree_code lcode, rcode;
2687 tree ll_arg, lr_arg, rl_arg, rr_arg;
2688 tree ll_inner, lr_inner, rl_inner, rr_inner;
2689 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2690 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2691 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2692 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2693 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2694 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2695 enum machine_mode lnmode, rnmode;
2696 tree ll_mask, lr_mask, rl_mask, rr_mask;
2697 tree l_const, r_const;
2699 int first_bit, end_bit;
2702 /* Start by getting the comparison codes and seeing if this looks like
2703 a range test. Fail if anything is volatile. If one operand is a
2704 BIT_AND_EXPR with the constant one, treat it as if it were surrounded
2705 by a NE_EXPR comparison against zero. */
2707 if (TREE_SIDE_EFFECTS (lhs)
2708 || TREE_SIDE_EFFECTS (rhs))
2711 lcode = TREE_CODE (lhs);
2712 rcode = TREE_CODE (rhs);
2714 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2715 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2717 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2718 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
2720 if (TREE_CODE_CLASS (lcode) != '<'
2721 || TREE_CODE_CLASS (rcode) != '<')
2724 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2725 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2727 ll_arg = TREE_OPERAND (lhs, 0);
2728 lr_arg = TREE_OPERAND (lhs, 1);
2729 rl_arg = TREE_OPERAND (rhs, 0);
2730 rr_arg = TREE_OPERAND (rhs, 1);
2732 if (TREE_CODE (lr_arg) == INTEGER_CST
2733 && TREE_CODE (rr_arg) == INTEGER_CST
2734 && operand_equal_p (ll_arg, rl_arg, 0))
2736 if (tree_int_cst_lt (lr_arg, rr_arg))
2737 result = range_test (code, truth_type, lcode, rcode,
2738 ll_arg, lr_arg, rr_arg);
2740 result = range_test (code, truth_type, rcode, lcode,
2741 ll_arg, rr_arg, lr_arg);
2743 /* If this isn't a range test, it also isn't a comparison that
2744 can be merged. However, it wins to evaluate the RHS unconditionally
2745 on machines with expensive branches. */
2747 if (result == 0 && BRANCH_COST >= 2)
2749 if (TREE_CODE (ll_arg) != VAR_DECL
2750 && TREE_CODE (ll_arg) != PARM_DECL)
2752 /* Avoid evaluating the variable part twice. */
2753 ll_arg = save_expr (ll_arg);
2754 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2755 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2757 return build (code, truth_type, lhs, rhs);
2762 /* If the RHS can be evaluated unconditionally and its operands are
2763 simple, it wins to evaluate the RHS unconditionally on machines
2764 with expensive branches. In this case, this isn't a comparison
2765 that can be merged. */
2767 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2768 are with zero (tmw). */
2770 if (BRANCH_COST >= 2
2771 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2772 && simple_operand_p (rl_arg)
2773 && simple_operand_p (rr_arg))
2774 return build (code, truth_type, lhs, rhs);
2776 /* See if the comparisons can be merged. Then get all the parameters for
2777 each side. */
2779 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2780 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2784 ll_inner = decode_field_reference (ll_arg,
2785 &ll_bitsize, &ll_bitpos, &ll_mode,
2786 &ll_unsignedp, &volatilep, &ll_mask);
2787 lr_inner = decode_field_reference (lr_arg,
2788 &lr_bitsize, &lr_bitpos, &lr_mode,
2789 &lr_unsignedp, &volatilep, &lr_mask);
2790 rl_inner = decode_field_reference (rl_arg,
2791 &rl_bitsize, &rl_bitpos, &rl_mode,
2792 &rl_unsignedp, &volatilep, &rl_mask);
2793 rr_inner = decode_field_reference (rr_arg,
2794 &rr_bitsize, &rr_bitpos, &rr_mode,
2795 &rr_unsignedp, &volatilep, &rr_mask);
2797 /* The inner operation on the lhs of each comparison must be the
2798 same if we are to be able to do anything. Then see if we have
2799 constants. If not, the same must be true for the rhs operands. */
2801 if (volatilep || ll_inner == 0 || rl_inner == 0
2802 || ! operand_equal_p (ll_inner, rl_inner, 0))
2805 if (TREE_CODE (lr_arg) == INTEGER_CST
2806 && TREE_CODE (rr_arg) == INTEGER_CST)
2807 l_const = lr_arg, r_const = rr_arg;
2808 else if (lr_inner == 0 || rr_inner == 0
2809 || ! operand_equal_p (lr_inner, rr_inner, 0))
2812 l_const = r_const = 0;
2814 /* If either comparison code is not correct for our logical operation,
2815 fail. However, we can convert a one-bit comparison against zero into
2816 the opposite comparison against that bit being set in the field. */
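/* E.g. (illustrative): when the "or" case wants NE_EXPR, a term like
   "(x & 4) == 0" can still be used by rewriting it as "(x & 4) != 4",
   since (x & 4) is always either 0 or 4.  */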
2818 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2819 if (lcode != wanted_code)
2821 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2827 if (rcode != wanted_code)
2829 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2835 /* See if we can find a mode that contains both fields being compared on
2836 the left. If we can't, fail. Otherwise, update all constants and masks
2837 to be relative to a field of that size. */
2838 first_bit = MIN (ll_bitpos, rl_bitpos);
2839 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2840 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2841 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2843 if (lnmode == VOIDmode)
2846 lnbitsize = GET_MODE_BITSIZE (lnmode);
2847 lnbitpos = first_bit & ~ (lnbitsize - 1);
2848 type = type_for_size (lnbitsize, 1);
2849 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2851 if (BYTES_BIG_ENDIAN)
2853 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2854 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2857 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2858 size_int (xll_bitpos), 0);
2859 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2860 size_int (xrl_bitpos), 0);
2864 l_const = convert (type, unextend (l_const, ll_bitsize, ll_unsignedp));
2865 l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
2866 if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
2867 fold (build1 (BIT_NOT_EXPR,
2871 warning ("comparison is always %s",
2872 wanted_code == NE_EXPR ? "one" : "zero");
2874 return convert (truth_type,
2875 wanted_code == NE_EXPR
2876 ? integer_one_node : integer_zero_node);
2881 r_const = convert (type, unextend (r_const, rl_bitsize, rl_unsignedp));
2882 r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos), 0);
2883 if (! integer_zerop (const_binop (BIT_AND_EXPR, r_const,
2884 fold (build1 (BIT_NOT_EXPR,
2888 warning ("comparison is always %s",
2889 wanted_code == NE_EXPR ? "one" : "zero");
2891 return convert (truth_type,
2892 wanted_code == NE_EXPR
2893 ? integer_one_node : integer_zero_node);
2897 /* If the right sides are not constant, do the same for them. Also,
2898 disallow this optimization if a size or signedness mismatch occurs
2899 between the left and right sides. */
2902 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2903 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2904 /* Make sure the two fields on the right
2905 correspond to the left without being swapped. */
2906 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2909 first_bit = MIN (lr_bitpos, rr_bitpos);
2910 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2911 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2912 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2914 if (rnmode == VOIDmode)
2917 rnbitsize = GET_MODE_BITSIZE (rnmode);
2918 rnbitpos = first_bit & ~ (rnbitsize - 1);
2919 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2921 if (BYTES_BIG_ENDIAN)
2923 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2924 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2927 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2928 size_int (xlr_bitpos), 0);
2929 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2930 size_int (xrr_bitpos), 0);
2932 /* Make a mask that corresponds to both fields being compared.
2933 Do this for both items being compared. If the masks agree,
2934 we can do this by masking both and comparing the masked
2935 results. */
2936 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2937 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2938 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2940 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2941 ll_unsignedp || rl_unsignedp);
2942 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2943 lr_unsignedp || rr_unsignedp);
2944 if (! all_ones_mask_p (ll_mask, lnbitsize))
2946 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2947 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2949 return build (wanted_code, truth_type, lhs, rhs);
2952 /* There is still another way we can do something: If both pairs of
2953 fields being compared are adjacent, we may be able to make a wider
2954 field containing them both. */
2955 if ((ll_bitsize + ll_bitpos == rl_bitpos
2956 && lr_bitsize + lr_bitpos == rr_bitpos)
2957 || (ll_bitpos == rl_bitpos + rl_bitsize
2958 && lr_bitpos == rr_bitpos + rr_bitsize))
2959 return build (wanted_code, truth_type,
2960 make_bit_field_ref (ll_inner, type,
2961 ll_bitsize + rl_bitsize,
2962 MIN (ll_bitpos, rl_bitpos),
2964 make_bit_field_ref (lr_inner, type,
2965 lr_bitsize + rr_bitsize,
2966 MIN (lr_bitpos, rr_bitpos),
2972 /* Handle the case of comparisons with constants. If there is something in
2973 common between the masks, those bits of the constants must be the same.
2974 If not, the condition is always false. Test for this to avoid generating
2975 incorrect code below. */
2976 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2977 if (! integer_zerop (result)
2978 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2979 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2981 if (wanted_code == NE_EXPR)
2983 warning ("`or' of unmatched not-equal tests is always 1");
2984 return convert (truth_type, integer_one_node);
2988 warning ("`and' of mutually exclusive equal-tests is always zero");
2989 return convert (truth_type, integer_zero_node);
2993 /* Construct the expression we will return. First get the component
2994 reference we will make. Unless the mask is all ones the width of
2995 that field, perform the mask operation. Then compare with the
2996 merged constant. */
2997 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2998 ll_unsignedp || rl_unsignedp);
3000 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
3001 if (! all_ones_mask_p (ll_mask, lnbitsize))
3002 result = build (BIT_AND_EXPR, type, result, ll_mask);
3004 return build (wanted_code, truth_type, result,
3005 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
3008 /* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
3009 S, a SAVE_EXPR, return the expression actually being evaluated. Note
3010 that we may sometimes modify the tree. */
3013 strip_compound_expr (t, s)
3017 tree type = TREE_TYPE (t);
3018 enum tree_code code = TREE_CODE (t);
3020 /* See if this is the COMPOUND_EXPR we want to eliminate. */
3021 if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
3022 && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
3023 return TREE_OPERAND (t, 1);
3025 /* See if this is a COND_EXPR or a simple arithmetic operator. We
3026 don't bother handling any other types. */
3027 else if (code == COND_EXPR)
3029 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3030 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3031 TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
3033 else if (TREE_CODE_CLASS (code) == '1')
3034 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3035 else if (TREE_CODE_CLASS (code) == '<'
3036 || TREE_CODE_CLASS (code) == '2')
3038 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3039 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3045 /* Perform constant folding and related simplification of EXPR.
3046 The related simplifications include x*1 => x, x*0 => 0, etc.,
3047 and application of the associative law.
3048 NOP_EXPR conversions may be removed freely (as long as we
3049 are careful not to change the C type of the overall expression).
3050 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3051 but we can constant-fold them if they have constant operands. */
3057 register tree t = expr;
3058 tree t1 = NULL_TREE;
3060 tree type = TREE_TYPE (expr);
3061 register tree arg0, arg1;
3062 register enum tree_code code = TREE_CODE (t);
3066 /* WINS will be nonzero when the switch is done
3067 if all operands are constant. */
3071 /* Don't try to process an RTL_EXPR since its operands aren't trees. */
3072 if (code == RTL_EXPR)
3075 /* Return right away if already constant. */
3076 if (TREE_CONSTANT (t))
3078 if (code == CONST_DECL)
3079 return DECL_INITIAL (t);
3083 kind = TREE_CODE_CLASS (code);
3084 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3088 /* Special case for conversion ops that can have fixed point args. */
3089 arg0 = TREE_OPERAND (t, 0);
3091 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3093 STRIP_TYPE_NOPS (arg0);
3095 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3096 subop = TREE_REALPART (arg0);
3100 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3101 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3102 && TREE_CODE (subop) != REAL_CST
3103 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3105 /* Note that TREE_CONSTANT isn't enough:
3106 static var addresses are constant but we can't
3107 do arithmetic on them. */
3110 else if (kind == 'e' || kind == '<'
3111 || kind == '1' || kind == '2' || kind == 'r')
3113 register int len = tree_code_length[(int) code];
3115 for (i = 0; i < len; i++)
3117 tree op = TREE_OPERAND (t, i);
3121 continue; /* Valid for CALL_EXPR, at least. */
3123 if (kind == '<' || code == RSHIFT_EXPR)
3125 /* Signedness matters here. Perhaps we can refine this
3126 later. */
3127 STRIP_TYPE_NOPS (op);
3131 /* Strip any conversions that don't change the mode. */
3135 if (TREE_CODE (op) == COMPLEX_CST)
3136 subop = TREE_REALPART (op);
3140 if (TREE_CODE (subop) != INTEGER_CST
3141 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3142 && TREE_CODE (subop) != REAL_CST
3143 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3145 /* Note that TREE_CONSTANT isn't enough:
3146 static var addresses are constant but we can't
3147 do arithmetic on them. */
3157 /* If this is a commutative operation, and ARG0 is a constant, move it
3158 to ARG1 to reduce the number of tests below. */
3159 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3160 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3161 || code == BIT_AND_EXPR)
3162 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3164 tem = arg0; arg0 = arg1; arg1 = tem;
3166 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3167 TREE_OPERAND (t, 1) = tem;
3170 /* Now WINS is set as described above,
3171 ARG0 is the first operand of EXPR,
3172 and ARG1 is the second operand (if it has more than one operand).
3174 First check for cases where an arithmetic operation is applied to a
3175 compound, conditional, or comparison operation. Push the arithmetic
3176 operation inside the compound or conditional to see if any folding
3177 can then be done. Convert comparison to conditional for this purpose.
3178 This also optimizes non-constant cases that used to be done in
3179 expand_expr.
3181 Before we do that, see if this is a BIT_AND_EXPR or a BIT_IOR_EXPR where
3182 one of the operands is a comparison and the other is a comparison, a
3183 BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
3184 code below would make the expression more complex. Change it to a
3185 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
3186 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
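/* E.g. (illustrative): "(a < b) & (c < d)" becomes a TRUTH_AND_EXPR of
   the two tests, which is safe because both operands are known 0/1
   values; "(a < b) != (c < d)" becomes a TRUTH_XOR_EXPR and
   "(a < b) == (c < d)" the inversion of one.  */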
3188 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
3189 || code == EQ_EXPR || code == NE_EXPR)
3190 && ((truth_value_p (TREE_CODE (arg0))
3191 && (truth_value_p (TREE_CODE (arg1))
3192 || (TREE_CODE (arg1) == BIT_AND_EXPR
3193 && integer_onep (TREE_OPERAND (arg1, 1)))))
3194 || (truth_value_p (TREE_CODE (arg1))
3195 && (truth_value_p (TREE_CODE (arg0))
3196 || (TREE_CODE (arg0) == BIT_AND_EXPR
3197 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3199 t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
3200 : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
3204 if (code == EQ_EXPR)
3205 t = invert_truthvalue (t);
3210 if (TREE_CODE_CLASS (code) == '1')
3212 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3213 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3214 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3215 else if (TREE_CODE (arg0) == COND_EXPR)
3217 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3218 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3219 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3221 /* If this was a conversion, and all we did was to move it
3222 inside the COND_EXPR, bring it back out. But leave it if
3223 it is a conversion from integer to integer and the
3224 result precision is no wider than a word since such a
3225 conversion is cheap and may be optimized away by combine,
3226 while it couldn't if it were outside the COND_EXPR. Then return
3227 so we don't get into an infinite recursion loop taking the
3228 conversion out and then back in. */
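/* E.g. (illustrative): folding "(long) (p ? i : j)" first yields
   "p ? (long) i : (long) j"; unless it is an integer-to-integer
   conversion no wider than a word, it is hoisted back out here, built
   directly so fold is not re-entered on it.  */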
3230 if ((code == NOP_EXPR || code == CONVERT_EXPR
3231 || code == NON_LVALUE_EXPR)
3232 && TREE_CODE (t) == COND_EXPR
3233 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3234 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3235 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3236 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0)))
3237 && ! (INTEGRAL_TYPE_P (TREE_TYPE (t))
3238 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)))
3239 && TYPE_PRECISION (TREE_TYPE (t)) <= BITS_PER_WORD))
3240 t = build1 (code, type,
3242 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3243 TREE_OPERAND (t, 0),
3244 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3245 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3248 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3249 return fold (build (COND_EXPR, type, arg0,
3250 fold (build1 (code, type, integer_one_node)),
3251 fold (build1 (code, type, integer_zero_node))));
3253 else if (TREE_CODE_CLASS (code) == '2'
3254 || TREE_CODE_CLASS (code) == '<')
3256 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3257 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3258 fold (build (code, type,
3259 arg0, TREE_OPERAND (arg1, 1))));
3260 else if (TREE_CODE (arg1) == COND_EXPR
3261 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3263 tree test, true_value, false_value;
3265 if (TREE_CODE (arg1) == COND_EXPR)
3267 test = TREE_OPERAND (arg1, 0);
3268 true_value = TREE_OPERAND (arg1, 1);
3269 false_value = TREE_OPERAND (arg1, 2);
3274 true_value = integer_one_node;
3275 false_value = integer_zero_node;
3278 /* If ARG0 is complex we want to make sure we only evaluate
3279 it once. Though this is only required if it is volatile, it
3280 might be more efficient even if it is not. However, if we
3281 succeed in folding one part to a constant, we do not need
3282 to make this SAVE_EXPR. Since we do this optimization
3283 primarily to see if we do end up with a constant and this
3284 SAVE_EXPR interferes with later optimizations, suppressing
3285 it when we can is important. */
3287 if (TREE_CODE (arg0) != SAVE_EXPR
3288 && ((TREE_CODE (arg0) != VAR_DECL
3289 && TREE_CODE (arg0) != PARM_DECL)
3290 || TREE_SIDE_EFFECTS (arg0)))
3292 tree lhs = fold (build (code, type, arg0, true_value));
3293 tree rhs = fold (build (code, type, arg0, false_value));
3295 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3296 return fold (build (COND_EXPR, type, test, lhs, rhs));
3298 arg0 = save_expr (arg0);
3301 test = fold (build (COND_EXPR, type, test,
3302 fold (build (code, type, arg0, true_value)),
3303 fold (build (code, type, arg0, false_value))));
3304 if (TREE_CODE (arg0) == SAVE_EXPR)
3305 return build (COMPOUND_EXPR, type,
3306 convert (void_type_node, arg0),
3307 strip_compound_expr (test, arg0));
3309 return convert (type, test);
3312 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3313 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3314 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3315 else if (TREE_CODE (arg0) == COND_EXPR
3316 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3318 tree test, true_value, false_value;
3320 if (TREE_CODE (arg0) == COND_EXPR)
3322 test = TREE_OPERAND (arg0, 0);
3323 true_value = TREE_OPERAND (arg0, 1);
3324 false_value = TREE_OPERAND (arg0, 2);
3329 true_value = integer_one_node;
3330 false_value = integer_zero_node;
3333 if (TREE_CODE (arg1) != SAVE_EXPR
3334 && ((TREE_CODE (arg1) != VAR_DECL
3335 && TREE_CODE (arg1) != PARM_DECL)
3336 || TREE_SIDE_EFFECTS (arg1)))
3338 tree lhs = fold (build (code, type, true_value, arg1));
3339 tree rhs = fold (build (code, type, false_value, arg1));
3341 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs)
3342 || TREE_CONSTANT (arg1))
3343 return fold (build (COND_EXPR, type, test, lhs, rhs));
3345 arg1 = save_expr (arg1);
3348 test = fold (build (COND_EXPR, type, test,
3349 fold (build (code, type, true_value, arg1)),
3350 fold (build (code, type, false_value, arg1))));
3351 if (TREE_CODE (arg1) == SAVE_EXPR)
3352 return build (COMPOUND_EXPR, type,
3353 convert (void_type_node, arg1),
3354 strip_compound_expr (test, arg1));
3356 return convert (type, test);
3359 else if (TREE_CODE_CLASS (code) == '<'
3360 && TREE_CODE (arg0) == COMPOUND_EXPR)
3361 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3362 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3363 else if (TREE_CODE_CLASS (code) == '<'
3364 && TREE_CODE (arg1) == COMPOUND_EXPR)
3365 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3366 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3378 return fold (DECL_INITIAL (t));
3383 case FIX_TRUNC_EXPR:
3384 /* Other kinds of FIX are not handled properly by fold_convert. */
3386 if (TREE_TYPE (TREE_OPERAND (t, 0)) == TREE_TYPE (t))
3387 return TREE_OPERAND (t, 0);
3389 /* Handle cases of two conversions in a row. */
3390 if (TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3391 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3393 tree inside_type = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3394 tree inter_type = TREE_TYPE (TREE_OPERAND (t, 0));
3395 tree final_type = TREE_TYPE (t);
3396 int inside_int = INTEGRAL_TYPE_P (inside_type);
3397 int inside_ptr = POINTER_TYPE_P (inside_type);
3398 int inside_float = FLOAT_TYPE_P (inside_type);
3399 int inside_prec = TYPE_PRECISION (inside_type);
3400 int inside_unsignedp = TREE_UNSIGNED (inside_type);
3401 int inter_int = INTEGRAL_TYPE_P (inter_type);
3402 int inter_ptr = POINTER_TYPE_P (inter_type);
3403 int inter_float = FLOAT_TYPE_P (inter_type);
3404 int inter_prec = TYPE_PRECISION (inter_type);
3405 int inter_unsignedp = TREE_UNSIGNED (inter_type);
3406 int final_int = INTEGRAL_TYPE_P (final_type);
3407 int final_ptr = POINTER_TYPE_P (final_type);
3408 int final_float = FLOAT_TYPE_P (final_type);
3409 int final_prec = TYPE_PRECISION (final_type);
3410 int final_unsignedp = TREE_UNSIGNED (final_type);
3412 /* In addition to the cases of two conversions in a row
3413 handled below, if we are converting something to its own
3414 type via an object of identical or wider precision, neither
3415 conversion is needed. */
3416 if (inside_type == final_type
3417 && ((inter_int && final_int) || (inter_float && final_float))
3418 && inter_prec >= final_prec)
3419 return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
3421 /* Likewise, if the intermediate and final types are either both
3422 float or both integer, we don't need the middle conversion if
3423 it is wider than the final type and doesn't change the signedness
3424 (for integers). Avoid this if the final type is a pointer
3425 since then we sometimes need the inner conversion. */
3426 if ((((inter_int || inter_ptr) && (inside_int || inside_ptr))
3427 || (inter_float && inside_float))
3428 && inter_prec >= inside_prec
3429 && (inter_float || inter_unsignedp == inside_unsignedp)
3431 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3433 /* Two conversions in a row are not needed unless:
3434 - some conversion is floating-point (overstrict for now), or
3435 - the intermediate type is narrower than both initial and
3436 final types, or
3437 - the intermediate type and innermost type differ in signedness,
3438 and the outermost type is wider than the intermediate, or
3439 - the initial type is a pointer type and the precisions of the
3440 intermediate and final types differ, or
3441 - the final type is a pointer type and the precisions of the
3442 initial and intermediate types differ. */
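/* E.g. (illustrative): "(int) (long) i" for an int i needs no middle
   conversion when long is at least as wide as int, while in
   "(long) (char) i" the narrowing to char must be kept because the
   intermediate type is narrower than both end types.  */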
3443 if (! inside_float && ! inter_float && ! final_float
3444 && (inter_prec > inside_prec || inter_prec > final_prec)
3445 && ! (inside_int && inter_int
3446 && inter_unsignedp != inside_unsignedp
3447 && inter_prec < final_prec)
3448 && ((inter_unsignedp && inter_prec > inside_prec)
3449 == (final_unsignedp && final_prec > inter_prec))
3450 && ! (inside_ptr && inter_prec != final_prec)
3451 && ! (final_ptr && inside_prec != inter_prec))
3452 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3455 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3456 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3457 /* Detect assigning a bitfield. */
3458 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3459 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3461 /* Don't leave an assignment inside a conversion
3462 unless assigning a bitfield. */
3463 tree prev = TREE_OPERAND (t, 0);
3464 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3465 /* First do the assignment, then return converted constant. */
3466 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3472 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3475 return fold_convert (t, arg0);
3477 #if 0 /* This loses on &"foo"[0]. */
3482 /* Fold an expression like: "foo"[2] */
3483 if (TREE_CODE (arg0) == STRING_CST
3484 && TREE_CODE (arg1) == INTEGER_CST
3485 && !TREE_INT_CST_HIGH (arg1)
3486 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3488 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3489 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3490 force_fit_type (t, 0);
3497 if (TREE_CODE (arg0) == CONSTRUCTOR)
3499 tree m = purpose_member (arg1, CONSTRUCTOR_ELTS (arg0));
3506 TREE_CONSTANT (t) = wins;
3512 if (TREE_CODE (arg0) == INTEGER_CST)
3514 HOST_WIDE_INT low, high;
3515 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3516 TREE_INT_CST_HIGH (arg0),
3518 t = build_int_2 (low, high);
3519 TREE_TYPE (t) = type;
3521 = (TREE_OVERFLOW (arg0)
3522 | force_fit_type (t, overflow));
3523 TREE_CONSTANT_OVERFLOW (t)
3524 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3526 else if (TREE_CODE (arg0) == REAL_CST)
3527 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3528 TREE_TYPE (t) = type;
3530 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3531 return TREE_OPERAND (arg0, 0);
3533 /* Convert - (a - b) to (b - a) for non-floating-point. */
3534 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3535 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3536 TREE_OPERAND (arg0, 0));
3543 if (TREE_CODE (arg0) == INTEGER_CST)
3545 if (! TREE_UNSIGNED (type)
3546 && TREE_INT_CST_HIGH (arg0) < 0)
3548 HOST_WIDE_INT low, high;
3549 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3550 TREE_INT_CST_HIGH (arg0),
3552 t = build_int_2 (low, high);
3553 TREE_TYPE (t) = type;
3555 = (TREE_OVERFLOW (arg0)
3556 | force_fit_type (t, overflow));
3557 TREE_CONSTANT_OVERFLOW (t)
3558 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3561 else if (TREE_CODE (arg0) == REAL_CST)
3563 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3564 t = build_real (type,
3565 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3567 TREE_TYPE (t) = type;
3569 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3570 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3574 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
3576 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
3577 return build (COMPLEX_EXPR, TREE_TYPE (arg0),
3578 TREE_OPERAND (arg0, 0),
3579 fold (build1 (NEGATE_EXPR,
3580 TREE_TYPE (TREE_TYPE (arg0)),
3581 TREE_OPERAND (arg0, 1))));
3582 else if (TREE_CODE (arg0) == COMPLEX_CST)
3583 return build_complex (TREE_OPERAND (arg0, 0),
3584 fold (build1 (NEGATE_EXPR,
3585 TREE_TYPE (TREE_TYPE (arg0)),
3586 TREE_OPERAND (arg0, 1))));
3587 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
3588 return fold (build (TREE_CODE (arg0), type,
3589 fold (build1 (CONJ_EXPR, type,
3590 TREE_OPERAND (arg0, 0))),
3591 fold (build1 (CONJ_EXPR,
3592 type, TREE_OPERAND (arg0, 1)))));
3593 else if (TREE_CODE (arg0) == CONJ_EXPR)
3594 return TREE_OPERAND (arg0, 0);
3600 if (TREE_CODE (arg0) == INTEGER_CST)
3601 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3602 ~ TREE_INT_CST_HIGH (arg0));
3603 TREE_TYPE (t) = type;
3604 force_fit_type (t, 0);
3605 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
3606 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3608 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3609 return TREE_OPERAND (arg0, 0);
3613 /* A + (-B) -> A - B */
3614 if (TREE_CODE (arg1) == NEGATE_EXPR)
3615 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3616 else if (! FLOAT_TYPE_P (type))
3618 if (integer_zerop (arg1))
3619 return non_lvalue (convert (type, arg0));
3621 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3622 with a constant, and the two constants have no bits in common,
3623 we should treat this as a BIT_IOR_EXPR since this may produce more
3624 simplifications. */
3625 if (TREE_CODE (arg0) == BIT_AND_EXPR
3626 && TREE_CODE (arg1) == BIT_AND_EXPR
3627 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3628 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3629 && integer_zerop (const_binop (BIT_AND_EXPR,
3630 TREE_OPERAND (arg0, 1),
3631 TREE_OPERAND (arg1, 1), 0)))
3633 code = BIT_IOR_EXPR;
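/* E.g. (illustrative): "(x & 0xf0) + (y & 0x0f)" can generate no
   carries, so it is handled as "(x & 0xf0) | (y & 0x0f)" and may
   combine further.  */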
3637 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3638 about the case where C is a constant, just try one of the
3639 four possibilities. */
3641 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3642 && operand_equal_p (TREE_OPERAND (arg0, 1),
3643 TREE_OPERAND (arg1, 1), 0))
3644 return fold (build (MULT_EXPR, type,
3645 fold (build (PLUS_EXPR, type,
3646 TREE_OPERAND (arg0, 0),
3647 TREE_OPERAND (arg1, 0))),
3648 TREE_OPERAND (arg0, 1)));
3650 /* In IEEE floating point, x+0 may not equal x. */
3651 else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3653 && real_zerop (arg1))
3654 return non_lvalue (convert (type, arg0));
3656 /* In most languages, can't associate operations on floats
3657 through parentheses. Rather than remember where the parentheses
3658 were, we don't associate floats at all. It shouldn't matter much.
3659 However, associating multiplications is only very slightly
3660 inaccurate, so do that if -ffast-math is specified. */
3661 if (FLOAT_TYPE_P (type)
3662 && ! (flag_fast_math && code == MULT_EXPR))
3665 /* The varsign == -1 cases happen only for addition and subtraction.
3666 It says that the arg that was split was really CON minus VAR.
3667 The rest of the code applies to all associative operations. */
3673 if (split_tree (arg0, code, &var, &con, &varsign))
3677 /* EXPR is (CON-VAR) +- ARG1. */
3678 /* If it is + and VAR==ARG1, return just CONST. */
3679 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3680 return convert (TREE_TYPE (t), con);
3682 /* If ARG0 is a constant, don't change things around;
3683 instead keep all the constant computations together. */
3685 if (TREE_CONSTANT (arg0))
3688 /* Otherwise return (CON +- ARG1) - VAR. */
3689 t = build (MINUS_EXPR, type,
3690 fold (build (code, type, con, arg1)), var);
3694 /* EXPR is (VAR+CON) +- ARG1. */
3695 /* If it is - and VAR==ARG1, return just CONST. */
3696 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3697 return convert (TREE_TYPE (t), con);
3699 /* If ARG0 is a constant, don't change things around;
3700 instead keep all the constant computations together. */
3702 if (TREE_CONSTANT (arg0))
3705 /* Otherwise return VAR +- (ARG1 +- CON). */
3706 tem = fold (build (code, type, arg1, con));
3707 t = build (code, type, var, tem);
3709 if (integer_zerop (tem)
3710 && (code == PLUS_EXPR || code == MINUS_EXPR))
3711 return convert (type, var);
3712 /* If we have x +/- (c - d) [c an explicit integer]
3713 change it to x -/+ (d - c) since if d is relocatable
3714 then the latter can be a single immediate insn
3715 and the former cannot. */
3716 if (TREE_CODE (tem) == MINUS_EXPR
3717 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3719 tree tem1 = TREE_OPERAND (tem, 1);
3720 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3721 TREE_OPERAND (tem, 0) = tem1;
3723 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3729 if (split_tree (arg1, code, &var, &con, &varsign))
3731 if (TREE_CONSTANT (arg1))
3736 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3738 /* EXPR is ARG0 +- (CON +- VAR). */
3739 if (TREE_CODE (t) == MINUS_EXPR
3740 && operand_equal_p (var, arg0, 0))
3742 /* If VAR and ARG0 cancel, return just CON or -CON. */
3743 if (code == PLUS_EXPR)
3744 return convert (TREE_TYPE (t), con);
3745 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3746 convert (TREE_TYPE (t), con)));
3749 t = build (TREE_CODE (t), type,
3750 fold (build (code, TREE_TYPE (t), arg0, con)), var);
3752 if (integer_zerop (TREE_OPERAND (t, 0))
3753 && TREE_CODE (t) == PLUS_EXPR)
3754 return convert (TREE_TYPE (t), var);
3759 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3760 if (TREE_CODE (arg1) == REAL_CST)
3762 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3764 t1 = const_binop (code, arg0, arg1, 0);
3765 if (t1 != NULL_TREE)
3767 /* The return value should always have
3768 the same type as the original expression. */
3769 TREE_TYPE (t1) = TREE_TYPE (t);
3775 if (! FLOAT_TYPE_P (type))
3777 if (! wins && integer_zerop (arg0))
3778 return build1 (NEGATE_EXPR, type, arg1);
3779 if (integer_zerop (arg1))
3780 return non_lvalue (convert (type, arg0));
3782 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3783 about the case where C is a constant, just try one of the
3784 four possibilities. */
3786 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3787 && operand_equal_p (TREE_OPERAND (arg0, 1),
3788 TREE_OPERAND (arg1, 1), 0))
3789 return fold (build (MULT_EXPR, type,
3790 fold (build (MINUS_EXPR, type,
3791 TREE_OPERAND (arg0, 0),
3792 TREE_OPERAND (arg1, 0))),
3793 TREE_OPERAND (arg0, 1)));
3795 /* Convert A - (-B) to A + B. */
3796 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3797 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3799 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3802 /* Except with IEEE floating point, 0-x equals -x. */
3803 if (! wins && real_zerop (arg0))
3804 return build1 (NEGATE_EXPR, type, arg1);
3805 /* Except with IEEE floating point, x-0 equals x. */
3806 if (real_zerop (arg1))
3807 return non_lvalue (convert (type, arg0));
3810 /* Fold &x - &x. This can happen from &x.foo - &x.
3811 This is unsafe for certain floats even in non-IEEE formats.
3812 In IEEE, it is unsafe because it does wrong for NaNs.
3813 Also note that operand_equal_p is always false if an operand
3814 is volatile. */
3816 if ((! FLOAT_TYPE_P (type) || flag_fast_math)
3817 && operand_equal_p (arg0, arg1, 0))
3818 return convert (type, integer_zero_node);
3823 if (! FLOAT_TYPE_P (type))
3825 if (integer_zerop (arg1))
3826 return omit_one_operand (type, arg1, arg0);
3827 if (integer_onep (arg1))
3828 return non_lvalue (convert (type, arg0));
3830 /* ((A / C) * C) is A if the division is an
3831 EXACT_DIV_EXPR. Since C is normally a constant,
3832 just check for one of the four possibilities. */
3834 if (TREE_CODE (arg0) == EXACT_DIV_EXPR
3835 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3836 return TREE_OPERAND (arg0, 0);
3838 /* (a * (1 << b)) is (a << b) */
3839 if (TREE_CODE (arg1) == LSHIFT_EXPR
3840 && integer_onep (TREE_OPERAND (arg1, 0)))
3841 return fold (build (LSHIFT_EXPR, type, arg0,
3842 TREE_OPERAND (arg1, 1)));
3843 if (TREE_CODE (arg0) == LSHIFT_EXPR
3844 && integer_onep (TREE_OPERAND (arg0, 0)))
3845 return fold (build (LSHIFT_EXPR, type, arg1,
3846 TREE_OPERAND (arg0, 1)));
3850 /* x*0 is 0, except for IEEE floating point. */
3851 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3853 && real_zerop (arg1))
3854 return omit_one_operand (type, arg1, arg0);
3855 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3856 However, ANSI says we can drop signals,
3857 so we can do this anyway. */
3858 if (real_onep (arg1))
3859 return non_lvalue (convert (type, arg0));
3861 if (! wins && real_twop (arg1))
3863 tree arg = save_expr (arg0);
3864 return build (PLUS_EXPR, type, arg, arg);
3871 if (integer_all_onesp (arg1))
3872 return omit_one_operand (type, arg1, arg0);
3873 if (integer_zerop (arg1))
3874 return non_lvalue (convert (type, arg0));
3875 t1 = distribute_bit_expr (code, type, arg0, arg1);
3876 if (t1 != NULL_TREE)
3879 /* (A << C1) | (A >> C2) if A is unsigned and C1+C2 is the size of A
3880 is a rotate of A by C1 bits. */
3882 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3883 || TREE_CODE (arg0) == LSHIFT_EXPR)
3884 && (TREE_CODE (arg1) == RSHIFT_EXPR
3885 || TREE_CODE (arg1) == LSHIFT_EXPR)
3886 && TREE_CODE (arg0) != TREE_CODE (arg1)
3887 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3888 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3889 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3890 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3891 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3892 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3893 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3894 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3895 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3896 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3897 TREE_CODE (arg0) == LSHIFT_EXPR
3898 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
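/* A hypothetical stand-alone sketch of the source-level pattern
   recognized above (illustration only, assuming a 32-bit unsigned int;
   not used by the compiler itself): */
#if 0
static unsigned int
rotate_left_3 (unsigned int x)
{
  /* Folded here into an LROTATE_EXPR of x by 3, since 3 + 29 is the
     precision of the type.  */
  return (x << 3) | (x >> 29);
}
#endif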
3903 if (integer_zerop (arg1))
3904 return non_lvalue (convert (type, arg0));
3905 if (integer_all_onesp (arg1))
3906 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3911 if (integer_all_onesp (arg1))
3912 return non_lvalue (convert (type, arg0));
3913 if (integer_zerop (arg1))
3914 return omit_one_operand (type, arg1, arg0);
3915 t1 = distribute_bit_expr (code, type, arg0, arg1);
3916 if (t1 != NULL_TREE)
3918 /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char. */
3919 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3920 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3922 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3923 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3924 && (~TREE_INT_CST_LOW (arg0)
3925 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3926 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3928 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3929 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3931 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3932 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3933 && (~TREE_INT_CST_LOW (arg1)
3934 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3935 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3939 case BIT_ANDTC_EXPR:
3940 if (integer_all_onesp (arg0))
3941 return non_lvalue (convert (type, arg1));
3942 if (integer_zerop (arg0))
3943 return omit_one_operand (type, arg0, arg1);
3944 if (TREE_CODE (arg1) == INTEGER_CST)
3946 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3947 code = BIT_AND_EXPR;
3953 /* In most cases, do nothing with a divide by zero. */
3954 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3955 #ifndef REAL_INFINITY
3956 if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
3959 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3961 /* In IEEE floating point, x/1 is not equivalent to x for snans.
3962 However, ANSI says we can drop signals, so we can do this anyway. */
3963 if (real_onep (arg1))
3964 return non_lvalue (convert (type, arg0));
3966 /* If ARG1 is a constant, we can convert this to a multiply by the
3967 reciprocal. This does not have the same rounding properties,
3968 so only do this if -ffast-math. We can actually always safely
3969 do it if ARG1 is a power of two, but it's hard to tell if it is
3970 or not in a portable manner. */
3971 if (TREE_CODE (arg1) == REAL_CST && flag_fast_math
3972 && 0 != (tem = const_binop (code, build_real (type, dconst1),
3974 return fold (build (MULT_EXPR, type, arg0, tem));
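/* E.g. (illustrative): with -ffast-math, "x / 5.0" becomes "x * 0.2".
   For "x / 2.0" the reciprocal 0.5 is exact, but since powers of two
   are hard to detect portably, the flag gates every case.  */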
3978 case TRUNC_DIV_EXPR:
3979 case ROUND_DIV_EXPR:
3980 case FLOOR_DIV_EXPR:
3982 case EXACT_DIV_EXPR:
3983 if (integer_onep (arg1))
3984 return non_lvalue (convert (type, arg0));
3985 if (integer_zerop (arg1))
3988 /* If we have ((a / C1) / C2) where both divisions are the same type, try
3989 to simplify. First see if C1 * C2 overflows or not. */
3990 if (TREE_CODE (arg0) == code && TREE_CODE (arg1) == INTEGER_CST
3991 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
3995 new_divisor = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 1), arg1, 0);
3996 tem = const_binop (FLOOR_DIV_EXPR, new_divisor, arg1, 0);
3998 if (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_LOW (tem)
3999 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_HIGH (tem))
4001 /* If no overflow, divide by C1*C2. */
4002 return fold (build (code, type, TREE_OPERAND (arg0, 0), new_divisor));
4006 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
4007 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
4008 expressions, which often appear in the offsets or sizes of
4009 objects with a varying size. Only deal with positive divisors
4010 and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
4012 Look for NOPs and SAVE_EXPRs inside. */
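/* Worked example (illustrative): "((a * 8) + 4) / 4" matches with
   C1 == 8, C2 == 4, C3 == 4; C1 % C3 == 0, so the result below is
   "a * (8/4) + (4/4)", i.e. "a * 2 + 1".  */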
4014 if (TREE_CODE (arg1) == INTEGER_CST
4015 && tree_int_cst_sgn (arg1) >= 0)
4017 int have_save_expr = 0;
4018 tree c2 = integer_zero_node;
4021 if (TREE_CODE (xarg0) == SAVE_EXPR)
4022 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4026 if (TREE_CODE (xarg0) == PLUS_EXPR
4027 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4028 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4029 else if (TREE_CODE (xarg0) == MINUS_EXPR
4030 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4031 /* If we are doing this computation unsigned, the negate
4032 is incorrect. */
4033 && ! TREE_UNSIGNED (type))
4035 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4036 xarg0 = TREE_OPERAND (xarg0, 0);
4039 if (TREE_CODE (xarg0) == SAVE_EXPR)
4040 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4044 if (TREE_CODE (xarg0) == MULT_EXPR
4045 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4046 && tree_int_cst_sgn (TREE_OPERAND (xarg0, 1)) >= 0
4047 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
4048 TREE_OPERAND (xarg0, 1), arg1, 1))
4049 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
4050 TREE_OPERAND (xarg0, 1), 1)))
4051 && (tree_int_cst_sgn (c2) >= 0
4052 || integer_zerop (const_binop (TRUNC_MOD_EXPR, c2,
4055 tree outer_div = integer_one_node;
4056 tree c1 = TREE_OPERAND (xarg0, 1);
4059 /* If C3 > C1, set them equal and do a divide by
4060 C3/C1 at the end of the operation. */
4061 if (tree_int_cst_lt (c1, c3))
4062 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
4064 /* The result is A * (C1/C3) + (C2/C3). */
4065 t = fold (build (PLUS_EXPR, type,
4066 fold (build (MULT_EXPR, type,
4067 TREE_OPERAND (xarg0, 0),
4068 const_binop (code, c1, c3, 1))),
4069 const_binop (code, c2, c3, 1)));
4071 if (! integer_onep (outer_div))
4072 t = fold (build (code, type, t, convert (type, outer_div)));
4084 case FLOOR_MOD_EXPR:
4085 case ROUND_MOD_EXPR:
4086 case TRUNC_MOD_EXPR:
4087 if (integer_onep (arg1))
4088 return omit_one_operand (type, integer_zero_node, arg0);
4089 if (integer_zerop (arg1))
4092 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
4093 where C1 % C3 == 0. Handle similarly to the division case,
4094 but don't bother with SAVE_EXPRs. */
4096 if (TREE_CODE (arg1) == INTEGER_CST
4097 && ! integer_zerop (arg1))
4099 tree c2 = integer_zero_node;
4102 if (TREE_CODE (xarg0) == PLUS_EXPR
4103 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4104 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4105 else if (TREE_CODE (xarg0) == MINUS_EXPR
4106 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4107 && ! TREE_UNSIGNED (type))
4109 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4110 xarg0 = TREE_OPERAND (xarg0, 0);
4115 if (TREE_CODE (xarg0) == MULT_EXPR
4116 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4117 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
4118 TREE_OPERAND (xarg0, 1),
4120 && tree_int_cst_sgn (c2) >= 0)
4121 /* The result is (C2%C3). */
4122 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
4123 TREE_OPERAND (xarg0, 0));
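/* E.g. (illustrative): "(a * 6 + 4) % 3" matches with C1 == 6,
   C2 == 4, C3 == 3; since C1 % C3 == 0, the multiple drops out and
   the result is C2 % C3, i.e. 1.  */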
4132 if (integer_zerop (arg1))
4133 return non_lvalue (convert (type, arg0));
4134 /* Since a negative shift count is not well-defined,
4135 don't try to compute it in the compiler. */
4136 if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0)
4138 /* Rewrite an LROTATE_EXPR by a constant into an
4139 RROTATE_EXPR by a new constant. */
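/* E.g. (illustrative): on a 32-bit mode, a rotate left by 3 becomes a
   rotate right by 32 - 3 == 29, so the cases below need only handle
   RROTATE_EXPR.  */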
4140 if (code == LROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST)
4142 TREE_SET_CODE (t, RROTATE_EXPR);
4143 code = RROTATE_EXPR;
4144 TREE_OPERAND (t, 1) = arg1
4147 convert (TREE_TYPE (arg1),
4148 build_int_2 (GET_MODE_BITSIZE (TYPE_MODE (type)), 0)),
4150 if (tree_int_cst_sgn (arg1) < 0)
4154 /* If we have a rotate of a bit operation with the rotate count and
4155 the second operand of the bit operation both constant,
4156 permute the two operations. */
4157 if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
4158 && (TREE_CODE (arg0) == BIT_AND_EXPR
4159 || TREE_CODE (arg0) == BIT_ANDTC_EXPR
4160 || TREE_CODE (arg0) == BIT_IOR_EXPR
4161 || TREE_CODE (arg0) == BIT_XOR_EXPR)
4162 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
4163 return fold (build (TREE_CODE (arg0), type,
4164 fold (build (code, type,
4165 TREE_OPERAND (arg0, 0), arg1)),
4166 fold (build (code, type,
4167 TREE_OPERAND (arg0, 1), arg1))));
4169 /* Two consecutive rotates adding up to the width of the mode can
4170 be ignored. */
4171 if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
4172 && TREE_CODE (arg0) == RROTATE_EXPR
4173 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4174 && TREE_INT_CST_HIGH (arg1) == 0
4175 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
4176 && ((TREE_INT_CST_LOW (arg1)
4177 + TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)))
4178 == GET_MODE_BITSIZE (TYPE_MODE (type))))
4179 return TREE_OPERAND (arg0, 0);
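/* E.g., in a 32-bit mode, (X rotated right by 12) rotated right
by 20 is X itself, since 12 + 20 == 32. */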
4181 goto binary;
4183 case MIN_EXPR:
4184 if (operand_equal_p (arg0, arg1, 0))
4185 return omit_one_operand (type, arg0, arg1);
4186 if (INTEGRAL_TYPE_P (type)
4187 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
4188 return omit_one_operand (type, arg1, arg0);
4189 goto associate;
4191 case MAX_EXPR:
4192 if (operand_equal_p (arg0, arg1, 0))
4193 return omit_one_operand (type, arg0, arg1);
4194 if (INTEGRAL_TYPE_P (type)
4195 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
4196 return omit_one_operand (type, arg1, arg0);
4197 goto associate;
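/* E.g., MIN_EXPR (x, x) folds to x, and MAX_EXPR (x, INT_MAX)
folds to INT_MAX; omit_one_operand still evaluates the dropped
operand if it has side effects. */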
4199 case TRUTH_NOT_EXPR:
4200 /* Note that the operand of this must be an int
4201 and its values must be 0 or 1.
4202 ("true" is a fixed value perhaps depending on the language,
4203 but we don't handle values other than 1 correctly yet.) */
4204 return invert_truthvalue (arg0);
4206 case TRUTH_ANDIF_EXPR:
4207 /* Note that the operands of this must be ints
4208 and their values must be 0 or 1.
4209 ("true" is a fixed value perhaps depending on the language.) */
4210 /* If first arg is constant zero, return it. */
4211 if (integer_zerop (arg0))
4212 return arg0;
4213 case TRUTH_AND_EXPR:
4214 /* If either arg is constant true, drop it. */
4215 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4216 return non_lvalue (arg1);
4217 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4218 return non_lvalue (arg0);
4219 /* If second arg is constant zero, result is zero, but first arg
4220 must be evaluated. */
4221 if (integer_zerop (arg1))
4222 return omit_one_operand (type, arg1, arg0);
4224 truth_andor:
4225 /* We only do these simplifications if we are optimizing. */
4226 if (!optimize)
4227 return t;
4229 /* Check for things like (A || B) && (A || C). We can convert this
4230 to A || (B && C). Note that either operator can be any of the four
4231 truth and/or operations and the transformation will still be
4232 valid. Also note that we only care about order for the
4233 ANDIF and ORIF operators. */
4234 if (TREE_CODE (arg0) == TREE_CODE (arg1)
4235 && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
4236 || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
4237 || TREE_CODE (arg0) == TRUTH_AND_EXPR
4238 || TREE_CODE (arg0) == TRUTH_OR_EXPR))
4240 tree a00 = TREE_OPERAND (arg0, 0);
4241 tree a01 = TREE_OPERAND (arg0, 1);
4242 tree a10 = TREE_OPERAND (arg1, 0);
4243 tree a11 = TREE_OPERAND (arg1, 1);
4244 int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
4245 || TREE_CODE (arg0) == TRUTH_AND_EXPR)
4246 && (code == TRUTH_AND_EXPR
4247 || code == TRUTH_OR_EXPR));
4249 if (operand_equal_p (a00, a10, 0))
4250 return fold (build (TREE_CODE (arg0), type, a00,
4251 fold (build (code, type, a01, a11))));
4252 else if (commutative && operand_equal_p (a00, a11, 0))
4253 return fold (build (TREE_CODE (arg0), type, a00,
4254 fold (build (code, type, a01, a10))));
4255 else if (commutative && operand_equal_p (a01, a10, 0))
4256 return fold (build (TREE_CODE (arg0), type, a01,
4257 fold (build (code, type, a00, a11))));
4259 /* This case is tricky because we must either have commutative
4260 operators or else A10 must not have side-effects. */
4262 else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
4263 && operand_equal_p (a01, a11, 0))
4264 return fold (build (TREE_CODE (arg0), type,
4265 fold (build (code, type, a00, a10)),
4266 a01));
4267 }
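/* For example, (a || b) && (a || c) folds to a || (b && c), and
(a && b) || (c && b) folds to (a || c) && b, provided c has no
side effects (or both operators are the plain, non-short-circuit
kind). */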
4269 /* Check for the possibility of merging component references. If our
4270 lhs is another similar operation, try to merge its rhs with our
4271 rhs. Then try to merge our lhs and rhs. */
4272 if (TREE_CODE (arg0) == code
4273 && 0 != (tem = fold_truthop (code, type,
4274 TREE_OPERAND (arg0, 1), arg1)))
4275 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
4277 if ((tem = fold_truthop (code, type, arg0, arg1)) != 0)
4278 return tem;
4280 return t;
4282 case TRUTH_ORIF_EXPR:
4283 /* Note that the operands of this must be ints
4284 and their values must be 0 or 1.
4285 ("true" is a fixed value perhaps depending on the language.) */
4286 /* If first arg is constant true, return it. */
4287 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4288 return arg0;
4289 case TRUTH_OR_EXPR:
4290 /* If either arg is constant zero, drop it. */
4291 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
4292 return non_lvalue (arg1);
4293 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
4294 return non_lvalue (arg0);
4295 /* If second arg is constant true, result is true, but we must
4296 evaluate first arg. */
4297 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4298 return omit_one_operand (type, arg1, arg0);
4299 goto truth_andor;
4301 case TRUTH_XOR_EXPR:
4302 /* If either arg is constant zero, drop it. */
4303 if (integer_zerop (arg0))
4304 return non_lvalue (arg1);
4305 if (integer_zerop (arg1))
4306 return non_lvalue (arg0);
4307 /* If either arg is constant true, this is a logical inversion. */
4308 if (integer_onep (arg0))
4309 return non_lvalue (invert_truthvalue (arg1));
4310 if (integer_onep (arg1))
4311 return non_lvalue (invert_truthvalue (arg0));
4312 return t;
4314 case EQ_EXPR:
4315 case NE_EXPR:
4316 case LT_EXPR:
4317 case GT_EXPR:
4318 case LE_EXPR:
4319 case GE_EXPR:
4320 /* If one arg is a constant integer, put it last. */
4321 if (TREE_CODE (arg0) == INTEGER_CST
4322 && TREE_CODE (arg1) != INTEGER_CST)
4323 {
4324 TREE_OPERAND (t, 0) = arg1;
4325 TREE_OPERAND (t, 1) = arg0;
4326 arg0 = TREE_OPERAND (t, 0);
4327 arg1 = TREE_OPERAND (t, 1);
4328 code = swap_tree_comparison (code);
4329 TREE_SET_CODE (t, code);
4330 }
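/* E.g., 3 < x becomes x > 3, so the cases below only have to look
for the constant on the right-hand side. */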
4332 /* Convert foo++ == CONST into ++foo == CONST + INCR.
4333 First, see if one arg is constant; find the constant arg
4334 and the other one. */
4335 {
4336 tree constop = 0, varop;
4337 int constopnum = -1;
4339 if (TREE_CONSTANT (arg1))
4340 constopnum = 1, constop = arg1, varop = arg0;
4341 if (TREE_CONSTANT (arg0))
4342 constopnum = 0, constop = arg0, varop = arg1;
4344 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
4345 {
4346 /* This optimization is invalid for ordered comparisons
4347 if CONST+INCR overflows or if foo+incr might overflow.
4348 This optimization is invalid for floating point due to rounding.
4349 For pointer types we assume overflow doesn't happen. */
4350 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4351 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4352 && (code == EQ_EXPR || code == NE_EXPR)))
4353 {
4354 tree newconst
4355 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
4356 constop, TREE_OPERAND (varop, 1)));
4357 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
4359 t = build (code, type, TREE_OPERAND (t, 0),
4360 TREE_OPERAND (t, 1));
4361 TREE_OPERAND (t, constopnum) = newconst;
4362 return t;
4363 }
4364 }
4365 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
4366 {
4367 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4368 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4369 && (code == EQ_EXPR || code == NE_EXPR)))
4370 {
4371 tree newconst
4372 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
4373 constop, TREE_OPERAND (varop, 1)));
4374 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
4375 t = build (code, type, TREE_OPERAND (t, 0),
4376 TREE_OPERAND (t, 1));
4377 TREE_OPERAND (t, constopnum) = newconst;
4378 return t;
4379 }
4380 }
4381 }
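/* E.g., for an int foo, foo++ == 5 becomes ++foo == 6: the
increment still happens, but the comparison no longer needs the
saved original value of foo. */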
4383 /* Change X >= CST to X > (CST - 1) if CST is positive. */
4384 if (TREE_CODE (arg1) == INTEGER_CST
4385 && TREE_CODE (arg0) != INTEGER_CST
4386 && tree_int_cst_sgn (arg1) > 0)
4388 switch (TREE_CODE (t))
4389 {
4390 case GE_EXPR:
4391 code = GT_EXPR;
4392 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4393 t = build (code, type, TREE_OPERAND (t, 0), arg1);
4394 break;
4396 case LT_EXPR:
4397 code = LE_EXPR;
4398 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4399 t = build (code, type, TREE_OPERAND (t, 0), arg1);
4400 break;
4401 }
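/* Thus, for the positive constant 5, x >= 5 becomes x > 4 and
x < 5 becomes x <= 4. */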
4404 /* If this is an EQ or NE comparison with zero and ARG0 is
4405 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
4406 two operations, but the latter can be done in one less insn
4407 on machines that have only two-operand insns or on which a
4408 constant cannot be the first operand. */
4409 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
4410 && TREE_CODE (arg0) == BIT_AND_EXPR)
4412 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
4413 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
4414 return
4415 fold (build (code, type,
4416 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4417 build (RSHIFT_EXPR,
4418 TREE_TYPE (TREE_OPERAND (arg0, 0)),
4419 TREE_OPERAND (arg0, 1),
4420 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
4421 convert (TREE_TYPE (arg0),
4422 integer_one_node)),
4423 arg1));
4424 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
4425 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
4426 return
4427 fold (build (code, type,
4428 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4429 build (RSHIFT_EXPR,
4430 TREE_TYPE (TREE_OPERAND (arg0, 1)),
4431 TREE_OPERAND (arg0, 0),
4432 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
4433 convert (TREE_TYPE (arg0),
4434 integer_one_node)),
4435 arg1));
4438 /* If this is an NE or EQ comparison of zero against the result of a
4439 signed MOD operation whose second operand is a power of 2, make
4440 the MOD operation unsigned since it is simpler and equivalent. */
4441 if ((code == NE_EXPR || code == EQ_EXPR)
4442 && integer_zerop (arg1)
4443 && ! TREE_UNSIGNED (TREE_TYPE (arg0))
4444 && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
4445 || TREE_CODE (arg0) == CEIL_MOD_EXPR
4446 || TREE_CODE (arg0) == FLOOR_MOD_EXPR
4447 || TREE_CODE (arg0) == ROUND_MOD_EXPR)
4448 && integer_pow2p (TREE_OPERAND (arg0, 1)))
4449 {
4450 tree newtype = unsigned_type (TREE_TYPE (arg0));
4451 tree newmod = build (TREE_CODE (arg0), newtype,
4452 convert (newtype, TREE_OPERAND (arg0, 0)),
4453 convert (newtype, TREE_OPERAND (arg0, 1)));
4455 return build (code, type, newmod, convert (newtype, arg1));
4456 }
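/* E.g., for signed x, (x % 8) == 0 becomes ((unsigned) x % 8) == 0;
the two agree for every x because 8 divides 2**N in an N-bit
mode, and the unsigned MOD can be done as a bit mask. */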
4458 /* If this is an NE comparison of zero with an AND of one, remove the
4459 comparison since the AND will give the correct value. */
4460 if (code == NE_EXPR && integer_zerop (arg1)
4461 && TREE_CODE (arg0) == BIT_AND_EXPR
4462 && integer_onep (TREE_OPERAND (arg0, 1)))
4463 return convert (type, arg0);
4465 /* If we have (A & C) == C where C is a power of 2, convert this into
4466 (A & C) != 0. Similarly for NE_EXPR. */
4467 if ((code == EQ_EXPR || code == NE_EXPR)
4468 && TREE_CODE (arg0) == BIT_AND_EXPR
4469 && integer_pow2p (TREE_OPERAND (arg0, 1))
4470 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
4471 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
4472 arg0, integer_zero_node);
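/* E.g., (x & 8) == 8 becomes (x & 8) != 0; since 8 is a single-bit
mask the two are equivalent, and a test against zero is usually
cheaper. */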
4474 /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
4475 and similarly for >= into !=. */
4476 if ((code == LT_EXPR || code == GE_EXPR)
4477 && TREE_UNSIGNED (TREE_TYPE (arg0))
4478 && TREE_CODE (arg1) == LSHIFT_EXPR
4479 && integer_onep (TREE_OPERAND (arg1, 0)))
4480 return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4481 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4482 TREE_OPERAND (arg1, 1)),
4483 convert (TREE_TYPE (arg0), integer_zero_node));
4485 else if ((code == LT_EXPR || code == GE_EXPR)
4486 && TREE_UNSIGNED (TREE_TYPE (arg0))
4487 && (TREE_CODE (arg1) == NOP_EXPR
4488 || TREE_CODE (arg1) == CONVERT_EXPR)
4489 && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
4490 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
4491 return
4492 build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4493 convert (TREE_TYPE (arg0),
4494 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4495 TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
4496 convert (TREE_TYPE (arg0), integer_zero_node));
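/* E.g., for unsigned u, u < (1 << n) becomes (u >> n) == 0 and
u >= (1 << n) becomes (u >> n) != 0, with or without a
conversion around the shift. */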
4498 /* Simplify comparison of something with itself. (For IEEE
4499 floating-point, we can only do some of these simplifications.) */
4500 if (operand_equal_p (arg0, arg1, 0))
4501 {
4502 switch (code)
4503 {
4504 case EQ_EXPR:
4505 case GE_EXPR:
4506 case LE_EXPR:
4507 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4508 {
4509 t = build_int_2 (1, 0);
4510 TREE_TYPE (t) = type;
4511 return t;
4512 }
4513 code = EQ_EXPR;
4514 TREE_SET_CODE (t, code);
4515 break;
4517 case NE_EXPR:
4518 /* For NE, we can only do this simplification if integer. */
4519 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4520 break;
4521 /* ... fall through ... */
4522 case GT_EXPR:
4523 case LT_EXPR:
4524 t = build_int_2 (0, 0);
4525 TREE_TYPE (t) = type;
4526 return t;
4527 }
4528 }
4530 /* An unsigned comparison against 0 can be simplified. */
4531 if (integer_zerop (arg1)
4532 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
4533 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
4534 && TREE_UNSIGNED (TREE_TYPE (arg1)))
4535 {
4536 switch (TREE_CODE (t))
4537 {
4538 case GT_EXPR:
4539 code = NE_EXPR;
4540 TREE_SET_CODE (t, NE_EXPR);
4541 break;
4542 case LE_EXPR:
4543 code = EQ_EXPR;
4544 TREE_SET_CODE (t, EQ_EXPR);
4545 break;
4546 case GE_EXPR:
4547 return omit_one_operand (type,
4548 convert (type, integer_one_node),
4549 arg0);
4550 case LT_EXPR:
4551 return omit_one_operand (type,
4552 convert (type, integer_zero_node),
4553 arg0);
4554 }
4555 }
4557 /* If we are comparing an expression that just has comparisons
4558 of two integer values, arithmetic expressions of those comparisons,
4559 and constants, we can simplify it. There are only three cases
4560 to check: the two values can either be equal, the first can be
4561 greater, or the second can be greater. Fold the expression for
4562 those three values. Since each value must be 0 or 1, we have
4563 eight possibilities, each of which corresponds to the constant 0
4564 or 1 or one of the six possible comparisons.
4566 This handles common cases like (a > b) == 0 but also handles
4567 expressions like ((x > y) - (y > x)) > 0, which supposedly
4568 occur in macroized code. */
4570 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
4572 tree cval1 = 0, cval2 = 0;
4573 int save_p = 0;
4575 if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
4576 /* Don't handle degenerate cases here; they should already
4577 have been handled anyway. */
4578 && cval1 != 0 && cval2 != 0
4579 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
4580 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
4581 && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
4582 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
4583 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
4585 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
4586 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
4588 /* We can't just pass T to eval_subst in case cval1 or cval2
4589 was the same as ARG1. */
4591 tree high_result
4592 = fold (build (code, type,
4593 eval_subst (arg0, cval1, maxval, cval2, minval),
4594 arg1));
4595 tree equal_result
4596 = fold (build (code, type,
4597 eval_subst (arg0, cval1, maxval, cval2, maxval),
4598 arg1));
4599 tree low_result
4600 = fold (build (code, type,
4601 eval_subst (arg0, cval1, minval, cval2, maxval),
4602 arg1));
4604 /* All three of these results should be 0 or 1. Confirm they
4605 are. Then use those values to select the proper code
4606 to use. */
4608 if ((integer_zerop (high_result)
4609 || integer_onep (high_result))
4610 && (integer_zerop (equal_result)
4611 || integer_onep (equal_result))
4612 && (integer_zerop (low_result)
4613 || integer_onep (low_result)))
4615 /* Make a 3-bit mask with the high-order bit being the
4616 value for `>', the next for '=', and the low for '<'. */
4617 switch ((integer_onep (high_result) * 4)
4618 + (integer_onep (equal_result) * 2)
4619 + integer_onep (low_result))
4620 {
4621 case 0:
4622 /* Always false. */
4623 return omit_one_operand (type, integer_zero_node, arg0);
4624 case 1:
4625 code = LT_EXPR;
4626 break;
4627 case 2:
4628 code = EQ_EXPR;
4629 break;
4630 case 3:
4631 code = LE_EXPR;
4632 break;
4633 case 4:
4634 code = GT_EXPR;
4635 break;
4636 case 5:
4637 code = NE_EXPR;
4638 break;
4639 case 6:
4640 code = GE_EXPR;
4641 break;
4642 case 7:
4643 /* Always true. */
4644 return omit_one_operand (type, integer_one_node, arg0);
4645 }
4647 t = build (code, type, cval1, cval2);
4648 if (save_p)
4649 return save_expr (t);
4650 return t;
4651 }
4652 }
4653 }
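/* E.g., for ((x > y) - (y > x)) > 0: substituting the three
possible orderings of x and y gives the values 1, 0 and 0, so the
mask is 4 and the whole expression folds to x > y. */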
4656 /* If this is a comparison of a field, we may be able to simplify it. */
4657 if ((TREE_CODE (arg0) == COMPONENT_REF
4658 || TREE_CODE (arg0) == BIT_FIELD_REF)
4659 && (code == EQ_EXPR || code == NE_EXPR)
4660 /* Handle the constant case even without -O
4661 to make sure the warnings are given. */
4662 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
4663 {
4664 t1 = optimize_bit_field_compare (code, type, arg0, arg1);
4665 if (t1)
4666 return t1;
4667 }
4668 /* If this is a comparison of complex values and either or both
4669 sides are a COMPLEX_EXPR, it is best to split up the comparisons
4670 and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR. This
4671 may prevent needless evaluations. */
4672 if ((code == EQ_EXPR || code == NE_EXPR)
4673 && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
4674 && (TREE_CODE (arg0) == COMPLEX_EXPR
4675 || TREE_CODE (arg1) == COMPLEX_EXPR))
4677 tree subtype = TREE_TYPE (TREE_TYPE (arg0));
4678 tree real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
4679 tree imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
4680 tree real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
4681 tree imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));
4683 return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
4684 : TRUTH_ORIF_EXPR),
4685 type,
4686 fold (build (code, type, real0, real1)),
4687 fold (build (code, type, imag0, imag1))));
4690 /* From here on, the only cases we handle are when the result is
4691 known to be a constant.
4693 To compute GT, swap the arguments and do LT.
4694 To compute GE, do LT and invert the result.
4695 To compute LE, swap the arguments, do LT and invert the result.
4696 To compute NE, do EQ and invert the result.
4698 Therefore, the code below must handle only EQ and LT. */
4700 if (code == LE_EXPR || code == GT_EXPR)
4701 {
4702 tem = arg0, arg0 = arg1, arg1 = tem;
4703 code = swap_tree_comparison (code);
4704 }
4706 /* Note that it is safe to invert for real values here because we
4707 will check below in the one case that it matters. */
4709 invert = 0;
4710 if (code == NE_EXPR || code == GE_EXPR)
4711 {
4712 invert = 1;
4713 code = invert_tree_comparison (code);
4714 }
4716 /* Compute a result for LT or EQ if args permit;
4717 otherwise return T. */
4718 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
4719 {
4720 if (code == EQ_EXPR)
4721 t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
4722 == TREE_INT_CST_LOW (arg1))
4723 && (TREE_INT_CST_HIGH (arg0)
4724 == TREE_INT_CST_HIGH (arg1)),
4725 0);
4726 else
4727 t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
4728 ? INT_CST_LT_UNSIGNED (arg0, arg1)
4729 : INT_CST_LT (arg0, arg1)),
4730 0);
4731 }
4733 /* Assume a nonexplicit constant cannot equal an explicit one,
4734 since such code would be undefined anyway.
4735 Exception: on sysvr4, using #pragma weak,
4736 a label can come out as 0. */
4737 else if (TREE_CODE (arg1) == INTEGER_CST
4738 && !integer_zerop (arg1)
4739 && TREE_CONSTANT (arg0)
4740 && TREE_CODE (arg0) == ADDR_EXPR
4741 && code == EQ_EXPR)
4742 t1 = build_int_2 (0, 0);
4744 /* Two real constants can be compared explicitly. */
4745 else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
4746 {
4747 /* If either operand is a NaN, the result is false with two
4748 exceptions: First, an NE_EXPR is true on NaNs, but that case
4749 is already handled correctly since we will be inverting the
4750 result for NE_EXPR. Second, if we had inverted a LE_EXPR
4751 or a GE_EXPR into a LT_EXPR, we must return true so that it
4752 will be inverted into false. */
4754 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
4755 || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
4756 t1 = build_int_2 (invert && code == LT_EXPR, 0);
4758 else if (code == EQ_EXPR)
4759 t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
4760 TREE_REAL_CST (arg1)),
4761 0);
4762 else
4763 t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
4764 TREE_REAL_CST (arg1)),
4765 0);
4766 }
4768 if (t1 == NULL_TREE)
4769 return t;
4771 if (invert)
4772 TREE_INT_CST_LOW (t1) ^= 1;
4774 TREE_TYPE (t1) = type;
4775 return t1;
4777 case COND_EXPR:
4778 /* Pedantic ANSI C says that a conditional expression is never an lvalue,
4779 so all simple results must be passed through pedantic_non_lvalue. */
4780 if (TREE_CODE (arg0) == INTEGER_CST)
4781 return pedantic_non_lvalue
4782 (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
4783 else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
4784 return pedantic_omit_one_operand (type, arg1, arg0);
4786 /* If the second operand is zero, invert the comparison and swap
4787 the second and third operands. Likewise if the second operand
4788 is constant and the third is not or if the third operand is
4789 equivalent to the first operand of the comparison. */
4791 if (integer_zerop (arg1)
4792 || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
4793 || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4794 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4795 TREE_OPERAND (t, 2),
4796 TREE_OPERAND (arg0, 1))))
4798 /* See if this can be inverted. If it can't, possibly because
4799 it was a floating-point inequality comparison, don't do
4800 anything. */
4801 tem = invert_truthvalue (arg0);
4803 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4804 {
4805 t = build (code, type, tem,
4806 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
4808 arg1 = TREE_OPERAND (t, 2);
4809 }
4810 }
4813 /* If we have A op B ? A : C, we may be able to convert this to a
4814 simpler expression, depending on the operation and the values
4815 of B and C. IEEE floating point prevents this though,
4816 because A or B might be -0.0 or a NaN. */
4818 if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4819 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4820 || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
4821 || flag_fast_math)
4822 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4823 arg1, TREE_OPERAND (arg0, 1)))
4824 {
4825 tree arg2 = TREE_OPERAND (t, 2);
4826 enum tree_code comp_code = TREE_CODE (arg0);
4830 /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
4831 depending on the comparison operation. */
4832 if ((FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 1)))
4833 ? real_zerop (TREE_OPERAND (arg0, 1))
4834 : integer_zerop (TREE_OPERAND (arg0, 1)))
4835 && TREE_CODE (arg2) == NEGATE_EXPR
4836 && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
4837 switch (comp_code)
4838 {
4839 case EQ_EXPR:
4840 return pedantic_non_lvalue
4841 (fold (build1 (NEGATE_EXPR, type, arg1)));
4842 case NE_EXPR:
4843 return pedantic_non_lvalue (convert (type, arg1));
4844 case GE_EXPR:
4845 case GT_EXPR:
4846 return pedantic_non_lvalue
4847 (fold (build1 (ABS_EXPR, type, arg1)));
4848 case LE_EXPR:
4849 case LT_EXPR:
4850 return pedantic_non_lvalue
4851 (fold (build1 (NEGATE_EXPR, type,
4852 fold (build1 (ABS_EXPR, type, arg1)))));
4853 }
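/* E.g., x > 0 ? x : -x folds to abs (x), whereas
x == 0 ? x : -x folds to -x, since the two arms agree when x is
zero. */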
4855 /* If this is A != 0 ? A : 0, this is simply A. For ==, it is
4856 zero. */
4858 if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
4859 {
4860 if (comp_code == NE_EXPR)
4861 return pedantic_non_lvalue (convert (type, arg1));
4862 else if (comp_code == EQ_EXPR)
4863 return pedantic_non_lvalue (convert (type, integer_zero_node));
4864 }
4866 /* If this is A op B ? A : B, this is either A, B, min (A, B),
4867 or max (A, B), depending on the operation. */
4869 if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
4870 arg2, TREE_OPERAND (arg0, 0)))
4871 {
4872 tree comp_op0 = TREE_OPERAND (arg0, 0);
4873 tree comp_op1 = TREE_OPERAND (arg0, 1);
4874 tree comp_type = TREE_TYPE (comp_op0);
4876 switch (comp_code)
4877 {
4878 case EQ_EXPR:
4879 return pedantic_non_lvalue (convert (type, arg2));
4880 case NE_EXPR:
4881 return pedantic_non_lvalue (convert (type, arg1));
4882 case LE_EXPR:
4883 case LT_EXPR:
4884 return pedantic_non_lvalue
4885 (convert (type, (fold (build (MIN_EXPR, comp_type,
4886 comp_op0, comp_op1)))));
4887 case GE_EXPR:
4888 case GT_EXPR:
4889 return pedantic_non_lvalue
4890 (convert (type, fold (build (MAX_EXPR, comp_type,
4891 comp_op0, comp_op1))));
4892 }
4893 }
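/* E.g., x < y ? x : y folds to MIN_EXPR (x, y), and
x >= y ? x : y folds to MAX_EXPR (x, y). */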
4895 /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
4896 we might still be able to simplify this. For example,
4897 if C1 is one less or one more than C2, this might have started
4898 out as a MIN or MAX and been transformed by this function.
4899 Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE. */
4901 if (INTEGRAL_TYPE_P (type)
4902 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4903 && TREE_CODE (arg2) == INTEGER_CST)
4904 switch (comp_code)
4905 {
4906 case EQ_EXPR:
4907 /* We can replace A with C1 in this case. */
4908 arg1 = convert (type, TREE_OPERAND (arg0, 1));
4909 t = build (code, type, TREE_OPERAND (t, 0), arg1,
4910 TREE_OPERAND (t, 2));
4911 break;
4913 case LT_EXPR:
4914 /* If C1 is C2 + 1, this is min(A, C2). */
4915 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4916 && operand_equal_p (TREE_OPERAND (arg0, 1),
4917 const_binop (PLUS_EXPR, arg2,
4918 integer_one_node, 0), 1))
4919 return pedantic_non_lvalue
4920 (fold (build (MIN_EXPR, type, arg1, arg2)));
4921 break;
4923 case LE_EXPR:
4924 /* If C1 is C2 - 1, this is min(A, C2). */
4925 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4926 && operand_equal_p (TREE_OPERAND (arg0, 1),
4927 const_binop (MINUS_EXPR, arg2,
4928 integer_one_node, 0), 1))
4929 return pedantic_non_lvalue
4930 (fold (build (MIN_EXPR, type, arg1, arg2)));
4931 break;
4933 case GT_EXPR:
4934 /* If C1 is C2 - 1, this is max(A, C2). */
4935 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4936 && operand_equal_p (TREE_OPERAND (arg0, 1),
4937 const_binop (MINUS_EXPR, arg2,
4938 integer_one_node, 0), 1))
4939 return pedantic_non_lvalue
4940 (fold (build (MAX_EXPR, type, arg1, arg2)));
4941 break;
4943 case GE_EXPR:
4944 /* If C1 is C2 + 1, this is max(A, C2). */
4945 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4946 && operand_equal_p (TREE_OPERAND (arg0, 1),
4947 const_binop (PLUS_EXPR, arg2,
4948 integer_one_node, 0), 1))
4949 return pedantic_non_lvalue
4950 (fold (build (MAX_EXPR, type, arg1, arg2)));
4951 }
4952 }
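/* E.g., x < 5 ? x : 4 folds to MIN_EXPR (x, 4), since here
C1 == 5 is C2 + 1 == 4 + 1. */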
4955 /* If the second operand is simpler than the third, swap them
4956 since that produces better jump optimization results. */
4957 if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
4958 || TREE_CODE (arg1) == SAVE_EXPR)
4959 && ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
4960 || TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
4961 || TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
4962 {
4963 /* See if this can be inverted. If it can't, possibly because
4964 it was a floating-point inequality comparison, don't do
4965 anything. */
4966 tem = invert_truthvalue (arg0);
4968 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4969 {
4970 t = build (code, type, tem,
4971 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
4973 arg1 = TREE_OPERAND (t, 2);
4974 }
4975 }
4978 /* Convert A ? 1 : 0 to simply A. */
4979 if (integer_onep (TREE_OPERAND (t, 1))
4980 && integer_zerop (TREE_OPERAND (t, 2))
4981 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
4982 call to fold will try to move the conversion inside
4983 a COND, which will recurse. In that case, the COND_EXPR
4984 is probably the best choice, so leave it alone. */
4985 && type == TREE_TYPE (arg0))
4986 return pedantic_non_lvalue (arg0);
4988 /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
4989 operation is simply A & 2. */
4991 if (integer_zerop (TREE_OPERAND (t, 2))
4992 && TREE_CODE (arg0) == NE_EXPR
4993 && integer_zerop (TREE_OPERAND (arg0, 1))
4994 && integer_pow2p (arg1)
4995 && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
4996 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
4997 arg1, 1))
4998 return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
5000 return t;
5002 case COMPOUND_EXPR:
5003 /* When pedantic, a compound expression can be neither an lvalue
5004 nor an integer constant expression. */
5005 if (TREE_SIDE_EFFECTS (arg0) || pedantic)
5006 return t;
5007 /* Don't let (0, 0) be null pointer constant. */
5008 if (integer_zerop (arg1))
5009 return non_lvalue (arg1);
5010 return arg1;
5012 case COMPLEX_EXPR:
5014 return build_complex (arg0, arg1);
5016 case REALPART_EXPR:
5018 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
5019 return t;
5020 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
5021 return omit_one_operand (type, TREE_OPERAND (arg0, 0),
5022 TREE_OPERAND (arg0, 1));
5023 else if (TREE_CODE (arg0) == COMPLEX_CST)
5024 return TREE_REALPART (arg0);
5025 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
5026 return fold (build (TREE_CODE (arg0), type,
5027 fold (build1 (REALPART_EXPR, type,
5028 TREE_OPERAND (arg0, 0))),
5029 fold (build1 (REALPART_EXPR,
5030 type, TREE_OPERAND (arg0, 1)))));
5031 return t;
5033 case IMAGPART_EXPR:
5034 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
5035 return convert (type, integer_zero_node);
5036 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
5037 return omit_one_operand (type, TREE_OPERAND (arg0, 1),
5038 TREE_OPERAND (arg0, 0));
5039 else if (TREE_CODE (arg0) == COMPLEX_CST)
5040 return TREE_IMAGPART (arg0);
5041 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
5042 return fold (build (TREE_CODE (arg0), type,
5043 fold (build1 (IMAGPART_EXPR, type,
5044 TREE_OPERAND (arg0, 0))),
5045 fold (build1 (IMAGPART_EXPR, type,
5046 TREE_OPERAND (arg0, 1)))));
5047 return t;
5049 /* Pull arithmetic ops out of the CLEANUP_POINT_EXPR where
5050 possible. */
5051 case CLEANUP_POINT_EXPR:
5052 if (! TREE_SIDE_EFFECTS (arg0))
5053 return convert (type, arg0);
5055 {
5056 enum tree_code code0 = TREE_CODE (arg0);
5057 int kind0 = TREE_CODE_CLASS (code0);
5058 tree arg00 = TREE_OPERAND (arg0, 0);
5059 tree arg01;
5061 if (kind0 == '1' || code0 == TRUTH_NOT_EXPR)
5062 return fold (build1 (code0, type,
5063 fold (build1 (CLEANUP_POINT_EXPR,
5064 TREE_TYPE (arg00), arg00))));
5066 if (kind0 == '<' || kind0 == '2'
5067 || code0 == TRUTH_ANDIF_EXPR || code0 == TRUTH_ORIF_EXPR
5068 || code0 == TRUTH_AND_EXPR || code0 == TRUTH_OR_EXPR
5069 || code0 == TRUTH_XOR_EXPR)
5070 {
5071 arg01 = TREE_OPERAND (arg0, 1);
5073 if (! TREE_SIDE_EFFECTS (arg00))
5074 return fold (build (code0, type, arg00,
5075 fold (build1 (CLEANUP_POINT_EXPR,
5076 TREE_TYPE (arg01), arg01))));
5078 if (! TREE_SIDE_EFFECTS (arg01))
5079 return fold (build (code0, type,
5080 fold (build1 (CLEANUP_POINT_EXPR,
5081 TREE_TYPE (arg00), arg00)),
5082 arg01));
5083 }
5084 }
5086 return t;
5088 default:
5089 return t;
5090 } /* switch (code) */