1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
20 /*@@ This file should be rewritten to use an arbitrary precision
21 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
22 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
23 @@ The routines that translate from the ap rep should
24 @@ warn if precision etc. is lost.
25 @@ This would also make life easier when this technology is used
26 @@ for cross-compilers. */
29 /* The entry points in this file are fold, size_int and size_binop.
31 fold takes a tree as argument and returns a simplified tree.
33 size_binop takes a tree code for an arithmetic operation
34 and two operands that are trees, and produces a tree for the
35 result, assuming the type comes from `sizetype'.
37 size_int takes an integer value, and creates a tree constant
38 with type from `sizetype'. */
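
/* Illustrative sketch (assumes nothing beyond the entry points described
   above; the helper name and variables are hypothetical): how a caller
   might use fold, size_int and size_binop.  */
#if 0
static tree
example_entry_points (expr)
     tree expr;
{
  /* Simplify an arbitrary expression tree; fold returns an equivalent,
     possibly constant-folded tree.  */
  tree simplified = fold (expr);

  /* Size arithmetic in `sizetype': 4 + 8 folds to the constant 12.  */
  tree twelve = size_binop (PLUS_EXPR, size_int (4), size_int (8));

  return simplified ? simplified : twelve;
}
#endif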
46 /* Handle floating overflow for `const_binop'. */
47 static jmp_buf float_error;
49 static void encode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT, HOST_WIDE_INT));
50 static void decode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *, HOST_WIDE_INT *));
51 int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
52 HOST_WIDE_INT, HOST_WIDE_INT,
53 HOST_WIDE_INT, HOST_WIDE_INT *,
54 HOST_WIDE_INT *, HOST_WIDE_INT *,
56 static int split_tree PROTO((tree, enum tree_code, tree *, tree *, int *));
57 static tree const_binop PROTO((enum tree_code, tree, tree, int));
58 static tree fold_convert PROTO((tree, tree));
59 static enum tree_code invert_tree_comparison PROTO((enum tree_code));
60 static enum tree_code swap_tree_comparison PROTO((enum tree_code));
61 static int truth_value_p PROTO((enum tree_code));
62 static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
63 static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
64 static tree eval_subst PROTO((tree, tree, tree, tree, tree));
65 static tree omit_one_operand PROTO((tree, tree, tree));
66 static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
67 static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
68 static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
70 static tree decode_field_reference PROTO((tree, int *, int *,
71 enum machine_mode *, int *,
73 static int all_ones_mask_p PROTO((tree, int));
74 static int simple_operand_p PROTO((tree));
75 static tree range_test PROTO((enum tree_code, tree, enum tree_code,
76 enum tree_code, tree, tree, tree));
77 static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
78 static tree strip_compound_expr PROTO((tree, tree));
84 /* Yield nonzero if a signed left shift of A by B bits overflows. */
85 #define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))
87 /* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
88 Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
89 Then this yields nonzero if overflow occurred during the addition.
90 Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
91 Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
92 #define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
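
/* Worked example (illustrative only, assuming a 32-bit HOST_WIDE_INT):
   0x7fffffff + 1 wraps to 0x80000000.  The operands have equal sign bits
   while the sum's sign bit differs, so ~(a ^ b) and (a ^ sum) are both
   negative, their AND is negative, and overflow_sum_sign yields nonzero.
   For 1 + (-1) = 0 the operands differ in sign, ~(a ^ b) is nonnegative,
   and the macro yields zero: no overflow.  */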
94 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
95 We do that by representing the two-word integer in 4 words, with only
96 HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number. */
99 ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
100 #define HIGHPART(x) \
101 ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
102 #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
104 /* Unpack a two-word integer into 4 words.
105 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
106 WORDS points to the array of HOST_WIDE_INTs. */
109 encode (words, low, hi)
110 HOST_WIDE_INT *words;
111 HOST_WIDE_INT low, hi;
113 words[0] = LOWPART (low);
114 words[1] = HIGHPART (low);
115 words[2] = LOWPART (hi);
116 words[3] = HIGHPART (hi);
119 /* Pack an array of 4 words into a two-word integer.
120 WORDS points to the array of words.
121 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
124 decode (words, low, hi)
125 HOST_WIDE_INT *words;
126 HOST_WIDE_INT *low, *hi;
128 *low = words[0] | words[1] * BASE;
129 *hi = words[2] | words[3] * BASE;
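
/* Illustrative sketch (assuming a 32-bit HOST_WIDE_INT, so each of the 4
   words holds 16 bits; the helper name is hypothetical): a round trip
   through encode and decode.  */
#if 0
static void
example_encode_decode ()
{
  HOST_WIDE_INT words[4], lo, hi;

  encode (words, 0x12345678, 0xabcd);
  /* words[0] == 0x5678, words[1] == 0x1234, words[2] == 0xabcd, words[3] == 0.  */

  decode (words, &lo, &hi);
  /* lo == 0x12345678 and hi == 0xabcd again.  */
}
#endif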
132 /* Make the integer constant T valid for its type
133 by setting to 0 or 1 all the bits in the constant
134 that don't belong in the type.
135 Yield 1 if a signed overflow occurs, 0 otherwise.
136 If OVERFLOW is nonzero, a signed overflow has already occurred
137 in calculating T, so propagate it.
139 Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
143 force_fit_type (t, overflow)
147 HOST_WIDE_INT low, high;
150 if (TREE_CODE (t) == REAL_CST)
152 #ifdef CHECK_FLOAT_VALUE
153 CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
159 else if (TREE_CODE (t) != INTEGER_CST)
162 low = TREE_INT_CST_LOW (t);
163 high = TREE_INT_CST_HIGH (t);
165 if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
168 prec = TYPE_PRECISION (TREE_TYPE (t));
170 /* First clear all bits that are beyond the type's precision. */
172 if (prec == 2 * HOST_BITS_PER_WIDE_INT)
174 else if (prec > HOST_BITS_PER_WIDE_INT)
176 TREE_INT_CST_HIGH (t)
177 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
181 TREE_INT_CST_HIGH (t) = 0;
182 if (prec < HOST_BITS_PER_WIDE_INT)
183 TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
186 /* Unsigned types do not suffer sign extension or overflow. */
187 if (TREE_UNSIGNED (TREE_TYPE (t)))
190 /* If the value's sign bit is set, extend the sign. */
191 if (prec != 2 * HOST_BITS_PER_WIDE_INT
192 && (prec > HOST_BITS_PER_WIDE_INT
193 ? (TREE_INT_CST_HIGH (t)
194 & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
195 : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
197 /* Value is negative:
198 set to 1 all the bits that are outside this type's precision. */
199 if (prec > HOST_BITS_PER_WIDE_INT)
201 TREE_INT_CST_HIGH (t)
202 |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
206 TREE_INT_CST_HIGH (t) = -1;
207 if (prec < HOST_BITS_PER_WIDE_INT)
208 TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
212 /* Yield nonzero if signed overflow occurred. */
214 ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
218 /* Add two doubleword integers with doubleword result.
219 Each argument is given as two `HOST_WIDE_INT' pieces.
220 One argument is L1 and H1; the other, L2 and H2.
221 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
224 add_double (l1, h1, l2, h2, lv, hv)
225 HOST_WIDE_INT l1, h1, l2, h2;
226 HOST_WIDE_INT *lv, *hv;
231 h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);
235 return overflow_sum_sign (h1, h2, h);
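
/* Illustrative sketch (assuming a 32-bit HOST_WIDE_INT, so the pair forms
   a 64-bit value; the helper name is hypothetical): a carry from the low
   word into the high word, with no signed overflow.  */
#if 0
static void
example_add_double ()
{
  HOST_WIDE_INT lo, hi;
  int overflowed
    = add_double ((HOST_WIDE_INT) 0xffffffff, (HOST_WIDE_INT) 0,
		  (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0, &lo, &hi);
  /* The low words wrap to 0 and carry, so lo == 0, hi == 1,
     overflowed == 0.  */
}
#endif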
238 /* Negate a doubleword integer with doubleword result.
239 Return nonzero if the operation overflows, assuming it's signed.
240 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
241 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
244 neg_double (l1, h1, lv, hv)
245 HOST_WIDE_INT l1, h1;
246 HOST_WIDE_INT *lv, *hv;
252 return (*hv & h1) < 0;
262 /* Multiply two doubleword integers with doubleword result.
263 Return nonzero if the operation overflows, assuming it's signed.
264 Each argument is given as two `HOST_WIDE_INT' pieces.
265 One argument is L1 and H1; the other, L2 and H2.
266 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
269 mul_double (l1, h1, l2, h2, lv, hv)
270 HOST_WIDE_INT l1, h1, l2, h2;
271 HOST_WIDE_INT *lv, *hv;
273 HOST_WIDE_INT arg1[4];
274 HOST_WIDE_INT arg2[4];
275 HOST_WIDE_INT prod[4 * 2];
276 register unsigned HOST_WIDE_INT carry;
277 register int i, j, k;
278 HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
280 encode (arg1, l1, h1);
281 encode (arg2, l2, h2);
283 bzero ((char *) prod, sizeof prod);
285 for (i = 0; i < 4; i++)
288 for (j = 0; j < 4; j++)
291 /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
292 carry += arg1[i] * arg2[j];
293 /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
295 prod[k] = LOWPART (carry);
296 carry = HIGHPART (carry);
301 decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */
303 /* Check for overflow by calculating the top half of the answer in full;
304 it should agree with the low half's sign bit. */
305 decode (prod+4, &toplow, &tophigh);
308 neg_double (l2, h2, &neglow, &neghigh);
309 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
313 neg_double (l1, h1, &neglow, &neghigh);
314 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
316 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
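
/* Illustrative sketch (assuming a 32-bit HOST_WIDE_INT; the helper name is
   hypothetical): a product that needs the high word but still fits the
   signed doubleword, so no overflow is reported.  */
#if 0
static void
example_mul_double ()
{
  HOST_WIDE_INT lo, hi;
  int overflowed
    = mul_double ((HOST_WIDE_INT) 0x10000, (HOST_WIDE_INT) 0,
		  (HOST_WIDE_INT) 0x10000, (HOST_WIDE_INT) 0, &lo, &hi);
  /* 0x10000 * 0x10000 == 2**32, so lo == 0, hi == 1, overflowed == 0.  */
}
#endif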
319 /* Shift the doubleword integer in L1, H1 left by COUNT places
320 keeping only PREC bits of result.
321 Shift right if COUNT is negative.
322 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
323 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
326 lshift_double (l1, h1, count, prec, lv, hv, arith)
327 HOST_WIDE_INT l1, h1, count;
329 HOST_WIDE_INT *lv, *hv;
334 rshift_double (l1, h1, - count, prec, lv, hv, arith);
339 count = (unsigned HOST_WIDE_INT) count % prec;
341 if (count >= HOST_BITS_PER_WIDE_INT)
343 *hv = (unsigned HOST_WIDE_INT) l1 << count - HOST_BITS_PER_WIDE_INT;
348 *hv = (((unsigned HOST_WIDE_INT) h1 << count)
349 | ((unsigned HOST_WIDE_INT) l1 >> HOST_BITS_PER_WIDE_INT - count - 1 >> 1));
350 *lv = (unsigned HOST_WIDE_INT) l1 << count;
354 /* Shift the doubleword integer in L1, H1 right by COUNT places
355 keeping only PREC bits of result. COUNT must be positive.
356 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
357 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
360 rshift_double (l1, h1, count, prec, lv, hv, arith)
361 HOST_WIDE_INT l1, h1, count;
363 HOST_WIDE_INT *lv, *hv;
366 unsigned HOST_WIDE_INT signmask;
368 ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
372 count = (unsigned HOST_WIDE_INT) count % prec;
374 if (count >= HOST_BITS_PER_WIDE_INT)
377 *lv = ((signmask << 2 * HOST_BITS_PER_WIDE_INT - count - 1 << 1)
378 | ((unsigned HOST_WIDE_INT) h1 >> count - HOST_BITS_PER_WIDE_INT));
382 *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
383 | ((unsigned HOST_WIDE_INT) h1 << HOST_BITS_PER_WIDE_INT - count - 1 << 1));
384 *hv = ((signmask << HOST_BITS_PER_WIDE_INT - count)
385 | ((unsigned HOST_WIDE_INT) h1 >> count));
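
/* Illustrative sketch (assuming a 32-bit HOST_WIDE_INT, so PREC is 64; the
   helper name is hypothetical): ARITH selects sign extension on a right
   shift of the doubleword value -8.  */
#if 0
static void
example_rshift_double ()
{
  HOST_WIDE_INT lo, hi;

  rshift_double ((HOST_WIDE_INT) -8, (HOST_WIDE_INT) -1, 2, 64, &lo, &hi, 1);
  /* Arithmetic shift: the result is -2, i.e. lo == -2, hi == -1.  */

  rshift_double ((HOST_WIDE_INT) -8, (HOST_WIDE_INT) -1, 2, 64, &lo, &hi, 0);
  /* Logical shift: zeros come in from the top, so hi == 0x3fffffff.  */
}
#endif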
389 /* Rotate the doubleword integer in L1, H1 left by COUNT places
390 keeping only PREC bits of result.
391 Rotate right if COUNT is negative.
392 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
395 lrotate_double (l1, h1, count, prec, lv, hv)
396 HOST_WIDE_INT l1, h1, count;
398 HOST_WIDE_INT *lv, *hv;
400 HOST_WIDE_INT arg1[4];
406 rrotate_double (l1, h1, - count, prec, lv, hv);
410 encode (arg1, l1, h1);
415 carry = arg1[4 - 1] >> (HOST_BITS_PER_WIDE_INT / 2 - 1);
418 for (i = 0; i < 4; i++)
420 carry += arg1[i] << 1;
421 arg1[i] = LOWPART (carry);
422 carry = HIGHPART (carry);
427 decode (arg1, lv, hv);
430 /* Rotate the doubleword integer in L1, H1 left by COUNT places
431 keeping only PREC bits of result. COUNT must be positive.
432 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
435 rrotate_double (l1, h1, count, prec, lv, hv)
436 HOST_WIDE_INT l1, h1, count;
438 HOST_WIDE_INT *lv, *hv;
440 HOST_WIDE_INT arg1[4];
444 encode (arg1, l1, h1);
452 for (i = 4 - 1; i >= 0; i--)
456 arg1[i] = LOWPART (carry >> 1);
461 decode (arg1, lv, hv);
464 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
465 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
466 CODE is a tree code for a kind of division, one of
467 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
469 It controls how the quotient is rounded to an integer.
470 Return nonzero if the operation overflows.
471 UNS nonzero says do unsigned division. */
474 div_and_round_double (code, uns,
475 lnum_orig, hnum_orig, lden_orig, hden_orig,
476 lquo, hquo, lrem, hrem)
479 HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
480 HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
481 HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
484 HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
485 HOST_WIDE_INT den[4], quo[4];
487 unsigned HOST_WIDE_INT work;
488 register int carry = 0;
489 HOST_WIDE_INT lnum = lnum_orig;
490 HOST_WIDE_INT hnum = hnum_orig;
491 HOST_WIDE_INT lden = lden_orig;
492 HOST_WIDE_INT hden = hden_orig;
495 if ((hden == 0) && (lden == 0))
498 /* calculate quotient sign and convert operands to unsigned. */
504 /* (minimum integer) / (-1) is the only overflow case. */
505 if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
511 neg_double (lden, hden, &lden, &hden);
515 if (hnum == 0 && hden == 0)
516 { /* single precision */
518 /* This unsigned division rounds toward zero. */
519 *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
524 { /* trivial case: dividend < divisor */
525 /* hden != 0 already checked. */
532 bzero ((char *) quo, sizeof quo);
534 bzero ((char *) num, sizeof num); /* to zero 9th element */
535 bzero ((char *) den, sizeof den);
537 encode (num, lnum, hnum);
538 encode (den, lden, hden);
540 /* Special code for when the divisor < BASE. */
541 if (hden == 0 && lden < BASE)
543 /* hnum != 0 already checked. */
544 for (i = 4 - 1; i >= 0; i--)
546 work = num[i] + carry * BASE;
547 quo[i] = work / (unsigned HOST_WIDE_INT) lden;
548 carry = work % (unsigned HOST_WIDE_INT) lden;
553 /* Full double precision division,
554 with thanks to Don Knuth's "Seminumerical Algorithms". */
555 int quo_est, scale, num_hi_sig, den_hi_sig;
557 /* Find the highest non-zero divisor digit. */
558 for (i = 4 - 1; ; i--)
564 Ensure that the first digit of the divisor is at least BASE/2.
565 This is required by the quotient digit estimation algorithm. */
567 scale = BASE / (den[den_hi_sig] + 1);
568 if (scale > 1) { /* scale divisor and dividend */
570 for (i = 0; i <= 4 - 1; i++) {
571 work = (num[i] * scale) + carry;
572 num[i] = LOWPART (work);
573 carry = HIGHPART (work);
576 for (i = 0; i <= 4 - 1; i++) {
577 work = (den[i] * scale) + carry;
578 den[i] = LOWPART (work);
579 carry = HIGHPART (work);
580 if (den[i] != 0) den_hi_sig = i;
587 for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
588 /* guess the next quotient digit, quo_est, by dividing the first
589 two remaining dividend digits by the high order divisor digit.
590 quo_est is never low and is at most 2 high. */
591 unsigned HOST_WIDE_INT tmp;
593 num_hi_sig = i + den_hi_sig + 1;
594 work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
595 if (num[num_hi_sig] != den[den_hi_sig])
596 quo_est = work / den[den_hi_sig];
600 /* refine quo_est so it's usually correct, and at most one high. */
601 tmp = work - quo_est * den[den_hi_sig];
603 && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
606 /* Try QUO_EST as the quotient digit, by multiplying the
607 divisor by QUO_EST and subtracting from the remaining dividend.
608 Keep in mind that QUO_EST is the I - 1st digit. */
611 for (j = 0; j <= den_hi_sig; j++)
613 work = quo_est * den[j] + carry;
614 carry = HIGHPART (work);
615 work = num[i + j] - LOWPART (work);
616 num[i + j] = LOWPART (work);
617 carry += HIGHPART (work) != 0;
620 /* if quo_est was high by one, then num[i] went negative and
621 we need to correct things. */
623 if (num[num_hi_sig] < carry)
626 carry = 0; /* add divisor back in */
627 for (j = 0; j <= den_hi_sig; j++)
629 work = num[i + j] + den[j] + carry;
630 carry = HIGHPART (work);
631 num[i + j] = LOWPART (work);
633 num [num_hi_sig] += carry;
636 /* store the quotient digit. */
641 decode (quo, lquo, hquo);
644 /* if result is negative, make it so. */
646 neg_double (*lquo, *hquo, lquo, hquo);
648 /* compute trial remainder: rem = num - (quo * den) */
649 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
650 neg_double (*lrem, *hrem, lrem, hrem);
651 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
656 case TRUNC_MOD_EXPR: /* round toward zero */
657 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
661 case FLOOR_MOD_EXPR: /* round toward negative infinity */
662 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
665 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
668 else return overflow;
672 case CEIL_MOD_EXPR: /* round toward positive infinity */
673 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
675 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
678 else return overflow;
682 case ROUND_MOD_EXPR: /* round to closest integer */
684 HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
685 HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
687 /* get absolute values */
688 if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
689 if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
691 /* if (2 * abs (lrem) >= abs (lden)) */
692 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
693 labs_rem, habs_rem, <wice, &htwice);
694 if (((unsigned HOST_WIDE_INT) habs_den
695 < (unsigned HOST_WIDE_INT) htwice)
696 || (((unsigned HOST_WIDE_INT) habs_den
697 == (unsigned HOST_WIDE_INT) htwice)
698 && ((unsigned HOST_WIDE_INT) labs_den
699 < (unsigned HOST_WIDE_INT) ltwice)))
703 add_double (*lquo, *hquo,
704 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
707 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
710 else return overflow;
718 /* compute true remainder: rem = num - (quo * den) */
719 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
720 neg_double (*lrem, *hrem, lrem, hrem);
721 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
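
/* Illustrative sketch (the helper name is hypothetical): how CODE selects
   the rounding of -7 / 2.  TRUNC_DIV_EXPR gives quotient -3, remainder -1;
   FLOOR_DIV_EXPR gives -4, remainder 1; CEIL_DIV_EXPR gives -3, remainder
   -1; ROUND_DIV_EXPR gives -4, remainder 1 (ties round away from zero).
   In every case quo * den + rem == num.  */
#if 0
static void
example_div_and_round_double ()
{
  HOST_WIDE_INT lquo, hquo, lrem, hrem;

  div_and_round_double (FLOOR_DIV_EXPR, 0,
			(HOST_WIDE_INT) -7, (HOST_WIDE_INT) -1,
			(HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
			&lquo, &hquo, &lrem, &hrem);
  /* lquo == -4, hquo == -1 (sign extension), lrem == 1, hrem == 0.  */
}
#endif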
725 #ifndef REAL_ARITHMETIC
726 /* Effectively truncate a real value to represent the nearest possible value
727 in a narrower mode. The result is actually represented in the same data
728 type as the argument, but its value is usually different.
730 A trap may occur during the FP operations and it is the responsibility
731 of the calling function to have a handler established. */
734 real_value_truncate (mode, arg)
735 enum machine_mode mode;
738 return REAL_VALUE_TRUNCATE (mode, arg);
741 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
743 /* Check for infinity in an IEEE double precision number. */
749 /* The IEEE 64-bit double format. */
754 unsigned exponent : 11;
755 unsigned mantissa1 : 20;
760 unsigned mantissa1 : 20;
761 unsigned exponent : 11;
767 if (u.big_endian.sign == 1)
770 return (u.big_endian.exponent == 2047
771 && u.big_endian.mantissa1 == 0
772 && u.big_endian.mantissa2 == 0);
777 return (u.little_endian.exponent == 2047
778 && u.little_endian.mantissa1 == 0
779 && u.little_endian.mantissa2 == 0);
783 /* Check whether an IEEE double precision number is a NaN. */
789 /* The IEEE 64-bit double format. */
794 unsigned exponent : 11;
795 unsigned mantissa1 : 20;
800 unsigned mantissa1 : 20;
801 unsigned exponent : 11;
807 if (u.big_endian.sign == 1)
810 return (u.big_endian.exponent == 2047
811 && (u.big_endian.mantissa1 != 0
812 || u.big_endian.mantissa2 != 0));
817 return (u.little_endian.exponent == 2047
818 && (u.little_endian.mantissa1 != 0
819 || u.little_endian.mantissa2 != 0));
823 /* Check for a negative IEEE double precision number. */
829 /* The IEEE 64-bit double format. */
834 unsigned exponent : 11;
835 unsigned mantissa1 : 20;
840 unsigned mantissa1 : 20;
841 unsigned exponent : 11;
847 if (u.big_endian.sign == 1)
850 return u.big_endian.sign;
855 return u.little_endian.sign;
858 #else /* Target not IEEE */
860 /* Let's assume other float formats don't have infinity.
861 (This can be overridden by redefining REAL_VALUE_ISINF.) */
869 /* Let's assume other float formats don't have NaNs.
870 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
878 /* Let's assume other float formats don't have minus zero.
879 (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
886 #endif /* Target not IEEE */
887 #endif /* no REAL_ARITHMETIC */
889 /* Split a tree IN into a constant and a variable part
890 that could be combined with CODE to make IN.
891 CODE must be a commutative arithmetic operation.
892 Store the constant part into *CONP and the variable in *VARP.
893 Return 1 if this was done; zero means the tree IN did not decompose
896 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
897 Therefore, we must tell the caller whether the variable part
898 was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
899 The value stored is the coefficient for the variable term.
900 The constant term we return should always be added;
901 we negate it if necessary. */
904 split_tree (in, code, varp, conp, varsignp)
910 register tree outtype = TREE_TYPE (in);
914 /* Strip any conversions that don't change the machine mode. */
915 while ((TREE_CODE (in) == NOP_EXPR
916 || TREE_CODE (in) == CONVERT_EXPR)
917 && (TYPE_MODE (TREE_TYPE (in))
918 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
919 in = TREE_OPERAND (in, 0);
921 if (TREE_CODE (in) == code
922 || (! FLOAT_TYPE_P (TREE_TYPE (in))
923 /* We can associate addition and subtraction together
924 (even though the C standard doesn't say so)
925 for integers because the value is not affected.
926 For reals, the value might be affected, so we can't. */
927 && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
928 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
930 enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
931 if (code == INTEGER_CST)
933 *conp = TREE_OPERAND (in, 0);
934 *varp = TREE_OPERAND (in, 1);
935 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
936 && TREE_TYPE (*varp) != outtype)
937 *varp = convert (outtype, *varp);
938 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
941 if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
943 *conp = TREE_OPERAND (in, 1);
944 *varp = TREE_OPERAND (in, 0);
946 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
947 && TREE_TYPE (*varp) != outtype)
948 *varp = convert (outtype, *varp);
949 if (TREE_CODE (in) == MINUS_EXPR)
951 /* If operation is subtraction and constant is second,
952 must negate it to get an additive constant.
953 And this cannot be done unless it is a manifest constant.
954 It could also be the address of a static variable.
955 We cannot negate that, so give up. */
956 if (TREE_CODE (*conp) == INTEGER_CST)
957 /* Subtracting from integer_zero_node loses for long long. */
958 *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
964 if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
966 *conp = TREE_OPERAND (in, 0);
967 *varp = TREE_OPERAND (in, 1);
968 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
969 && TREE_TYPE (*varp) != outtype)
970 *varp = convert (outtype, *varp);
971 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
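
/* Worked example (illustrative only): given the tree for `x - 5' and
   CODE == PLUS_EXPR, split_tree stores `x' in *VARP, the folded constant
   `-5' in *CONP (negated so it can always be added) and 1 in *VARSIGNP,
   so the caller may treat the expression as `x + (-5)'.  For `5 - x' it
   stores `5' in *CONP, `x' in *VARP and -1 in *VARSIGNP.  */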
978 /* Combine two constants ARG1 and ARG2 under operation CODE
979 to produce a new constant.
980 We assume ARG1 and ARG2 have the same data type,
981 or at least are the same kind of constant and the same machine mode.
983 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
986 const_binop (code, arg1, arg2, notrunc)
988 register tree arg1, arg2;
991 if (TREE_CODE (arg1) == INTEGER_CST)
993 register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
994 register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
995 HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
996 HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
997 HOST_WIDE_INT low, hi;
998 HOST_WIDE_INT garbagel, garbageh;
1000 int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
1006 t = build_int_2 (int1l | int2l, int1h | int2h);
1010 t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
1014 t = build_int_2 (int1l & int2l, int1h & int2h);
1017 case BIT_ANDTC_EXPR:
1018 t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
1024 /* It's unclear from the C standard whether shifts can overflow.
1025 The following code ignores overflow; perhaps a C standard
1026 interpretation ruling is needed. */
1027 lshift_double (int1l, int1h, int2l,
1028 TYPE_PRECISION (TREE_TYPE (arg1)),
1031 t = build_int_2 (low, hi);
1032 TREE_TYPE (t) = TREE_TYPE (arg1);
1034 force_fit_type (t, 0);
1035 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
1036 TREE_CONSTANT_OVERFLOW (t)
1037 = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
1043 lrotate_double (int1l, int1h, int2l,
1044 TYPE_PRECISION (TREE_TYPE (arg1)),
1046 t = build_int_2 (low, hi);
1053 if ((unsigned HOST_WIDE_INT) int2l < int1l)
1056 overflow = int2h < hi;
1058 t = build_int_2 (int2l, int2h);
1064 if ((unsigned HOST_WIDE_INT) int1l < int2l)
1067 overflow = int1h < hi;
1069 t = build_int_2 (int1l, int1h);
1072 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
1073 t = build_int_2 (low, hi);
1077 if (int2h == 0 && int2l == 0)
1079 t = build_int_2 (int1l, int1h);
1082 neg_double (int2l, int2h, &low, &hi);
1083 add_double (int1l, int1h, low, hi, &low, &hi);
1084 overflow = overflow_sum_sign (hi, int2h, int1h);
1085 t = build_int_2 (low, hi);
1089 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1090 t = build_int_2 (low, hi);
1093 case TRUNC_DIV_EXPR:
1094 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1095 case EXACT_DIV_EXPR:
1096 /* This is a shortcut for a common special case.
1097 It reduces the number of tree nodes generated
1099 if (int2h == 0 && int2l > 0
1100 && TREE_TYPE (arg1) == sizetype
1101 && int1h == 0 && int1l >= 0)
1103 if (code == CEIL_DIV_EXPR)
1105 return size_int (int1l / int2l);
1107 case ROUND_DIV_EXPR:
1108 if (int2h == 0 && int2l == 1)
1110 t = build_int_2 (int1l, int1h);
1113 if (int1l == int2l && int1h == int2h)
1115 if ((int1l | int1h) == 0)
1117 t = build_int_2 (1, 0);
1120 overflow = div_and_round_double (code, uns,
1121 int1l, int1h, int2l, int2h,
1122 &low, &hi, &garbagel, &garbageh);
1123 t = build_int_2 (low, hi);
1126 case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
1127 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
1128 overflow = div_and_round_double (code, uns,
1129 int1l, int1h, int2l, int2h,
1130 &garbagel, &garbageh, &low, &hi);
1131 t = build_int_2 (low, hi);
1138 low = (((unsigned HOST_WIDE_INT) int1h
1139 < (unsigned HOST_WIDE_INT) int2h)
1140 || (((unsigned HOST_WIDE_INT) int1h
1141 == (unsigned HOST_WIDE_INT) int2h)
1142 && ((unsigned HOST_WIDE_INT) int1l
1143 < (unsigned HOST_WIDE_INT) int2l)));
1147 low = ((int1h < int2h)
1148 || ((int1h == int2h)
1149 && ((unsigned HOST_WIDE_INT) int1l
1150 < (unsigned HOST_WIDE_INT) int2l)));
1152 if (low == (code == MIN_EXPR))
1153 t = build_int_2 (int1l, int1h);
1155 t = build_int_2 (int2l, int2h);
1162 TREE_TYPE (t) = TREE_TYPE (arg1);
1164 = ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
1165 | TREE_OVERFLOW (arg1)
1166 | TREE_OVERFLOW (arg2));
1167 TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
1168 | TREE_CONSTANT_OVERFLOW (arg1)
1169 | TREE_CONSTANT_OVERFLOW (arg2));
1172 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1173 if (TREE_CODE (arg1) == REAL_CST)
1178 REAL_VALUE_TYPE value;
1181 d1 = TREE_REAL_CST (arg1);
1182 d2 = TREE_REAL_CST (arg2);
1184 /* If either operand is a NaN, just return it. Otherwise, set up
1185 for floating-point trap; we return an overflow. */
1186 if (REAL_VALUE_ISNAN (d1))
1188 else if (REAL_VALUE_ISNAN (d2))
1190 else if (setjmp (float_error))
1192 t = copy_node (arg1);
1197 set_float_handler (float_error);
1199 #ifdef REAL_ARITHMETIC
1200 REAL_ARITHMETIC (value, code, d1, d2);
1217 #ifndef REAL_INFINITY
1226 value = MIN (d1, d2);
1230 value = MAX (d1, d2);
1236 #endif /* no REAL_ARITHMETIC */
1237 t = build_real (TREE_TYPE (arg1),
1238 real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
1240 set_float_handler (NULL_PTR);
1243 = (force_fit_type (t, overflow)
1244 | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
1245 TREE_CONSTANT_OVERFLOW (t)
1247 | TREE_CONSTANT_OVERFLOW (arg1)
1248 | TREE_CONSTANT_OVERFLOW (arg2);
1251 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1252 if (TREE_CODE (arg1) == COMPLEX_CST)
1254 register tree r1 = TREE_REALPART (arg1);
1255 register tree i1 = TREE_IMAGPART (arg1);
1256 register tree r2 = TREE_REALPART (arg2);
1257 register tree i2 = TREE_IMAGPART (arg2);
1263 t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
1264 const_binop (PLUS_EXPR, i1, i2, notrunc));
1268 t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
1269 const_binop (MINUS_EXPR, i1, i2, notrunc));
1273 t = build_complex (const_binop (MINUS_EXPR,
1274 const_binop (MULT_EXPR,
1276 const_binop (MULT_EXPR,
1279 const_binop (PLUS_EXPR,
1280 const_binop (MULT_EXPR,
1282 const_binop (MULT_EXPR,
1289 register tree magsquared
1290 = const_binop (PLUS_EXPR,
1291 const_binop (MULT_EXPR, r2, r2, notrunc),
1292 const_binop (MULT_EXPR, i2, i2, notrunc),
1296 (const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1297 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1298 const_binop (PLUS_EXPR,
1299 const_binop (MULT_EXPR, r1, r2,
1301 const_binop (MULT_EXPR, i1, i2,
1304 magsquared, notrunc),
1305 const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1306 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1307 const_binop (MINUS_EXPR,
1308 const_binop (MULT_EXPR, i1, r2,
1310 const_binop (MULT_EXPR, r1, i2,
1313 magsquared, notrunc));
1320 TREE_TYPE (t) = TREE_TYPE (arg1);
1326 /* Return an INTEGER_CST with value V and type from `sizetype'. */
1330 unsigned int number;
1333 /* Type-size nodes already made for small sizes. */
1334 static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];
1336 if (number < 2*HOST_BITS_PER_WIDE_INT + 1
1337 && size_table[number] != 0)
1338 return size_table[number];
1339 if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
1341 push_obstacks_nochange ();
1342 /* Make this a permanent node. */
1343 end_temporary_allocation ();
1344 t = build_int_2 (number, 0);
1345 TREE_TYPE (t) = sizetype;
1346 size_table[number] = t;
1351 t = build_int_2 (number, 0);
1352 TREE_TYPE (t) = sizetype;
1357 /* Combine operands OP1 and OP2 with arithmetic operation CODE.
1358 CODE is a tree code. Data type is taken from `sizetype'.
1359 If the operands are constant, so is the result. */
1362 size_binop (code, arg0, arg1)
1363 enum tree_code code;
1366 /* Handle the special case of two integer constants faster. */
1367 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1369 /* And some specific cases even faster than that. */
1370 if (code == PLUS_EXPR
1371 && TREE_INT_CST_LOW (arg0) == 0
1372 && TREE_INT_CST_HIGH (arg0) == 0)
1374 if (code == MINUS_EXPR
1375 && TREE_INT_CST_LOW (arg1) == 0
1376 && TREE_INT_CST_HIGH (arg1) == 0)
1378 if (code == MULT_EXPR
1379 && TREE_INT_CST_LOW (arg0) == 1
1380 && TREE_INT_CST_HIGH (arg0) == 0)
1382 /* Handle general case of two integer constants. */
1383 return const_binop (code, arg0, arg1, 1);
1386 if (arg0 == error_mark_node || arg1 == error_mark_node)
1387 return error_mark_node;
1389 return fold (build (code, sizetype, arg0, arg1));
1392 /* Given T, a tree representing type conversion of ARG1, a constant,
1393 return a constant tree representing the result of conversion. */
1396 fold_convert (t, arg1)
1400 register tree type = TREE_TYPE (t);
1403 if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
1405 if (TREE_CODE (arg1) == INTEGER_CST)
1407 /* Given an integer constant, make new constant with new type,
1408 appropriately sign-extended or truncated. */
1409 t = build_int_2 (TREE_INT_CST_LOW (arg1),
1410 TREE_INT_CST_HIGH (arg1));
1411 TREE_TYPE (t) = type;
1412 /* Indicate an overflow if (1) ARG1 already overflowed,
1413 or (2) force_fit_type indicates an overflow.
1414 Tell force_fit_type that an overflow has already occurred
1415 if ARG1 is a too-large unsigned value and T is signed. */
1417 = (TREE_OVERFLOW (arg1)
1418 | force_fit_type (t,
1419 (TREE_INT_CST_HIGH (arg1) < 0
1420 & (TREE_UNSIGNED (type)
1421 < TREE_UNSIGNED (TREE_TYPE (arg1))))));
1422 TREE_CONSTANT_OVERFLOW (t)
1423 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1425 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1426 else if (TREE_CODE (arg1) == REAL_CST)
1428 /* Don't initialize these, use assignments.
1429 Initialized local aggregates don't work on old compilers. */
1434 x = TREE_REAL_CST (arg1);
1435 l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
1436 u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
1437 /* See if X will be in range after truncation towards 0.
1438 To compensate for truncation, move the bounds away from 0,
1439 but reject if X exactly equals the adjusted bounds. */
1440 #ifdef REAL_ARITHMETIC
1441 REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
1442 REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
1447 /* If X is a NaN, use zero instead and show we have an overflow.
1448 Otherwise, range check. */
1449 if (REAL_VALUE_ISNAN (x))
1450 overflow = 1, x = dconst0;
1451 else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
1454 #ifndef REAL_ARITHMETIC
1456 HOST_WIDE_INT low, high;
1457 HOST_WIDE_INT half_word
1458 = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
1463 high = (HOST_WIDE_INT) (x / half_word / half_word);
1464 x -= (REAL_VALUE_TYPE) high * half_word * half_word;
1465 if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
1467 low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
1468 low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
1471 low = (HOST_WIDE_INT) x;
1472 if (TREE_REAL_CST (arg1) < 0)
1473 neg_double (low, high, &low, &high);
1474 t = build_int_2 (low, high);
1478 HOST_WIDE_INT low, high;
1479 REAL_VALUE_TO_INT (&low, &high, x);
1480 t = build_int_2 (low, high);
1483 TREE_TYPE (t) = type;
1485 = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
1486 TREE_CONSTANT_OVERFLOW (t)
1487 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1489 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1490 TREE_TYPE (t) = type;
1492 else if (TREE_CODE (type) == REAL_TYPE)
1494 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1495 if (TREE_CODE (arg1) == INTEGER_CST)
1496 return build_real_from_int_cst (type, arg1);
1497 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1498 if (TREE_CODE (arg1) == REAL_CST)
1500 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
1502 else if (setjmp (float_error))
1505 t = copy_node (arg1);
1508 set_float_handler (float_error);
1510 t = build_real (type, real_value_truncate (TYPE_MODE (type),
1511 TREE_REAL_CST (arg1)));
1512 set_float_handler (NULL_PTR);
1516 = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
1517 TREE_CONSTANT_OVERFLOW (t)
1518 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1522 TREE_CONSTANT (t) = 1;
1526 /* Return an expr equal to X but certainly not valid as an lvalue.
1527 Also make sure it is not valid as a null pointer constant. */
1535 /* These things are certainly not lvalues. */
1536 if (TREE_CODE (x) == NON_LVALUE_EXPR
1537 || TREE_CODE (x) == INTEGER_CST
1538 || TREE_CODE (x) == REAL_CST
1539 || TREE_CODE (x) == STRING_CST
1540 || TREE_CODE (x) == ADDR_EXPR)
1542 if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
1544 /* Use NOP_EXPR instead of NON_LVALUE_EXPR
1545 so convert_for_assignment won't strip it.
1546 This is so this 0 won't be treated as a null pointer constant. */
1547 result = build1 (NOP_EXPR, TREE_TYPE (x), x);
1548 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1554 result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
1555 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1559 /* When pedantic, return an expr equal to X but certainly not valid as a
1560 pedantic lvalue. Otherwise, return X. */
1563 pedantic_non_lvalue (x)
1567 return non_lvalue (x);
1572 /* Given a tree comparison code, return the code that is the logical inverse
1573 of the given code. It is not safe to do this for floating-point
1574 comparisons, except for NE_EXPR and EQ_EXPR. */
1576 static enum tree_code
1577 invert_tree_comparison (code)
1578 enum tree_code code;
1599 /* Similar, but return the comparison that results if the operands are
1600 swapped. This is safe for floating-point. */
1602 static enum tree_code
1603 swap_tree_comparison (code)
1604 enum tree_code code;
1624 /* Return nonzero if CODE is a tree code that represents a truth value. */
1627 truth_value_p (code)
1628 enum tree_code code;
1630 return (TREE_CODE_CLASS (code) == '<'
1631 || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
1632 || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
1633 || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
1636 /* Return nonzero if two operands are necessarily equal.
1637 If ONLY_CONST is non-zero, only return non-zero for constants.
1638 This function tests whether the operands are indistinguishable;
1639 it does not test whether they are equal using C's == operation.
1640 The distinction is important for IEEE floating point, because
1641 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
1642 (2) two NaNs may be indistinguishable, but NaN!=NaN. */
1645 operand_equal_p (arg0, arg1, only_const)
1649 /* If both types don't have the same signedness, then we can't consider
1650 them equal. We must check this before the STRIP_NOPS calls
1651 because they may change the signedness of the arguments. */
1652 if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
1658 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
1659 We don't care about side effects in that case because the SAVE_EXPR
1660 takes care of that for us. */
1661 if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
1662 return ! only_const;
1664 if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
1667 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1668 && TREE_CODE (arg0) == ADDR_EXPR
1669 && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
1672 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1673 && TREE_CODE (arg0) == INTEGER_CST
1674 && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
1675 && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
1678 /* Detect when real constants are equal. */
1679 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1680 && TREE_CODE (arg0) == REAL_CST)
1681 return !bcmp ((char *) &TREE_REAL_CST (arg0),
1682 (char *) &TREE_REAL_CST (arg1),
1683 sizeof (REAL_VALUE_TYPE));
1691 if (TREE_CODE (arg0) != TREE_CODE (arg1))
1693 /* This is needed for conversions and for COMPONENT_REF.
1694 Might as well play it safe and always test this. */
1695 if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
1698 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
1701 /* Two conversions are equal only if signedness and modes match. */
1702 if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
1703 && (TREE_UNSIGNED (TREE_TYPE (arg0))
1704 != TREE_UNSIGNED (TREE_TYPE (arg1))))
1707 return operand_equal_p (TREE_OPERAND (arg0, 0),
1708 TREE_OPERAND (arg1, 0), 0);
1712 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1713 TREE_OPERAND (arg1, 0), 0)
1714 && operand_equal_p (TREE_OPERAND (arg0, 1),
1715 TREE_OPERAND (arg1, 1), 0));
1718 switch (TREE_CODE (arg0))
1721 return operand_equal_p (TREE_OPERAND (arg0, 0),
1722 TREE_OPERAND (arg1, 0), 0);
1726 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1727 TREE_OPERAND (arg1, 0), 0)
1728 && operand_equal_p (TREE_OPERAND (arg0, 1),
1729 TREE_OPERAND (arg1, 1), 0));
1732 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1733 TREE_OPERAND (arg1, 0), 0)
1734 && operand_equal_p (TREE_OPERAND (arg0, 1),
1735 TREE_OPERAND (arg1, 1), 0)
1736 && operand_equal_p (TREE_OPERAND (arg0, 2),
1737 TREE_OPERAND (arg1, 2), 0));
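
/* Worked example (illustrative only): two REAL_CST nodes for -0.0 and 0.0
   are not operand_equal_p, because their bit patterns differ, even though
   they compare equal with `=='.  Two INTEGER_CST nodes built separately
   with the same value and type are equal here although they are distinct
   tree nodes.  A SAVE_EXPR compared against itself is equal only when
   ONLY_CONST is zero.  */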
1745 /* Similar to operand_equal_p, but see if ARG0 might have been made by
1746 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
1748 When in doubt, return 0. */
1751 operand_equal_for_comparison_p (arg0, arg1, other)
1755 int unsignedp1, unsignedpo;
1756 tree primarg1, primother;
1757 unsigned correct_width;
1759 if (operand_equal_p (arg0, arg1, 0))
1762 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
1765 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
1766 actual comparison operand, ARG0.
1768 First throw away any conversions to wider types
1769 already present in the operands. */
1771 primarg1 = get_narrower (arg1, &unsignedp1);
1772 primother = get_narrower (other, &unsignedpo);
1774 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
1775 if (unsignedp1 == unsignedpo
1776 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
1777 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
1779 tree type = TREE_TYPE (arg0);
1781 /* Make sure shorter operand is extended the right way
1782 to match the longer operand. */
1783 primarg1 = convert (signed_or_unsigned_type (unsignedp1,
1784 TREE_TYPE (primarg1)),
1787 if (operand_equal_p (arg0, convert (type, primarg1), 0))
1794 /* See if ARG is an expression that is either a comparison or is performing
1795 arithmetic on comparisons. The comparisons must only be comparing
1796 two different values, which will be stored in *CVAL1 and *CVAL2; if
1797 they are non-zero it means that some operands have already been found.
1798 No variables may be used anywhere else in the expression except in the
1799 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
1800 the expression and save_expr needs to be called with CVAL1 and CVAL2.
1802 If this is true, return 1. Otherwise, return zero. */
1805 twoval_comparison_p (arg, cval1, cval2, save_p)
1807 tree *cval1, *cval2;
1810 enum tree_code code = TREE_CODE (arg);
1811 char class = TREE_CODE_CLASS (code);
1813 /* We can handle some of the 'e' cases here. */
1814 if (class == 'e' && code == TRUTH_NOT_EXPR)
1816 else if (class == 'e'
1817 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
1818 || code == COMPOUND_EXPR))
1821 /* ??? Disable this since the SAVE_EXPR might already be in use outside
1822 the expression. There may be no way to make this work, but it needs
1823 to be looked at again for 2.6. */
1825 else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
1827 /* If we've already found a CVAL1 or CVAL2, this expression is
1828 too complex to handle.
1829 if (*cval1 || *cval2)
1840 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
1843 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
1844 && twoval_comparison_p (TREE_OPERAND (arg, 1),
1845 cval1, cval2, save_p));
1851 if (code == COND_EXPR)
1852 return (twoval_comparison_p (TREE_OPERAND (arg, 0),
1853 cval1, cval2, save_p)
1854 && twoval_comparison_p (TREE_OPERAND (arg, 1),
1855 cval1, cval2, save_p)
1856 && twoval_comparison_p (TREE_OPERAND (arg, 2),
1857 cval1, cval2, save_p));
1861 /* First see if we can handle the first operand, then the second. For
1862 the second operand, we know *CVAL1 can't be zero. It must be that
1863 one side of the comparison is each of the values; test for the
1864 case where this isn't true by failing if the two operands
1867 if (operand_equal_p (TREE_OPERAND (arg, 0),
1868 TREE_OPERAND (arg, 1), 0))
1872 *cval1 = TREE_OPERAND (arg, 0);
1873 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
1875 else if (*cval2 == 0)
1876 *cval2 = TREE_OPERAND (arg, 0);
1877 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
1882 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
1884 else if (*cval2 == 0)
1885 *cval2 = TREE_OPERAND (arg, 1);
1886 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
1897 /* ARG is a tree that is known to contain just arithmetic operations and
1898 comparisons. Evaluate the operations in the tree substituting NEW0 for
1899 any occurrence of OLD0 as an operand of a comparison and likewise for
1903 eval_subst (arg, old0, new0, old1, new1)
1905 tree old0, new0, old1, new1;
1907 tree type = TREE_TYPE (arg);
1908 enum tree_code code = TREE_CODE (arg);
1909 char class = TREE_CODE_CLASS (code);
1911 /* We can handle some of the 'e' cases here. */
1912 if (class == 'e' && code == TRUTH_NOT_EXPR)
1914 else if (class == 'e'
1915 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
1921 return fold (build1 (code, type,
1922 eval_subst (TREE_OPERAND (arg, 0),
1923 old0, new0, old1, new1)));
1926 return fold (build (code, type,
1927 eval_subst (TREE_OPERAND (arg, 0),
1928 old0, new0, old1, new1),
1929 eval_subst (TREE_OPERAND (arg, 1),
1930 old0, new0, old1, new1)));
1936 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
1939 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
1942 return fold (build (code, type,
1943 eval_subst (TREE_OPERAND (arg, 0),
1944 old0, new0, old1, new1),
1945 eval_subst (TREE_OPERAND (arg, 1),
1946 old0, new0, old1, new1),
1947 eval_subst (TREE_OPERAND (arg, 2),
1948 old0, new0, old1, new1)));
1953 tree arg0 = TREE_OPERAND (arg, 0);
1954 tree arg1 = TREE_OPERAND (arg, 1);
1956 /* We need to check both for exact equality and tree equality. The
1957 former will be true if the operand has a side-effect. In that
1958 case, we know the operand occurred exactly once. */
1960 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
1962 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
1965 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
1967 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
1970 return fold (build (code, type, arg0, arg1));
1977 /* Return a tree for the case when the result of an expression is RESULT
1978 converted to TYPE and OMITTED was previously an operand of the expression
1979 but is now not needed (e.g., we folded OMITTED * 0).
1981 If OMITTED has side effects, we must evaluate it. Otherwise, just do
1982 the conversion of RESULT to TYPE. */
1985 omit_one_operand (type, result, omitted)
1986 tree type, result, omitted;
1988 tree t = convert (type, result);
1990 if (TREE_SIDE_EFFECTS (omitted))
1991 return build (COMPOUND_EXPR, type, omitted, t);
1993 return non_lvalue (t);
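
/* Worked example (illustrative only): when folding `f () * 0' the
   multiplication disappears but the call must still be evaluated, so
   omit_one_operand (type, integer_zero_node, <call to f>) builds the
   COMPOUND_EXPR `(f (), 0)'.  If the omitted operand has no side effects,
   only the converted result, made a non-lvalue, is returned.  */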
1996 /* Return a simplified tree node for the truth-negation of ARG. This
1997 never alters ARG itself. We assume that ARG is an operation that
1998 returns a truth value (0 or 1). */
2001 invert_truthvalue (arg)
2004 tree type = TREE_TYPE (arg);
2005 enum tree_code code = TREE_CODE (arg);
2007 if (code == ERROR_MARK)
2010 /* If this is a comparison, we can simply invert it, except for
2011 floating-point non-equality comparisons, in which case we just
2012 enclose a TRUTH_NOT_EXPR around what we have. */
2014 if (TREE_CODE_CLASS (code) == '<')
2016 if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
2017 && code != NE_EXPR && code != EQ_EXPR)
2018 return build1 (TRUTH_NOT_EXPR, type, arg);
2020 return build (invert_tree_comparison (code), type,
2021 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
2027 return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
2028 && TREE_INT_CST_HIGH (arg) == 0, 0));
2030 case TRUTH_AND_EXPR:
2031 return build (TRUTH_OR_EXPR, type,
2032 invert_truthvalue (TREE_OPERAND (arg, 0)),
2033 invert_truthvalue (TREE_OPERAND (arg, 1)));
2036 return build (TRUTH_AND_EXPR, type,
2037 invert_truthvalue (TREE_OPERAND (arg, 0)),
2038 invert_truthvalue (TREE_OPERAND (arg, 1)));
2040 case TRUTH_XOR_EXPR:
2041 /* Here we can invert either operand. We invert the first operand
2042 unless the second operand is a TRUTH_NOT_EXPR in which case our
2043 result is the XOR of the first operand with the inside of the
2044 negation of the second operand. */
2046 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
2047 return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
2048 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
2050 return build (TRUTH_XOR_EXPR, type,
2051 invert_truthvalue (TREE_OPERAND (arg, 0)),
2052 TREE_OPERAND (arg, 1));
2054 case TRUTH_ANDIF_EXPR:
2055 return build (TRUTH_ORIF_EXPR, type,
2056 invert_truthvalue (TREE_OPERAND (arg, 0)),
2057 invert_truthvalue (TREE_OPERAND (arg, 1)));
2059 case TRUTH_ORIF_EXPR:
2060 return build (TRUTH_ANDIF_EXPR, type,
2061 invert_truthvalue (TREE_OPERAND (arg, 0)),
2062 invert_truthvalue (TREE_OPERAND (arg, 1)));
2064 case TRUTH_NOT_EXPR:
2065 return TREE_OPERAND (arg, 0);
2068 return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
2069 invert_truthvalue (TREE_OPERAND (arg, 1)),
2070 invert_truthvalue (TREE_OPERAND (arg, 2)));
2073 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
2074 invert_truthvalue (TREE_OPERAND (arg, 1)));
2076 case NON_LVALUE_EXPR:
2077 return invert_truthvalue (TREE_OPERAND (arg, 0));
2082 return build1 (TREE_CODE (arg), type,
2083 invert_truthvalue (TREE_OPERAND (arg, 0)));
2086 if (!integer_onep (TREE_OPERAND (arg, 1)))
2088 return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
2091 return build1 (TRUTH_NOT_EXPR, type, arg);
2093 if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
2095 return build1 (TRUTH_NOT_EXPR, type, arg);
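
/* Worked examples (illustrative only): inverting `a && b' yields
   `!a || !b' (TRUTH_ANDIF_EXPR becomes TRUTH_ORIF_EXPR with both operands
   inverted), and inverting `x < y' yields `x >= y', except that for
   floating-point operands a TRUTH_NOT_EXPR is wrapped around the
   comparison, since `!(x < y)' is not `x >= y' when either operand is a
   NaN.  */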
2098 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2099 operands are another bit-wise operation with a common input. If so,
2100 distribute the bit operations to save an operation and possibly two if
2101 constants are involved. For example, convert
2102 (A | B) & (A | C) into A | (B & C)
2103 Further simplification will occur if B and C are constants.
2105 If this optimization cannot be done, 0 will be returned. */
2108 distribute_bit_expr (code, type, arg0, arg1)
2109 enum tree_code code;
2116 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2117 || TREE_CODE (arg0) == code
2118 || (TREE_CODE (arg0) != BIT_AND_EXPR
2119 && TREE_CODE (arg0) != BIT_IOR_EXPR))
2122 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
2124 common = TREE_OPERAND (arg0, 0);
2125 left = TREE_OPERAND (arg0, 1);
2126 right = TREE_OPERAND (arg1, 1);
2128 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
2130 common = TREE_OPERAND (arg0, 0);
2131 left = TREE_OPERAND (arg0, 1);
2132 right = TREE_OPERAND (arg1, 0);
2134 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
2136 common = TREE_OPERAND (arg0, 1);
2137 left = TREE_OPERAND (arg0, 0);
2138 right = TREE_OPERAND (arg1, 1);
2140 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
2142 common = TREE_OPERAND (arg0, 1);
2143 left = TREE_OPERAND (arg0, 0);
2144 right = TREE_OPERAND (arg1, 0);
2149 return fold (build (TREE_CODE (arg0), type, common,
2150 fold (build (code, type, left, right))));
2153 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
2154 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
2157 make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
2160 int bitsize, bitpos;
2163 tree result = build (BIT_FIELD_REF, type, inner,
2164 size_int (bitsize), size_int (bitpos));
2166 TREE_UNSIGNED (result) = unsignedp;
2171 /* Optimize a bit-field compare.
2173 There are two cases: First is a compare against a constant and the
2174 second is a comparison of two items where the fields are at the same
2175 bit position relative to the start of a chunk (byte, halfword, word)
2176 large enough to contain it. In these cases we can avoid the shift
2177 implicit in bitfield extractions.
2179 For constants, we emit a compare of the shifted constant with the
2180 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2181 compared. For two fields at the same position, we do the ANDs with the
2182 similar mask and compare the result of the ANDs.
2184 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2185 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2186 are the left and right operands of the comparison, respectively.
2188 If the optimization described above can be done, we return the resulting
2189 tree. Otherwise we return zero. */
2192 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2193 enum tree_code code;
2197 int lbitpos, lbitsize, rbitpos, rbitsize;
2198 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
2199 tree type = TREE_TYPE (lhs);
2200 tree signed_type, unsigned_type;
2201 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2202 enum machine_mode lmode, rmode, lnmode, rnmode;
2203 int lunsignedp, runsignedp;
2204 int lvolatilep = 0, rvolatilep = 0;
2205 tree linner, rinner;
2209 /* Get all the information about the extractions being done. If the bit size
2210 is the same as the size of the underlying object, we aren't doing an
2211 extraction at all and so can do nothing. */
2212 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2213 &lunsignedp, &lvolatilep);
2214 if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2220 /* If this is not a constant, we can only do something if bit positions,
2221 sizes, and signedness are the same. */
2222 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
2223 &rmode, &runsignedp, &rvolatilep);
2225 if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
2226 || lunsignedp != runsignedp || offset != 0)
2230 /* See if we can find a mode to refer to this field. We should be able to,
2231 but fail if we can't. */
2232 lnmode = get_best_mode (lbitsize, lbitpos,
2233 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
2235 if (lnmode == VOIDmode)
2238 /* Set signed and unsigned types of the precision of this mode for the
2240 signed_type = type_for_mode (lnmode, 0);
2241 unsigned_type = type_for_mode (lnmode, 1);
2245 rnmode = get_best_mode (rbitsize, rbitpos,
2246 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
2248 if (rnmode == VOIDmode)
2252 /* Compute the bit position and size for the new reference and our offset
2253 within it. If the new reference is the same size as the original, we
2254 won't optimize anything, so return zero. */
2255 lnbitsize = GET_MODE_BITSIZE (lnmode);
2256 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2257 lbitpos -= lnbitpos;
2258 if (lnbitsize == lbitsize)
2263 rnbitsize = GET_MODE_BITSIZE (rnmode);
2264 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2265 rbitpos -= rnbitpos;
2266 if (rnbitsize == rbitsize)
2270 #if BYTES_BIG_ENDIAN
2271 lbitpos = lnbitsize - lbitsize - lbitpos;
2274 /* Make the mask to be used against the extracted field. */
2275 mask = build_int_2 (~0, ~0);
2276 TREE_TYPE (mask) = unsigned_type;
2277 force_fit_type (mask, 0);
2278 mask = convert (unsigned_type, mask);
2279 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2280 mask = const_binop (RSHIFT_EXPR, mask,
2281 size_int (lnbitsize - lbitsize - lbitpos), 0);
2284 /* If not comparing with constant, just rework the comparison
2286 return build (code, compare_type,
2287 build (BIT_AND_EXPR, unsigned_type,
2288 make_bit_field_ref (linner, unsigned_type,
2289 lnbitsize, lnbitpos, 1),
2291 build (BIT_AND_EXPR, unsigned_type,
2292 make_bit_field_ref (rinner, unsigned_type,
2293 rnbitsize, rnbitpos, 1),
2296 /* Otherwise, we are handling the constant case. See if the constant is too
2297 big for the field. Warn and return a tree for 0 (false) if so. We do
2298 this not only for its own sake, but to avoid having to test for this
2299 error case below. If we didn't, we might generate wrong code.
2301 For unsigned fields, the constant shifted right by the field length should
2302 be all zero. For signed fields, the high-order bits should agree with
2307 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2308 convert (unsigned_type, rhs),
2309 size_int (lbitsize), 0)))
2311 warning ("comparison is always %s due to width of bitfield",
2312 code == NE_EXPR ? "one" : "zero");
2313 return convert (compare_type,
2315 ? integer_one_node : integer_zero_node));
2320 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2321 size_int (lbitsize - 1), 0);
2322 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2324 warning ("comparison is always %s due to width of bitfield",
2325 code == NE_EXPR ? "one" : "zero");
2326 return convert (compare_type,
2328 ? integer_one_node : integer_zero_node));
2332 /* Single-bit compares should always be against zero. */
2333 if (lbitsize == 1 && ! integer_zerop (rhs))
2335 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2336 rhs = convert (type, integer_zero_node);
2339 /* Make a new bitfield reference, shift the constant over the
2340 appropriate number of bits and mask it with the computed mask
2341 (in case this was a signed field). If we changed it, make a new one. */
2342 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2345 TREE_SIDE_EFFECTS (lhs) = 1;
2346 TREE_THIS_VOLATILE (lhs) = 1;
2349 rhs = fold (const_binop (BIT_AND_EXPR,
2350 const_binop (LSHIFT_EXPR,
2351 convert (unsigned_type, rhs),
2352 size_int (lbitpos), 0),
2355 return build (code, compare_type,
2356 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2360 /* Subroutine for fold_truthop: decode a field reference.
2362 If EXP is a comparison reference, we return the innermost reference.
2364 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2365 set to the starting bit number.
2367 If the innermost field can be completely contained in a mode-sized
2368 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2370 *PVOLATILEP is set to 1 if any expression encountered is volatile;
2371 otherwise it is not changed.
2373 *PUNSIGNEDP is set to the signedness of the field.
2375 *PMASK is set to the mask used. This is either contained in a
2376 BIT_AND_EXPR or derived from the width of the field.
2378 Return 0 if this is not a component reference or is one that we can't
2379 do anything with. */
2382 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2385 int *pbitsize, *pbitpos;
2386 enum machine_mode *pmode;
2387 int *punsignedp, *pvolatilep;
2391 tree mask, inner, offset;
2395 /* All the optimizations using this function assume integer fields.
2396 There are problems with FP fields since the type_for_size call
2397 below can fail for, e.g., XFmode. */
2398 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
2403 if (TREE_CODE (exp) == BIT_AND_EXPR)
2405 and_mask = TREE_OPERAND (exp, 1);
2406 exp = TREE_OPERAND (exp, 0);
2407 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
2408 if (TREE_CODE (and_mask) != INTEGER_CST)
2412 if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
2413 && TREE_CODE (exp) != BIT_FIELD_REF)
2416 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2417 punsignedp, pvolatilep);
2418 if (inner == exp || *pbitsize < 0 || offset != 0)
2421 /* Compute the mask to access the bitfield. */
2422 unsigned_type = type_for_size (*pbitsize, 1);
2423 precision = TYPE_PRECISION (unsigned_type);
2425 mask = build_int_2 (~0, ~0);
2426 TREE_TYPE (mask) = unsigned_type;
2427 force_fit_type (mask, 0);
2428 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2429 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
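/* The shift pair leaves the low *PBITSIZE bits of MASK set and clears the
rest: shifting ~0 left and then logically right by the same amount
(PRECISION - *PBITSIZE) keeps only that many low-order ones. */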
2431 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
2433 mask = fold (build (BIT_AND_EXPR, unsigned_type,
2434 convert (unsigned_type, and_mask), mask));
2440 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
2444 all_ones_mask_p (mask, size)
2448 tree type = TREE_TYPE (mask);
2449 int precision = TYPE_PRECISION (type);
2452 tmask = build_int_2 (~0, ~0);
2453 TREE_TYPE (tmask) = signed_type (type);
2454 force_fit_type (tmask, 0);
2456 operand_equal_p (mask,
2457 const_binop (RSHIFT_EXPR,
2458 const_binop (LSHIFT_EXPR, tmask,
2459 size_int (precision - size), 0),
2460 size_int (precision - size), 0),
2464 /* Subroutine for fold_truthop: determine if an operand is simple enough
2465 to be evaluated unconditionally. */
2468 simple_operand_p (exp)
2471 /* Strip any conversions that don't change the machine mode. */
2472 while ((TREE_CODE (exp) == NOP_EXPR
2473 || TREE_CODE (exp) == CONVERT_EXPR)
2474 && (TYPE_MODE (TREE_TYPE (exp))
2475 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2476 exp = TREE_OPERAND (exp, 0);
2478 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2479 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2480 && ! TREE_ADDRESSABLE (exp)
2481 && ! TREE_THIS_VOLATILE (exp)
2482 && ! DECL_NONLOCAL (exp)
2483 /* Don't regard global variables as simple. They may be
2484 allocated in ways unknown to the compiler (shared memory,
2485 #pragma weak, etc). */
2486 && ! TREE_PUBLIC (exp)
2487 && ! DECL_EXTERNAL (exp)
2488 /* Loading a static variable is unduly expensive, but global
2489 registers aren't expensive. */
2490 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2493 /* Subroutine for fold_truthop: try to optimize a range test.
2495 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2497 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2498 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2499 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of the result.
2502 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2503 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2504 larger than HI_CST (they may be equal).
2506 We return the simplified tree or 0 if no optimization is possible. */
2509 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2510 enum tree_code jcode, lo_code, hi_code;
2511 tree type, var, lo_cst, hi_cst;
2514 enum tree_code rcode;
2516 /* See if this is a range test and normalize the constant terms. */
2518 if (jcode == TRUTH_AND_EXPR)
2523 /* See if we have VAR != CST && VAR != CST+1. */
2524 if (! (hi_code == NE_EXPR
2525 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2526 && tree_int_cst_equal (integer_one_node,
2527 const_binop (MINUS_EXPR,
2528 hi_cst, lo_cst, 0))))
2536 if (hi_code == LT_EXPR)
2537 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2538 else if (hi_code != LE_EXPR)
2541 if (lo_code == GT_EXPR)
2542 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2544 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
2557 /* See if we have VAR == CST || VAR == CST+1. */
2558 if (! (hi_code == EQ_EXPR
2559 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2560 && tree_int_cst_equal (integer_one_node,
2561 const_binop (MINUS_EXPR,
2562 hi_cst, lo_cst, 0))))
2570 if (hi_code == GE_EXPR)
2571 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2572 else if (hi_code != GT_EXPR)
2575 if (lo_code == LE_EXPR)
2576 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2578 /* We now have VAR < LO_CST || VAR > HI_CST. */
2587 /* When normalizing, it is possible to both increment the smaller constant
2588 and decrement the larger constant. See if they are still ordered. */
2589 if (tree_int_cst_lt (hi_cst, lo_cst))
2592 /* Fail if VAR isn't an integer. */
2593 utype = TREE_TYPE (var);
2594 if (! INTEGRAL_TYPE_P (utype))
2597 /* The range test is invalid if subtracting the two constants results
2598 in overflow. This can happen in traditional mode. */
2599 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2600 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2603 if (! TREE_UNSIGNED (utype))
2605 utype = unsigned_type (utype);
2606 var = convert (utype, var);
2607 lo_cst = convert (utype, lo_cst);
2608 hi_cst = convert (utype, hi_cst);
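/* With everything unsigned, a single comparison now covers both bounds:
when VAR is below LO_CST the subtraction wraps around to a large value,
so VAR - LO_CST <= HI_CST - LO_CST holds exactly when
LO_CST <= VAR <= HI_CST, and the opposite comparison expresses the
"outside the range" form. */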
2611 return fold (convert (type,
2612 build (rcode, utype,
2613 build (MINUS_EXPR, utype, var, lo_cst),
2614 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2617 /* Find ways of folding logical expressions of LHS and RHS:
2618 Try to merge two comparisons to the same innermost item.
2619 Look for range tests like "ch >= '0' && ch <= '9'".
2620 Look for combinations of simple terms on machines with expensive branches
2621 and evaluate the RHS unconditionally.
2623 For example, if we have p->a == 2 && p->b == 4 and we can make an
2624 object large enough to span both A and B, we can do this with a comparison
2625 against the object ANDed with a mask.
2627 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2628 operations to do this with one comparison.
2630 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2631 function and the one above.
2633 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2634 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2636 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its two operands.
2639 We return the simplified tree or 0 if no optimization is possible. */
2642 fold_truthop (code, truth_type, lhs, rhs)
2643 enum tree_code code;
2644 tree truth_type, lhs, rhs;
2646 /* If this is the "or" of two comparisons, we can do something if
2647 the comparisons are NE_EXPR. If this is the "and", we can do something
2648 if the comparisons are EQ_EXPR. I.e.,
2649 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2651 WANTED_CODE is this operation code. For single bit fields, we can
2652 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2653 comparison for one-bit fields. */
2655 enum tree_code wanted_code;
2656 enum tree_code lcode, rcode;
2657 tree ll_arg, lr_arg, rl_arg, rr_arg;
2658 tree ll_inner, lr_inner, rl_inner, rr_inner;
2659 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2660 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2661 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2662 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2663 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2664 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2665 enum machine_mode lnmode, rnmode;
2666 tree ll_mask, lr_mask, rl_mask, rr_mask;
2667 tree l_const, r_const;
2669 int first_bit, end_bit;
2672 /* Start by getting the comparison codes and seeing if this looks like
2673 a range test. Fail if anything is volatile. If one operand is a
2674 BIT_AND_EXPR with the constant one, treat it as if it were surrounded by a NE_EXPR with zero. */
2677 if (TREE_SIDE_EFFECTS (lhs)
2678 || TREE_SIDE_EFFECTS (rhs))
2681 lcode = TREE_CODE (lhs);
2682 rcode = TREE_CODE (rhs);
2684 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2685 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2687 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2688 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
2690 if (TREE_CODE_CLASS (lcode) != '<'
2691 || TREE_CODE_CLASS (rcode) != '<')
2694 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2695 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2697 ll_arg = TREE_OPERAND (lhs, 0);
2698 lr_arg = TREE_OPERAND (lhs, 1);
2699 rl_arg = TREE_OPERAND (rhs, 0);
2700 rr_arg = TREE_OPERAND (rhs, 1);
2702 if (TREE_CODE (lr_arg) == INTEGER_CST
2703 && TREE_CODE (rr_arg) == INTEGER_CST
2704 && operand_equal_p (ll_arg, rl_arg, 0))
2706 if (tree_int_cst_lt (lr_arg, rr_arg))
2707 result = range_test (code, truth_type, lcode, rcode,
2708 ll_arg, lr_arg, rr_arg);
2710 result = range_test (code, truth_type, rcode, lcode,
2711 ll_arg, rr_arg, lr_arg);
2713 /* If this isn't a range test, it also isn't a comparison that
2714 can be merged. However, it wins to evaluate the RHS unconditionally
2715 on machines with expensive branches. */
2717 if (result == 0 && BRANCH_COST >= 2)
2719 if (TREE_CODE (ll_arg) != VAR_DECL
2720 && TREE_CODE (ll_arg) != PARM_DECL)
2722 /* Avoid evaluating the variable part twice. */
2723 ll_arg = save_expr (ll_arg);
2724 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2725 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2727 return build (code, truth_type, lhs, rhs);
2732 /* If the RHS can be evaluated unconditionally and its operands are
2733 simple, it wins to evaluate the RHS unconditionally on machines
2734 with expensive branches. In this case, this isn't a comparison
2735 that can be merged. */
2737 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2738 are with zero (tmw). */
2740 if (BRANCH_COST >= 2
2741 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2742 && simple_operand_p (rl_arg)
2743 && simple_operand_p (rr_arg))
2744 return build (code, truth_type, lhs, rhs);
2746 /* See if the comparisons can be merged. Then get all the parameters for each side. */
2749 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2750 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2754 ll_inner = decode_field_reference (ll_arg,
2755 &ll_bitsize, &ll_bitpos, &ll_mode,
2756 &ll_unsignedp, &volatilep, &ll_mask);
2757 lr_inner = decode_field_reference (lr_arg,
2758 &lr_bitsize, &lr_bitpos, &lr_mode,
2759 &lr_unsignedp, &volatilep, &lr_mask);
2760 rl_inner = decode_field_reference (rl_arg,
2761 &rl_bitsize, &rl_bitpos, &rl_mode,
2762 &rl_unsignedp, &volatilep, &rl_mask);
2763 rr_inner = decode_field_reference (rr_arg,
2764 &rr_bitsize, &rr_bitpos, &rr_mode,
2765 &rr_unsignedp, &volatilep, &rr_mask);
2767 /* The inner operation on the lhs of each comparison must be the same
2768 if we are to be able to do anything. Then see if we have constants.
2769 If not, the same must be true for the operands on the rhs. */
2771 if (volatilep || ll_inner == 0 || rl_inner == 0
2772 || ! operand_equal_p (ll_inner, rl_inner, 0))
2775 if (TREE_CODE (lr_arg) == INTEGER_CST
2776 && TREE_CODE (rr_arg) == INTEGER_CST)
2777 l_const = lr_arg, r_const = rr_arg;
2778 else if (lr_inner == 0 || rr_inner == 0
2779 || ! operand_equal_p (lr_inner, rr_inner, 0))
2782 l_const = r_const = 0;
2784 /* If either comparison code is not correct for our logical operation,
2785 fail. However, we can convert a one-bit comparison against zero into
2786 the opposite comparison against that bit being set in the field. */
2788 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2789 if (lcode != wanted_code)
2791 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2797 if (rcode != wanted_code)
2799 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2805 /* See if we can find a mode that contains both fields being compared on
2806 the left. If we can't, fail. Otherwise, update all constants and masks
2807 to be relative to a field of that size. */
2808 first_bit = MIN (ll_bitpos, rl_bitpos);
2809 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2810 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2811 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2813 if (lnmode == VOIDmode)
2816 lnbitsize = GET_MODE_BITSIZE (lnmode);
2817 lnbitpos = first_bit & ~ (lnbitsize - 1);
2818 type = type_for_size (lnbitsize, 1);
2819 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2821 #if BYTES_BIG_ENDIAN
2822 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2823 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2826 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2827 size_int (xll_bitpos), 0);
2828 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2829 size_int (xrl_bitpos), 0);
2831 /* Make sure the constants are interpreted as unsigned, so we
2832 don't have sign bits outside the range of their type. */
2836 l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
2837 l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
2838 size_int (xll_bitpos), 0);
2842 r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
2843 r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
2844 size_int (xrl_bitpos), 0);
2847 /* If the right sides are not constant, do the same for them. Also,
2848 disallow this optimization if a size or signedness mismatch occurs
2849 between the left and right sides. */
2852 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2853 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2854 /* Make sure the two fields on the right
2855 correspond to the left without being swapped. */
2856 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2859 first_bit = MIN (lr_bitpos, rr_bitpos);
2860 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2861 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2862 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2864 if (rnmode == VOIDmode)
2867 rnbitsize = GET_MODE_BITSIZE (rnmode);
2868 rnbitpos = first_bit & ~ (rnbitsize - 1);
2869 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2871 #if BYTES_BIG_ENDIAN
2872 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2873 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2876 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2877 size_int (xlr_bitpos), 0);
2878 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2879 size_int (xrr_bitpos), 0);
2881 /* Make a mask that corresponds to both fields being compared.
2882 Do this for both items being compared. If the masks agree,
2883 we can do this by masking both and comparing the masked results. */
2885 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2886 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2887 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2889 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2890 ll_unsignedp || rl_unsignedp);
2891 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2892 lr_unsignedp || rr_unsignedp);
2893 if (! all_ones_mask_p (ll_mask, lnbitsize))
2895 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2896 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2898 return build (wanted_code, truth_type, lhs, rhs);
2901 /* There is still another way we can do something: If both pairs of
2902 fields being compared are adjacent, we may be able to make a wider
2903 field containing them both. */
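/* For instance, if P->A and P->B occupy adjacent bytes and Q->A and Q->B
do too, the two byte comparisons can collapse into a single two-byte
bit-field comparison. (Illustrative layout only; the test below works
purely from the recorded bit positions and sizes.) */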
2904 if ((ll_bitsize + ll_bitpos == rl_bitpos
2905 && lr_bitsize + lr_bitpos == rr_bitpos)
2906 || (ll_bitpos == rl_bitpos + rl_bitsize
2907 && lr_bitpos == rr_bitpos + rr_bitsize))
2908 return build (wanted_code, truth_type,
2909 make_bit_field_ref (ll_inner, type,
2910 ll_bitsize + rl_bitsize,
2911 MIN (ll_bitpos, rl_bitpos),
2913 make_bit_field_ref (lr_inner, type,
2914 lr_bitsize + rr_bitsize,
2915 MIN (lr_bitpos, rr_bitpos),
2921 /* Handle the case of comparisons with constants. If there is something in
2922 common between the masks, those bits of the constants must be the same.
2923 If not, the condition is always false. Test for this to avoid generating
2924 incorrect code below. */
2925 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2926 if (! integer_zerop (result)
2927 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2928 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2930 if (wanted_code == NE_EXPR)
2932 warning ("`or' of unmatched not-equal tests is always 1");
2933 return convert (truth_type, integer_one_node);
2937 warning ("`and' of mutually exclusive equal-tests is always zero");
2938 return convert (truth_type, integer_zero_node);
2942 /* Construct the expression we will return. First get the component
2943 reference we will make. Unless the mask is all ones the width of
2944 that field, perform the mask operation. Then compare with the merged constant. */
2946 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2947 ll_unsignedp || rl_unsignedp);
2949 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2950 if (! all_ones_mask_p (ll_mask, lnbitsize))
2951 result = build (BIT_AND_EXPR, type, result, ll_mask);
2953 return build (wanted_code, truth_type, result,
2954 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
2957 /* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
2958 S, a SAVE_EXPR, return the expression actually being evaluated. Note
2959 that we may sometimes modify the tree. */
2962 strip_compound_expr (t, s)
2966 tree type = TREE_TYPE (t);
2967 enum tree_code code = TREE_CODE (t);
2969 /* See if this is the COMPOUND_EXPR we want to eliminate. */
2970 if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
2971 && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
2972 return TREE_OPERAND (t, 1);
2974 /* See if this is a COND_EXPR or a simple arithmetic operator. We
2975 don't bother handling any other types. */
2976 else if (code == COND_EXPR)
2978 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
2979 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
2980 TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
2982 else if (TREE_CODE_CLASS (code) == '1')
2983 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
2984 else if (TREE_CODE_CLASS (code) == '<'
2985 || TREE_CODE_CLASS (code) == '2')
2987 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
2988 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
2994 /* Perform constant folding and related simplification of EXPR.
2995 The related simplifications include x*1 => x, x*0 => 0, etc.,
2996 and application of the associative law.
2997 NOP_EXPR conversions may be removed freely (as long as we
2998 are careful not to change the C type of the overall expression)
2999 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3000 but we can constant-fold them if they have constant operands. */
3006 register tree t = expr;
3007 tree t1 = NULL_TREE;
3009 tree type = TREE_TYPE (expr);
3010 register tree arg0, arg1;
3011 register enum tree_code code = TREE_CODE (t);
3015 /* WINS will be nonzero when the switch is done
3016 if all operands are constant. */
3020 /* Don't try to process an RTL_EXPR since its operands aren't trees. */
3021 if (code == RTL_EXPR)
3024 /* Return right away if already constant. */
3025 if (TREE_CONSTANT (t))
3027 if (code == CONST_DECL)
3028 return DECL_INITIAL (t);
3032 kind = TREE_CODE_CLASS (code);
3033 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3037 /* Special case for conversion ops that can have fixed point args. */
3038 arg0 = TREE_OPERAND (t, 0);
3040 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3042 STRIP_TYPE_NOPS (arg0);
3044 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3045 subop = TREE_REALPART (arg0);
3049 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3050 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3051 && TREE_CODE (subop) != REAL_CST
3052 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3054 /* Note that TREE_CONSTANT isn't enough:
3055 static var addresses are constant but we can't
3056 do arithmetic on them. */
3059 else if (kind == 'e' || kind == '<'
3060 || kind == '1' || kind == '2' || kind == 'r')
3062 register int len = tree_code_length[(int) code];
3064 for (i = 0; i < len; i++)
3066 tree op = TREE_OPERAND (t, i);
3070 continue; /* Valid for CALL_EXPR, at least. */
3072 if (kind == '<' || code == RSHIFT_EXPR)
3074 /* Signedness matters here. Perhaps we can refine this later. */
3076 STRIP_TYPE_NOPS (op);
3080 /* Strip any conversions that don't change the mode. */
3084 if (TREE_CODE (op) == COMPLEX_CST)
3085 subop = TREE_REALPART (op);
3089 if (TREE_CODE (subop) != INTEGER_CST
3090 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3091 && TREE_CODE (subop) != REAL_CST
3092 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3094 /* Note that TREE_CONSTANT isn't enough:
3095 static var addresses are constant but we can't
3096 do arithmetic on them. */
3106 /* If this is a commutative operation, and ARG0 is a constant, move it
3107 to ARG1 to reduce the number of tests below. */
3108 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3109 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3110 || code == BIT_AND_EXPR)
3111 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3113 tem = arg0; arg0 = arg1; arg1 = tem;
3115 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3116 TREE_OPERAND (t, 1) = tem;
3119 /* Now WINS is set as described above,
3120 ARG0 is the first operand of EXPR,
3121 and ARG1 is the second operand (if it has more than one operand).
3123 First check for cases where an arithmetic operation is applied to a
3124 compound, conditional, or comparison operation. Push the arithmetic
3125 operation inside the compound or conditional to see if any folding
3126 can then be done. Convert comparison to conditional for this purpose.
3127 This also optimizes non-constant cases that used to be done in expand_expr.
3130 Before we do that, see if this is a BIT_AND_EXPR or a BIT_IOR_EXPR,
3131 one of the operands is a comparison and the other is a comparison, a
3132 BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
3133 code below would make the expression more complex. Change it to a
3134 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
3135 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
3137 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
3138 || code == EQ_EXPR || code == NE_EXPR)
3139 && ((truth_value_p (TREE_CODE (arg0))
3140 && (truth_value_p (TREE_CODE (arg1))
3141 || (TREE_CODE (arg1) == BIT_AND_EXPR
3142 && integer_onep (TREE_OPERAND (arg1, 1)))))
3143 || (truth_value_p (TREE_CODE (arg1))
3144 && (truth_value_p (TREE_CODE (arg0))
3145 || (TREE_CODE (arg0) == BIT_AND_EXPR
3146 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3148 t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
3149 : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
3153 if (code == EQ_EXPR)
3154 t = invert_truthvalue (t);
3159 if (TREE_CODE_CLASS (code) == '1')
3161 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3162 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3163 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3164 else if (TREE_CODE (arg0) == COND_EXPR)
3166 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3167 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3168 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3170 /* If this was a conversion, and all we did was to move it
3171 inside the COND_EXPR, bring it back out. Then return so we
3172 don't get into an infinite recursion loop taking the conversion
3173 out and then back in. */
3175 if ((code == NOP_EXPR || code == CONVERT_EXPR
3176 || code == NON_LVALUE_EXPR)
3177 && TREE_CODE (t) == COND_EXPR
3178 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3179 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3180 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3181 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0))))
3182 t = build1 (code, type,
3184 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3185 TREE_OPERAND (t, 0),
3186 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3187 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3190 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3191 return fold (build (COND_EXPR, type, arg0,
3192 fold (build1 (code, type, integer_one_node)),
3193 fold (build1 (code, type, integer_zero_node))));
3195 else if (TREE_CODE_CLASS (code) == '2'
3196 || TREE_CODE_CLASS (code) == '<')
3198 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3199 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3200 fold (build (code, type,
3201 arg0, TREE_OPERAND (arg1, 1))));
3202 else if (TREE_CODE (arg1) == COND_EXPR
3203 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3205 tree test, true_value, false_value;
3207 if (TREE_CODE (arg1) == COND_EXPR)
3209 test = TREE_OPERAND (arg1, 0);
3210 true_value = TREE_OPERAND (arg1, 1);
3211 false_value = TREE_OPERAND (arg1, 2);
3216 true_value = integer_one_node;
3217 false_value = integer_zero_node;
3220 /* If ARG0 is complex we want to make sure we only evaluate
3221 it once. Though this is only required if it is volatile, it
3222 might be more efficient even if it is not. However, if we
3223 succeed in folding one part to a constant, we do not need
3224 to make this SAVE_EXPR. Since we do this optimization
3225 primarily to see if we do end up with a constant and this
3226 SAVE_EXPR interferes with later optimizations, suppressing
3227 it when we can is important. */
3229 if (TREE_CODE (arg0) != SAVE_EXPR
3230 && ((TREE_CODE (arg0) != VAR_DECL
3231 && TREE_CODE (arg0) != PARM_DECL)
3232 || TREE_SIDE_EFFECTS (arg0)))
3234 tree lhs = fold (build (code, type, arg0, true_value));
3235 tree rhs = fold (build (code, type, arg0, false_value));
3237 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3238 return fold (build (COND_EXPR, type, test, lhs, rhs));
3240 arg0 = save_expr (arg0);
3243 test = fold (build (COND_EXPR, type, test,
3244 fold (build (code, type, arg0, true_value)),
3245 fold (build (code, type, arg0, false_value))));
3246 if (TREE_CODE (arg0) == SAVE_EXPR)
3247 return build (COMPOUND_EXPR, type,
3248 convert (void_type_node, arg0),
3249 strip_compound_expr (test, arg0));
3251 return convert (type, test);
3254 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3255 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3256 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3257 else if (TREE_CODE (arg0) == COND_EXPR
3258 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3260 tree test, true_value, false_value;
3262 if (TREE_CODE (arg0) == COND_EXPR)
3264 test = TREE_OPERAND (arg0, 0);
3265 true_value = TREE_OPERAND (arg0, 1);
3266 false_value = TREE_OPERAND (arg0, 2);
3271 true_value = integer_one_node;
3272 false_value = integer_zero_node;
3275 if (TREE_CODE (arg1) != SAVE_EXPR
3276 && ((TREE_CODE (arg1) != VAR_DECL
3277 && TREE_CODE (arg1) != PARM_DECL)
3278 || TREE_SIDE_EFFECTS (arg1)))
3280 tree lhs = fold (build (code, type, true_value, arg1));
3281 tree rhs = fold (build (code, type, false_value, arg1));
3283 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs)
3284 || TREE_CONSTANT (arg1))
3285 return fold (build (COND_EXPR, type, test, lhs, rhs));
3287 arg1 = save_expr (arg1);
3290 test = fold (build (COND_EXPR, type, test,
3291 fold (build (code, type, true_value, arg1)),
3292 fold (build (code, type, false_value, arg1))));
3293 if (TREE_CODE (arg1) == SAVE_EXPR)
3294 return build (COMPOUND_EXPR, type,
3295 convert (void_type_node, arg1),
3296 strip_compound_expr (test, arg1));
3298 return convert (type, test);
3301 else if (TREE_CODE_CLASS (code) == '<'
3302 && TREE_CODE (arg0) == COMPOUND_EXPR)
3303 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3304 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3305 else if (TREE_CODE_CLASS (code) == '<'
3306 && TREE_CODE (arg1) == COMPOUND_EXPR)
3307 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3308 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3320 return fold (DECL_INITIAL (t));
3325 case FIX_TRUNC_EXPR:
3326 /* Other kinds of FIX are not handled properly by fold_convert. */
3328 /* In addition to the cases of two conversions in a row
3329 handled below, if we are converting something to its own
3330 type via an object of identical or wider precision, neither
3331 conversion is needed. */
3332 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3333 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3334 && TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == TREE_TYPE (t)
3335 && ((INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
3336 && INTEGRAL_TYPE_P (TREE_TYPE (t)))
3337 || (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
3338 && FLOAT_TYPE_P (TREE_TYPE (t))))
3339 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3340 >= TYPE_PRECISION (TREE_TYPE (t))))
3341 return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
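/* For example, if I has type int and long is at least as wide as int,
(int) (long) I is simplified by the return just above to plain I, since
the intermediate widening cannot change the value. */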
3343 /* Two conversions in a row are not needed unless:
3344 - the intermediate type is narrower than both initial and final, or
3345 - the intermediate type and innermost type differ in signedness,
3346 and the outermost type is wider than the intermediate, or
3347 - the initial type is a pointer type and the precisions of the
3348 intermediate and final types differ, or
3349 - the final type is a pointer type and the precisions of the
3350 initial and intermediate types differ. */
3351 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3352 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3353 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3354 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3356 TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3357 > TYPE_PRECISION (TREE_TYPE (t)))
3358 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3360 && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
3362 && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3363 != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3364 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3365 < TYPE_PRECISION (TREE_TYPE (t))))
3366 && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3367 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3368 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
3370 (TREE_UNSIGNED (TREE_TYPE (t))
3371 && (TYPE_PRECISION (TREE_TYPE (t))
3372 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3373 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3375 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3376 != TYPE_PRECISION (TREE_TYPE (t))))
3377 && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
3378 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3379 != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3380 return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3382 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3383 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3384 /* Detect assigning a bitfield. */
3385 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3386 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3388 /* Don't leave an assignment inside a conversion
3389 unless assigning a bitfield. */
3390 tree prev = TREE_OPERAND (t, 0);
3391 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3392 /* First do the assignment, then return converted constant. */
3393 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3399 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3402 return fold_convert (t, arg0);
3404 #if 0 /* This loses on &"foo"[0]. */
3409 /* Fold an expression like: "foo"[2] */
3410 if (TREE_CODE (arg0) == STRING_CST
3411 && TREE_CODE (arg1) == INTEGER_CST
3412 && !TREE_INT_CST_HIGH (arg1)
3413 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3415 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3416 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3417 force_fit_type (t, 0);
3424 TREE_CONSTANT (t) = wins;
3430 if (TREE_CODE (arg0) == INTEGER_CST)
3432 HOST_WIDE_INT low, high;
3433 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3434 TREE_INT_CST_HIGH (arg0),
3436 t = build_int_2 (low, high);
3437 TREE_TYPE (t) = type;
3439 = (TREE_OVERFLOW (arg0)
3440 | force_fit_type (t, overflow));
3441 TREE_CONSTANT_OVERFLOW (t)
3442 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3444 else if (TREE_CODE (arg0) == REAL_CST)
3445 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3446 TREE_TYPE (t) = type;
3448 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3449 return TREE_OPERAND (arg0, 0);
3451 /* Convert - (a - b) to (b - a) for non-floating-point. */
3452 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3453 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3454 TREE_OPERAND (arg0, 0));
3461 if (TREE_CODE (arg0) == INTEGER_CST)
3463 if (! TREE_UNSIGNED (type)
3464 && TREE_INT_CST_HIGH (arg0) < 0)
3466 HOST_WIDE_INT low, high;
3467 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3468 TREE_INT_CST_HIGH (arg0),
3470 t = build_int_2 (low, high);
3471 TREE_TYPE (t) = type;
3473 = (TREE_OVERFLOW (arg0)
3474 | force_fit_type (t, overflow));
3475 TREE_CONSTANT_OVERFLOW (t)
3476 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3479 else if (TREE_CODE (arg0) == REAL_CST)
3481 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3482 t = build_real (type,
3483 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3485 TREE_TYPE (t) = type;
3487 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3488 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3492 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
3494 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
3495 return build (COMPLEX_EXPR, TREE_TYPE (arg0),
3496 TREE_OPERAND (arg0, 0),
3497 fold (build1 (NEGATE_EXPR,
3498 TREE_TYPE (TREE_TYPE (arg0)),
3499 TREE_OPERAND (arg0, 1))));
3500 else if (TREE_CODE (arg0) == COMPLEX_CST)
3501 return build_complex (TREE_OPERAND (arg0, 0),
3502 fold (build1 (NEGATE_EXPR,
3503 TREE_TYPE (TREE_TYPE (arg0)),
3504 TREE_OPERAND (arg0, 1))));
3505 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
3506 return fold (build (TREE_CODE (arg0), type,
3507 fold (build1 (CONJ_EXPR, type,
3508 TREE_OPERAND (arg0, 0))),
3509 fold (build1 (CONJ_EXPR,
3510 type, TREE_OPERAND (arg0, 1)))));
3511 else if (TREE_CODE (arg0) == CONJ_EXPR)
3512 return TREE_OPERAND (arg0, 0);
3518 if (TREE_CODE (arg0) == INTEGER_CST)
3519 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3520 ~ TREE_INT_CST_HIGH (arg0));
3521 TREE_TYPE (t) = type;
3522 force_fit_type (t, 0);
3523 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
3524 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3526 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3527 return TREE_OPERAND (arg0, 0);
3531 /* A + (-B) -> A - B */
3532 if (TREE_CODE (arg1) == NEGATE_EXPR)
3533 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3534 else if (! FLOAT_TYPE_P (type))
3536 if (integer_zerop (arg1))
3537 return non_lvalue (convert (type, arg0));
3539 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3540 with a constant, and the two constants have no bits in common,
3541 we should treat this as a BIT_IOR_EXPR since this may produce more simplifications. */
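/* E.g. (x & 0xf0) + (x & 0x0f): the two masks share no bits, so no
carries can occur and the addition behaves exactly like a bitwise OR,
which later folding may simplify further. */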
3543 if (TREE_CODE (arg0) == BIT_AND_EXPR
3544 && TREE_CODE (arg1) == BIT_AND_EXPR
3545 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3546 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3547 && integer_zerop (const_binop (BIT_AND_EXPR,
3548 TREE_OPERAND (arg0, 1),
3549 TREE_OPERAND (arg1, 1), 0)))
3551 code = BIT_IOR_EXPR;
3555 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3556 about the case where C is a constant, just try one of the
3557 four possibilities. */
3559 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3560 && operand_equal_p (TREE_OPERAND (arg0, 1),
3561 TREE_OPERAND (arg1, 1), 0))
3562 return fold (build (MULT_EXPR, type,
3563 fold (build (PLUS_EXPR, type,
3564 TREE_OPERAND (arg0, 0),
3565 TREE_OPERAND (arg1, 0))),
3566 TREE_OPERAND (arg0, 1)));
3568 /* In IEEE floating point, x+0 may not equal x. */
3569 else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3571 && real_zerop (arg1))
3572 return non_lvalue (convert (type, arg0));
3574 /* In most languages, can't associate operations on floats
3575 through parentheses. Rather than remember where the parentheses
3576 were, we don't associate floats at all. It shouldn't matter much.
3577 However, associating multiplications is only very slightly
3578 inaccurate, so do that if -ffast-math is specified. */
3579 if (FLOAT_TYPE_P (type)
3580 && ! (flag_fast_math && code == MULT_EXPR))
3583 /* The varsign == -1 cases happen only for addition and subtraction.
3584 It says that the arg that was split was really CON minus VAR.
3585 The rest of the code applies to all associative operations. */
3591 if (split_tree (arg0, code, &var, &con, &varsign))
3595 /* EXPR is (CON-VAR) +- ARG1. */
3596 /* If it is + and VAR==ARG1, return just CONST. */
3597 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3598 return convert (TREE_TYPE (t), con);
3600 /* If ARG0 is a constant, don't change things around;
3601 instead keep all the constant computations together. */
3603 if (TREE_CONSTANT (arg0))
3606 /* Otherwise return (CON +- ARG1) - VAR. */
3607 TREE_SET_CODE (t, MINUS_EXPR);
3608 TREE_OPERAND (t, 1) = var;
3610 = fold (build (code, TREE_TYPE (t), con, arg1));
3614 /* EXPR is (VAR+CON) +- ARG1. */
3615 /* If it is - and VAR==ARG1, return just CONST. */
3616 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3617 return convert (TREE_TYPE (t), con);
3619 /* If ARG0 is a constant, don't change things around;
3620 instead keep all the constant computations together. */
3622 if (TREE_CONSTANT (arg0))
3625 /* Otherwise return VAR +- (ARG1 +- CON). */
3626 TREE_OPERAND (t, 1) = tem
3627 = fold (build (code, TREE_TYPE (t), arg1, con));
3628 TREE_OPERAND (t, 0) = var;
3629 if (integer_zerop (tem)
3630 && (code == PLUS_EXPR || code == MINUS_EXPR))
3631 return convert (type, var);
3632 /* If we have x +/- (c - d) [c an explicit integer]
3633 change it to x -/+ (d - c) since if d is relocatable
3634 then the latter can be a single immediate insn
3635 and the former cannot. */
3636 if (TREE_CODE (tem) == MINUS_EXPR
3637 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3639 tree tem1 = TREE_OPERAND (tem, 1);
3640 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3641 TREE_OPERAND (tem, 0) = tem1;
3643 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3649 if (split_tree (arg1, code, &var, &con, &varsign))
3651 if (TREE_CONSTANT (arg1))
3656 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3658 /* EXPR is ARG0 +- (CON +- VAR). */
3659 if (TREE_CODE (t) == MINUS_EXPR
3660 && operand_equal_p (var, arg0, 0))
3662 /* If VAR and ARG0 cancel, return just CON or -CON. */
3663 if (code == PLUS_EXPR)
3664 return convert (TREE_TYPE (t), con);
3665 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3666 convert (TREE_TYPE (t), con)));
3670 = fold (build (code, TREE_TYPE (t), arg0, con));
3671 TREE_OPERAND (t, 1) = var;
3672 if (integer_zerop (TREE_OPERAND (t, 0))
3673 && TREE_CODE (t) == PLUS_EXPR)
3674 return convert (TREE_TYPE (t), var);
3679 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3680 if (TREE_CODE (arg1) == REAL_CST)
3682 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3684 t1 = const_binop (code, arg0, arg1, 0);
3685 if (t1 != NULL_TREE)
3687 /* The return value should always have
3688 the same type as the original expression. */
3689 TREE_TYPE (t1) = TREE_TYPE (t);
3695 if (! FLOAT_TYPE_P (type))
3697 if (! wins && integer_zerop (arg0))
3698 return build1 (NEGATE_EXPR, type, arg1);
3699 if (integer_zerop (arg1))
3700 return non_lvalue (convert (type, arg0));
3702 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3703 about the case where C is a constant, just try one of the
3704 four possibilities. */
3706 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3707 && operand_equal_p (TREE_OPERAND (arg0, 1),
3708 TREE_OPERAND (arg1, 1), 0))
3709 return fold (build (MULT_EXPR, type,
3710 fold (build (MINUS_EXPR, type,
3711 TREE_OPERAND (arg0, 0),
3712 TREE_OPERAND (arg1, 0))),
3713 TREE_OPERAND (arg0, 1)));
3715 /* Convert A - (-B) to A + B. */
3716 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3717 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3719 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3722 /* Except with IEEE floating point, 0-x equals -x. */
3723 if (! wins && real_zerop (arg0))
3724 return build1 (NEGATE_EXPR, type, arg1);
3725 /* Except with IEEE floating point, x-0 equals x. */
3726 if (real_zerop (arg1))
3727 return non_lvalue (convert (type, arg0));
3730 /* Fold &x - &x. This can happen from &x.foo - &x.
3731 This is unsafe for certain floats even in non-IEEE formats.
3732 In IEEE, it is unsafe because it does wrong for NaNs.
3733 Also note that operand_equal_p is always false if an operand is volatile. */
3736 if ((! FLOAT_TYPE_P (type) || flag_fast_math)
3737 && operand_equal_p (arg0, arg1, 0))
3738 return convert (type, integer_zero_node);
3743 if (! FLOAT_TYPE_P (type))
3745 if (integer_zerop (arg1))
3746 return omit_one_operand (type, arg1, arg0);
3747 if (integer_onep (arg1))
3748 return non_lvalue (convert (type, arg0));
3750 /* ((A / C) * C) is A if the division is an
3751 EXACT_DIV_EXPR. Since C is normally a constant,
3752 just check for one of the four possibilities. */
3754 if (TREE_CODE (arg0) == EXACT_DIV_EXPR
3755 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3756 return TREE_OPERAND (arg0, 0);
3758 /* (a * (1 << b)) is (a << b) */
3759 if (TREE_CODE (arg1) == LSHIFT_EXPR
3760 && integer_onep (TREE_OPERAND (arg1, 0)))
3761 return fold (build (LSHIFT_EXPR, type, arg0,
3762 TREE_OPERAND (arg1, 1)));
3763 if (TREE_CODE (arg0) == LSHIFT_EXPR
3764 && integer_onep (TREE_OPERAND (arg0, 0)))
3765 return fold (build (LSHIFT_EXPR, type, arg1,
3766 TREE_OPERAND (arg0, 1)));
3770 /* x*0 is 0, except for IEEE floating point. */
3771 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3773 && real_zerop (arg1))
3774 return omit_one_operand (type, arg1, arg0);
3775 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3776 However, ANSI says we can drop signals,
3777 so we can do this anyway. */
3778 if (real_onep (arg1))
3779 return non_lvalue (convert (type, arg0));
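/* x * 2.0 is rewritten just below as x + x; doubling is exact in binary
floating point, so no accuracy is lost. */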
3781 if (! wins && real_twop (arg1))
3783 tree arg = save_expr (arg0);
3784 return build (PLUS_EXPR, type, arg, arg);
3791 if (integer_all_onesp (arg1))
3792 return omit_one_operand (type, arg1, arg0);
3793 if (integer_zerop (arg1))
3794 return non_lvalue (convert (type, arg0));
3795 t1 = distribute_bit_expr (code, type, arg0, arg1);
3796 if (t1 != NULL_TREE)
3799 /* (a << C1) | (a >> C2), if A is unsigned and C1+C2 is the size of A,
3800 is a rotate of A by C1 bits. */
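/* For example, with a 32-bit unsigned A, (A << 3) | (A >> 29) satisfies
these conditions (3 + 29 == 32) and becomes A rotated left by 3. */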
3802 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3803 || TREE_CODE (arg0) == LSHIFT_EXPR)
3804 && (TREE_CODE (arg1) == RSHIFT_EXPR
3805 || TREE_CODE (arg1) == LSHIFT_EXPR)
3806 && TREE_CODE (arg0) != TREE_CODE (arg1)
3807 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3808 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3809 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3810 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3811 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3812 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3813 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3814 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3815 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3816 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3817 TREE_CODE (arg0) == LSHIFT_EXPR
3818 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
3823 if (integer_zerop (arg1))
3824 return non_lvalue (convert (type, arg0));
3825 if (integer_all_onesp (arg1))
3826 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3831 if (integer_all_onesp (arg1))
3832 return non_lvalue (convert (type, arg0));
3833 if (integer_zerop (arg1))
3834 return omit_one_operand (type, arg1, arg0);
3835 t1 = distribute_bit_expr (code, type, arg0, arg1);
3836 if (t1 != NULL_TREE)
3838 /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char. */
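/* The AND is redundant whenever the mask has a one in every bit position
the zero-extended narrower value can occupy, which is what the precision
test below verifies. */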
3839 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3840 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3842 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3843 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3844 && (~TREE_INT_CST_LOW (arg0)
3845 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3846 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3848 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3849 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3851 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3852 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3853 && (~TREE_INT_CST_LOW (arg1)
3854 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3855 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3859 case BIT_ANDTC_EXPR:
3860 if (integer_all_onesp (arg0))
3861 return non_lvalue (convert (type, arg1));
3862 if (integer_zerop (arg0))
3863 return omit_one_operand (type, arg0, arg1);
3864 if (TREE_CODE (arg1) == INTEGER_CST)
3866 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3867 code = BIT_AND_EXPR;
3873 /* In most cases, do nothing with a divide by zero. */
3874 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3875 #ifndef REAL_INFINITY
3876 if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
3879 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3881 /* In IEEE floating point, x/1 is not equivalent to x for snans.
3882 However, ANSI says we can drop signals, so we can do this anyway. */
3883 if (real_onep (arg1))
3884 return non_lvalue (convert (type, arg0));
3886 /* If ARG1 is a constant, we can convert this to a multiply by the
3887 reciprocal. This does not have the same rounding properties,
3888 so only do this if -ffast-math. We can actually always safely
3889 do it if ARG1 is a power of two, but it's hard to tell if it is
3890 or not in a portable manner. */
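/* E.g. under -ffast-math, x / 5.0 becomes x * 0.2 here, the reciprocal
being computed at compile time by the const_binop call below. */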
3891 if (TREE_CODE (arg1) == REAL_CST && flag_fast_math
3892 && 0 != (tem = const_binop (code, build_real (type, dconst1),
3894 return fold (build (MULT_EXPR, type, arg0, tem));
3898 case TRUNC_DIV_EXPR:
3899 case ROUND_DIV_EXPR:
3900 case FLOOR_DIV_EXPR:
3902 case EXACT_DIV_EXPR:
3903 if (integer_onep (arg1))
3904 return non_lvalue (convert (type, arg0));
3905 if (integer_zerop (arg1))
3908 /* If we have ((a / C1) / C2) where both divisions are the same type, try
3909 to simplify. First see if C1 * C2 overflows or not. */
3910 if (TREE_CODE (arg0) == code && TREE_CODE (arg1) == INTEGER_CST
3911 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
3915 new_divisor = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 1), arg1, 0);
3916 tem = const_binop (FLOOR_DIV_EXPR, new_divisor, arg1, 0);
3918 if (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_LOW (tem)
3919 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_HIGH (tem))
3921 /* If no overflow, divide by C1*C2. */
3922 return fold (build (code, type, TREE_OPERAND (arg0, 0), new_divisor));
3926 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
3927 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
3928 expressions, which often appear in the offsets or sizes of
3929 objects with a varying size. Only deal with positive divisors
3930 and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
3932 Look for NOPs and SAVE_EXPRs inside. */
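/* For instance (a * 8) / 4 becomes a * 2, and ((a * 4) + 8) / 4 becomes
a + 2, following the A * (C1/C3) + (C2/C3) form computed below. */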
3934 if (TREE_CODE (arg1) == INTEGER_CST
3935 && tree_int_cst_sgn (arg1) >= 0)
3937 int have_save_expr = 0;
3938 tree c2 = integer_zero_node;
3941 if (TREE_CODE (xarg0) == SAVE_EXPR)
3942 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3946 if (TREE_CODE (xarg0) == PLUS_EXPR
3947 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3948 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
3949 else if (TREE_CODE (xarg0) == MINUS_EXPR
3950 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
3951 /* If we are doing this computation unsigned, the negate is incorrect. */
3953 && ! TREE_UNSIGNED (type))
3955 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
3956 xarg0 = TREE_OPERAND (xarg0, 0);
3959 if (TREE_CODE (xarg0) == SAVE_EXPR)
3960 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3964 if (TREE_CODE (xarg0) == MULT_EXPR
3965 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
3966 && tree_int_cst_sgn (TREE_OPERAND (xarg0, 1)) >= 0
3967 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
3968 TREE_OPERAND (xarg0, 1), arg1, 1))
3969 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
3970 TREE_OPERAND (xarg0, 1), 1)))
3971 && (tree_int_cst_sgn (c2) >= 0
3972 || integer_zerop (const_binop (TRUNC_MOD_EXPR, c2,
3975 tree outer_div = integer_one_node;
3976 tree c1 = TREE_OPERAND (xarg0, 1);
3979 /* If C3 > C1, set them equal and do a divide by
3980 C3/C1 at the end of the operation. */
3981 if (tree_int_cst_lt (c1, c3))
3982 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
3984 /* The result is A * (C1/C3) + (C2/C3). */
3985 t = fold (build (PLUS_EXPR, type,
3986 fold (build (MULT_EXPR, type,
3987 TREE_OPERAND (xarg0, 0),
3988 const_binop (code, c1, c3, 1))),
3989 const_binop (code, c2, c3, 1)));
3991 if (! integer_onep (outer_div))
3992 t = fold (build (code, type, t, convert (type, outer_div)));
4004 case FLOOR_MOD_EXPR:
4005 case ROUND_MOD_EXPR:
4006 case TRUNC_MOD_EXPR:
4007 if (integer_onep (arg1))
4008 return omit_one_operand (type, integer_zero_node, arg0);
4009 if (integer_zerop (arg1))
4012 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
4013 where C1 % C3 == 0. Handle similarly to the division case,
4014 but don't bother with SAVE_EXPRs. */
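/* For instance ((a * 6) + 4) % 3 becomes simply 1, since a * 6
contributes nothing modulo 3 and 4 % 3 == 1. */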
4016 if (TREE_CODE (arg1) == INTEGER_CST
4017 && ! integer_zerop (arg1))
4019 tree c2 = integer_zero_node;
4022 if (TREE_CODE (xarg0) == PLUS_EXPR
4023 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4024 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4025 else if (TREE_CODE (xarg0) == MINUS_EXPR
4026 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4027 && ! TREE_UNSIGNED (type))
4029 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4030 xarg0 = TREE_OPERAND (xarg0, 0);
4035 if (TREE_CODE (xarg0) == MULT_EXPR
4036 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4037 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
4038 TREE_OPERAND (xarg0, 1),
4040 && tree_int_cst_sgn (c2) >= 0)
4041 /* The result is (C2%C3). */
4042 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
4043 TREE_OPERAND (xarg0, 0));
4052 if (integer_zerop (arg1))
4053 return non_lvalue (convert (type, arg0));
4054 /* Since negative shift count is not well-defined,
4055 don't try to compute it in the compiler. */
4056 if (tree_int_cst_sgn (arg1) < 0)
4061 if (operand_equal_p (arg0, arg1, 0))
4063 if (INTEGRAL_TYPE_P (type)
4064 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
4065 return omit_one_operand (type, arg1, arg0);
4069 if (operand_equal_p (arg0, arg1, 0))
4071 if (INTEGRAL_TYPE_P (type)
4072 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
4073 return omit_one_operand (type, arg1, arg0);
4076 case TRUTH_NOT_EXPR:
4077 /* Note that the operand of this must be an int
4078 and its values must be 0 or 1.
4079 ("true" is a fixed value perhaps depending on the language,
4080 but we don't handle values other than 1 correctly yet.) */
4081 return invert_truthvalue (arg0);
4083 case TRUTH_ANDIF_EXPR:
4084 /* Note that the operands of this must be ints
4085 and their values must be 0 or 1.
4086 ("true" is a fixed value perhaps depending on the language.) */
4087 /* If first arg is constant zero, return it. */
4088 if (integer_zerop (arg0))
4090 case TRUTH_AND_EXPR:
4091 /* If either arg is constant true, drop it. */
4092 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4093 return non_lvalue (arg1);
4094 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4095 return non_lvalue (arg0);
4096 /* If second arg is constant zero, result is zero, but first arg
4097 must be evaluated. */
4098 if (integer_zerop (arg1))
4099 return omit_one_operand (type, arg1, arg0);
4102 /* We only do these simplifications if we are optimizing. */
4106 /* Check for things like (A || B) && (A || C). We can convert this
4107 to A || (B && C). Note that either operator can be any of the four
4108 truth and/or operations and the transformation will still be
4109 valid. Also note that we only care about order for the
4110 ANDIF and ORIF operators. */
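/* Matching the operands in either order is only safe when neither the
inner nor the outer operator is a short-circuit form, since ANDIF/ORIF
evaluate their second operand conditionally and in order; that is what
the `commutative' flag below checks. */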
4111 if (TREE_CODE (arg0) == TREE_CODE (arg1)
4112 && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
4113 || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
4114 || TREE_CODE (arg0) == TRUTH_AND_EXPR
4115 || TREE_CODE (arg0) == TRUTH_OR_EXPR))
4117 tree a00 = TREE_OPERAND (arg0, 0);
4118 tree a01 = TREE_OPERAND (arg0, 1);
4119 tree a10 = TREE_OPERAND (arg1, 0);
4120 tree a11 = TREE_OPERAND (arg1, 1);
4121 int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
4122 || TREE_CODE (arg0) == TRUTH_AND_EXPR)
4123 && (code == TRUTH_AND_EXPR
4124 || code == TRUTH_OR_EXPR));
4126 if (operand_equal_p (a00, a10, 0))
4127 return fold (build (TREE_CODE (arg0), type, a00,
4128 fold (build (code, type, a01, a11))));
4129 else if (commutative && operand_equal_p (a00, a11, 0))
4130 return fold (build (TREE_CODE (arg0), type, a00,
4131 fold (build (code, type, a01, a10))));
4132 else if (commutative && operand_equal_p (a01, a10, 0))
4133 return fold (build (TREE_CODE (arg0), type, a01,
4134 fold (build (code, type, a00, a11))));
4136 /* This case is tricky because we must either have commutative
4137 operators or else A10 must not have side-effects. */
4139 else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
4140 && operand_equal_p (a01, a11, 0))
4141 return fold (build (TREE_CODE (arg0), type,
4142 fold (build (code, type, a00, a10)),
4146 /* Check for the possibility of merging component references. If our
4147 lhs is another similar operation, try to merge its rhs with our
4148 rhs. Then try to merge our lhs and rhs. */
4149 if (TREE_CODE (arg0) == code
4150 && 0 != (tem = fold_truthop (code, type,
4151 TREE_OPERAND (arg0, 1), arg1)))
4152 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
4154 if ((tem = fold_truthop (code, type, arg0, arg1)) != 0)
4159 case TRUTH_ORIF_EXPR:
4160 /* Note that the operands of this must be ints
4161 and their values must be 0 or true.
4162 ("true" is a fixed value perhaps depending on the language.) */
4163 /* If first arg is constant true, return it. */
4164 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4167 /* If either arg is constant zero, drop it. */
4168 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
4169 return non_lvalue (arg1);
4170 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
4171 return non_lvalue (arg0);
4172 /* If second arg is constant true, result is true, but we must
4173 evaluate first arg. */
4174 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4175 return omit_one_operand (type, arg1, arg0);
4178 case TRUTH_XOR_EXPR:
4179 /* If either arg is constant zero, drop it. */
4180 if (integer_zerop (arg0))
4181 return non_lvalue (arg1);
4182 if (integer_zerop (arg1))
4183 return non_lvalue (arg0);
4184 /* If either arg is constant true, this is a logical inversion. */
4185 if (integer_onep (arg0))
4186 return non_lvalue (invert_truthvalue (arg1));
4187 if (integer_onep (arg1))
4188 return non_lvalue (invert_truthvalue (arg0));
4197 /* If one arg is a constant integer, put it last. */
4198 if (TREE_CODE (arg0) == INTEGER_CST
4199 && TREE_CODE (arg1) != INTEGER_CST)
4201 TREE_OPERAND (t, 0) = arg1;
4202 TREE_OPERAND (t, 1) = arg0;
4203 arg0 = TREE_OPERAND (t, 0);
4204 arg1 = TREE_OPERAND (t, 1);
4205 code = swap_tree_comparison (code);
4206 TREE_SET_CODE (t, code);
4209 /* Convert foo++ == CONST into ++foo == CONST + INCR.
4210 First, see if one arg is constant; find the constant arg
4211 and the other one. */
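/* For example, `i++ == 5' becomes `++i == 6'.  The checks below allow this
   only for EQ/NE (or for pointer types), since for ordered comparisons the
   adjusted constant could overflow and for floating point the addition
   could round.  */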
4213 tree constop = 0, varop;
4216 if (TREE_CONSTANT (arg1))
4217 constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
4218 if (TREE_CONSTANT (arg0))
4219 constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
4221 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
4223 /* This optimization is invalid for ordered comparisons
4224 if CONST+INCR overflows or if foo+incr might overflow.
4225 This optimization is invalid for floating point due to rounding.
4226 For pointer types we assume overflow doesn't happen. */
4227 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4228 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4229 && (code == EQ_EXPR || code == NE_EXPR)))
4232 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
4233 constop, TREE_OPERAND (varop, 1)));
4234 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
4235 *constoploc = newconst;
4239 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
4241 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4242 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4243 && (code == EQ_EXPR || code == NE_EXPR)))
4246 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
4247 constop, TREE_OPERAND (varop, 1)));
4248 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
4249 *constoploc = newconst;
4255 /* Change X >= CST to X > (CST - 1) and X < CST to X <= (CST - 1) if CST is positive. */
4256 if (TREE_CODE (arg1) == INTEGER_CST
4257 && TREE_CODE (arg0) != INTEGER_CST
4258 && tree_int_cst_sgn (arg1) > 0)
4260 switch (TREE_CODE (t))
4264 TREE_SET_CODE (t, code);
4265 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4266 TREE_OPERAND (t, 1) = arg1;
4271 TREE_SET_CODE (t, code);
4272 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4273 TREE_OPERAND (t, 1) = arg1;
4277 /* If this is an EQ or NE comparison with zero and ARG0 is
4278 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
4279 two operations, but the latter can be done in one less insn
4280 on machines that have only two-operand insns or on which a
4281 constant cannot be the first operand. */
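/* For example, with ARG1 == 0 a test such as `((1 << n) & flags) != 0'
   becomes `((flags >> n) & 1) != 0'; as noted above, the second form can
   save an insn on two-operand machines.  */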
4282 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
4283 && TREE_CODE (arg0) == BIT_AND_EXPR)
4285 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
4286 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
4288 fold (build (code, type,
4289 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4291 TREE_TYPE (TREE_OPERAND (arg0, 0)),
4292 TREE_OPERAND (arg0, 1),
4293 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
4294 convert (TREE_TYPE (arg0),
4297 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
4298 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
4300 fold (build (code, type,
4301 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4303 TREE_TYPE (TREE_OPERAND (arg0, 1)),
4304 TREE_OPERAND (arg0, 0),
4305 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
4306 convert (TREE_TYPE (arg0),
4311 /* If this is an NE or EQ comparison of zero against the result of a
4312 signed MOD operation whose second operand is a power of 2, make
4313 the MOD operation unsigned since it is simpler and equivalent. */
4314 if ((code == NE_EXPR || code == EQ_EXPR)
4315 && integer_zerop (arg1)
4316 && ! TREE_UNSIGNED (TREE_TYPE (arg0))
4317 && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
4318 || TREE_CODE (arg0) == CEIL_MOD_EXPR
4319 || TREE_CODE (arg0) == FLOOR_MOD_EXPR
4320 || TREE_CODE (arg0) == ROUND_MOD_EXPR)
4321 && integer_pow2p (TREE_OPERAND (arg0, 1)))
4323 tree newtype = unsigned_type (TREE_TYPE (arg0));
4324 tree newmod = build (TREE_CODE (arg0), newtype,
4325 convert (newtype, TREE_OPERAND (arg0, 0)),
4326 convert (newtype, TREE_OPERAND (arg0, 1)));
4328 return build (code, type, newmod, convert (newtype, arg1));
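/* For example, for signed X, `X % 8 == 0' becomes `(unsigned) X % 8U == 0'.
   With a power-of-two divisor the remainder is zero exactly when the
   low-order bits of X are zero, so the signedness of the MOD cannot change
   the outcome of an EQ/NE test against zero.  */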
4331 /* If this is an NE comparison of zero with an AND of one, remove the
4332 comparison since the AND will give the correct value. */
4333 if (code == NE_EXPR && integer_zerop (arg1)
4334 && TREE_CODE (arg0) == BIT_AND_EXPR
4335 && integer_onep (TREE_OPERAND (arg0, 1)))
4336 return convert (type, arg0);
4338 /* If we have (A & C) == C where C is a power of 2, convert this into
4339 (A & C) != 0. Similarly for NE_EXPR. */
4340 if ((code == EQ_EXPR || code == NE_EXPR)
4341 && TREE_CODE (arg0) == BIT_AND_EXPR
4342 && integer_pow2p (TREE_OPERAND (arg0, 1))
4343 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
4344 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
4345 arg0, integer_zero_node);
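/* For example, `(flags & 8) == 8' becomes `(flags & 8) != 0'.  Since 8 is
   a power of two, the AND can only yield 0 or 8, so the two tests are
   equivalent.  */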
4347 /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
4348 and similarly for >= into !=. */
4349 if ((code == LT_EXPR || code == GE_EXPR)
4350 && TREE_UNSIGNED (TREE_TYPE (arg0))
4351 && TREE_CODE (arg1) == LSHIFT_EXPR
4352 && integer_onep (TREE_OPERAND (arg1, 0)))
4353 return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4354 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4355 TREE_OPERAND (arg1, 1)),
4356 convert (TREE_TYPE (arg0), integer_zero_node));
4358 else if ((code == LT_EXPR || code == GE_EXPR)
4359 && TREE_UNSIGNED (TREE_TYPE (arg0))
4360 && (TREE_CODE (arg1) == NOP_EXPR
4361 || TREE_CODE (arg1) == CONVERT_EXPR)
4362 && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
4363 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
4365 build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4366 convert (TREE_TYPE (arg0),
4367 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4368 TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
4369 convert (TREE_TYPE (arg0), integer_zero_node));
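/* For example, for unsigned X, `X < (1 << Y)' becomes `(X >> Y) == 0' and
   `X >= (1 << Y)' becomes `(X >> Y) != 0': X is below 1 << Y exactly when
   all of its bits at position Y and above are zero.  */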
4371 /* Simplify comparison of something with itself. (For IEEE
4372 floating-point, we can only do some of these simplifications.) */
4373 if (operand_equal_p (arg0, arg1, 0))
4380 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4382 t = build_int_2 (1, 0);
4383 TREE_TYPE (t) = type;
4387 TREE_SET_CODE (t, code);
4391 /* For NE, we can only do this simplification if integer. */
4392 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4394 /* ... fall through ... */
4397 t = build_int_2 (0, 0);
4398 TREE_TYPE (t) = type;
4403 /* An unsigned comparison against 0 can be simplified. */
4404 if (integer_zerop (arg1)
4405 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
4406 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
4407 && TREE_UNSIGNED (TREE_TYPE (arg1)))
4409 switch (TREE_CODE (t))
4413 TREE_SET_CODE (t, NE_EXPR);
4417 TREE_SET_CODE (t, EQ_EXPR);
4420 return omit_one_operand (type,
4421 convert (type, integer_one_node),
4424 return omit_one_operand (type,
4425 convert (type, integer_zero_node),
4430 /* If we are comparing an expression that just has comparisons
4431 of two integer values, arithmetic expressions of those comparisons,
4432 and constants, we can simplify it. There are only three cases
4433 to check: the two values can either be equal, the first can be
4434 greater, or the second can be greater. Fold the expression for
4435 those three values. Since each value must be 0 or 1, we have
4436 eight possibilities, each of which corresponds to the constant 0
4437 or 1 or one of the six possible comparisons.
4439 This handles common cases like (a > b) == 0 but also handles
4440 expressions like ((x > y) - (y > x)) > 0, which supposedly
4441 occur in macroized code. */
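/* Worked example: for `(a > b) == 0' the three folds below yield
   high_result == 0 (first value greater), equal_result == 1 and
   low_result == 1, i.e. the mask 011, which selects LE_EXPR and gives
   `a <= b'.  */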
4443 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
4445 tree cval1 = 0, cval2 = 0;
4448 if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
4449 /* Don't handle degenerate cases here; they should already
4450 have been handled anyway. */
4451 && cval1 != 0 && cval2 != 0
4452 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
4453 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
4454 && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
4455 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
4456 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
4458 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
4459 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
4461 /* We can't just pass T to eval_subst in case cval1 or cval2
4462 was the same as ARG1. */
4465 = fold (build (code, type,
4466 eval_subst (arg0, cval1, maxval, cval2, minval),
4469 = fold (build (code, type,
4470 eval_subst (arg0, cval1, maxval, cval2, maxval),
4473 = fold (build (code, type,
4474 eval_subst (arg0, cval1, minval, cval2, maxval),
4477 /* All three of these results should be 0 or 1. Confirm they
4478 are. Then use those values to select the proper code to return. */
4481 if ((integer_zerop (high_result)
4482 || integer_onep (high_result))
4483 && (integer_zerop (equal_result)
4484 || integer_onep (equal_result))
4485 && (integer_zerop (low_result)
4486 || integer_onep (low_result)))
4488 /* Make a 3-bit mask with the high-order bit being the
4489 value for `>', the next for `=', and the low for `<'. */
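/* With that encoding the mask values select: 0 always false, 1 LT, 2 EQ,
   3 LE, 4 GT, 5 NE, 6 GE, 7 always true.  */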
4490 switch ((integer_onep (high_result) * 4)
4491 + (integer_onep (equal_result) * 2)
4492 + integer_onep (low_result))
4496 return omit_one_operand (type, integer_zero_node, arg0);
4517 return omit_one_operand (type, integer_one_node, arg0);
4520 t = build (code, type, cval1, cval2);
4522 return save_expr (t);
4529 /* If this is a comparison of a field, we may be able to simplify it. */
4530 if ((TREE_CODE (arg0) == COMPONENT_REF
4531 || TREE_CODE (arg0) == BIT_FIELD_REF)
4532 && (code == EQ_EXPR || code == NE_EXPR)
4533 /* Handle the constant case even without -O
4534 to make sure the warnings are given. */
4535 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
4537 t1 = optimize_bit_field_compare (code, type, arg0, arg1);
4541 /* If this is a comparison of complex values and either or both
4542 sides are a COMPLEX_EXPR, it is best to split up the comparisons
4543 and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR. This
4544 may prevent needless evaluations. */
4545 if ((code == EQ_EXPR || code == NE_EXPR)
4546 && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
4547 && (TREE_CODE (arg0) == COMPLEX_EXPR
4548 || TREE_CODE (arg1) == COMPLEX_EXPR))
4550 tree subtype = TREE_TYPE (TREE_TYPE (arg0));
4551 tree real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
4552 tree imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
4553 tree real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
4554 tree imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));
4556 return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
4559 fold (build (code, type, real0, real1)),
4560 fold (build (code, type, imag0, imag1))));
4563 /* From here on, the only cases we handle are when the result is
4564 known to be a constant.
4566 To compute GT, swap the arguments and do LT.
4567 To compute GE, do LT and invert the result.
4568 To compute LE, swap the arguments, do LT and invert the result.
4569 To compute NE, do EQ and invert the result.
4571 Therefore, the code below must handle only EQ and LT. */
4573 if (code == LE_EXPR || code == GT_EXPR)
4575 tem = arg0, arg0 = arg1, arg1 = tem;
4576 code = swap_tree_comparison (code);
4579 /* Note that it is safe to invert for real values here because we
4580 will check below in the one case that it matters. */
4583 if (code == NE_EXPR || code == GE_EXPR)
4586 code = invert_tree_comparison (code);
4589 /* Compute a result for LT or EQ if args permit;
4590 otherwise return T. */
4591 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
4593 if (code == EQ_EXPR)
4594 t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
4595 == TREE_INT_CST_LOW (arg1))
4596 && (TREE_INT_CST_HIGH (arg0)
4597 == TREE_INT_CST_HIGH (arg1)),
4600 t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
4601 ? INT_CST_LT_UNSIGNED (arg0, arg1)
4602 : INT_CST_LT (arg0, arg1)),
4606 /* Assume a nonexplicit constant cannot equal an explicit one,
4607 since such code would be undefined anyway.
4608 Exception: on sysvr4, using #pragma weak,
4609 a label can come out as 0. */
4610 else if (TREE_CODE (arg1) == INTEGER_CST
4611 && !integer_zerop (arg1)
4612 && TREE_CONSTANT (arg0)
4613 && TREE_CODE (arg0) == ADDR_EXPR
4615 t1 = build_int_2 (0, 0);
4617 /* Two real constants can be compared explicitly. */
4618 else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
4620 /* If either operand is a NaN, the result is false with two
4621 exceptions: First, an NE_EXPR is true on NaNs, but that case
4622 is already handled correctly since we will be inverting the
4623 result for NE_EXPR. Second, if we had inverted a LE_EXPR
4624 or a GE_EXPR into a LT_EXPR, we must return true so that it
4625 will be inverted into false. */
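/* For example, if ARG0 is a NaN and the original comparison was
   `ARG0 >= 1.0', it was turned into LT above with `invert' set, so T1 is
   built as 1 here and then inverted below, giving the required false
   result.  */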
4627 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
4628 || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
4629 t1 = build_int_2 (invert && code == LT_EXPR, 0);
4631 else if (code == EQ_EXPR)
4632 t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
4633 TREE_REAL_CST (arg1)),
4636 t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
4637 TREE_REAL_CST (arg1)),
4641 if (t1 == NULL_TREE)
4645 TREE_INT_CST_LOW (t1) ^= 1;
4647 TREE_TYPE (t1) = type;
4651 /* Pedantic ANSI C says that a conditional expression is never an lvalue,
4652 so all simple results must be passed through pedantic_non_lvalue. */
4653 if (TREE_CODE (arg0) == INTEGER_CST)
4654 return pedantic_non_lvalue
4655 (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
4656 else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
4657 return pedantic_non_lvalue (omit_one_operand (type, arg1, arg0));
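/* For example, `1 ? x : y' folds to x and `0 ? x : y' to y, while
   `c ? v : v' folds to v but still evaluates c when it has side effects,
   via omit_one_operand.  */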
4659 /* If the second operand is zero, invert the comparison and swap
4660 the second and third operands. Likewise if the second operand
4661 is constant and the third is not or if the third operand is
4662 equivalent to the first operand of the comparison. */
4664 if (integer_zerop (arg1)
4665 || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
4666 || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4667 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4668 TREE_OPERAND (t, 2),
4669 TREE_OPERAND (arg0, 1))))
4671 /* See if this can be inverted. If it can't, possibly because
4672 it was a floating-point inequality comparison, don't do anything. */
4674 tem = invert_truthvalue (arg0);
4676 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4678 arg0 = TREE_OPERAND (t, 0) = tem;
4679 TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
4680 TREE_OPERAND (t, 2) = arg1;
4681 arg1 = TREE_OPERAND (t, 1);
4685 /* If we have A op B ? A : C, we may be able to convert this to a
4686 simpler expression, depending on the operation and the values
4687 of B and C. IEEE floating point prevents this though,
4688 because A or B might be -0.0 or a NaN. */
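/* For example, under IEEE arithmetic `x >= 0.0 ? x : -x' must not be
   folded to abs (x): for x == -0.0 the condition is true and the result is
   -0.0, whereas abs (x) would be +0.0.  */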
4690 if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4691 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4692 || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
4694 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4695 arg1, TREE_OPERAND (arg0, 1)))
4697 tree arg2 = TREE_OPERAND (t, 2);
4698 enum tree_code comp_code = TREE_CODE (arg0);
4700 /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
4701 depending on the comparison operation. */
4702 if (integer_zerop (TREE_OPERAND (arg0, 1))
4703 && TREE_CODE (arg2) == NEGATE_EXPR
4704 && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
4708 return pedantic_non_lvalue
4709 (fold (build1 (NEGATE_EXPR, type, arg1)));
4711 return pedantic_non_lvalue (convert (type, arg1));
4714 return pedantic_non_lvalue
4715 (fold (build1 (ABS_EXPR, type, arg1)));
4718 return pedantic_non_lvalue
4719 (fold (build1 (NEGATE_EXPR, type,
4720 fold (build1 (ABS_EXPR, type, arg1)))));
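/* So, for instance, `a == 0 ? a : -a' folds to -a, `a != 0 ? a : -a' to a,
   `a >= 0 ? a : -a' (likewise >) to abs (a), and `a <= 0 ? a : -a'
   (likewise <) to -abs (a).  */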
4723 /* If this is A != 0 ? A : 0, this is simply A. For ==, it is zero. */
4726 if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
4728 if (comp_code == NE_EXPR)
4729 return pedantic_non_lvalue (convert (type, arg1));
4730 else if (comp_code == EQ_EXPR)
4731 return pedantic_non_lvalue (convert (type, integer_zero_node));
4734 /* If this is A op B ? A : B, this is either A, B, min (A, B),
4735 or max (A, B), depending on the operation. */
4737 if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
4738 arg2, TREE_OPERAND (arg0, 0)))
4742 return pedantic_non_lvalue (convert (type, arg2));
4744 return pedantic_non_lvalue (convert (type, arg1));
4747 return pedantic_non_lvalue
4748 (fold (build (MIN_EXPR, type, arg1, arg2)));
4751 return pedantic_non_lvalue
4752 (fold (build (MAX_EXPR, type, arg1, arg2)));
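/* For example, `a < b ? a : b' folds to MIN_EXPR (a, b) and
   `a > b ? a : b' to MAX_EXPR (a, b); for EQ and NE the two arms are
   interchangeable, so one operand is simply returned.  */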
4755 /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
4756 we might still be able to simplify this. For example,
4757 if C1 is one less or one more than C2, this might have started
4758 out as a MIN or MAX and been transformed by this function.
4759 Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE. */
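/* Worked example: `a < 4 ? a : 3' has C1 == C2 + 1, so it is the same as
   `a <= 3 ? a : 3', i.e. MIN_EXPR (a, 3).  The TYPE_MAX_VALUE and
   TYPE_MIN_VALUE tests below keep the adjusted constant from wrapping
   around.  */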
4761 if (INTEGRAL_TYPE_P (type)
4762 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4763 && TREE_CODE (arg2) == INTEGER_CST)
4767 /* We can replace A with C1 in this case. */
4768 arg1 = TREE_OPERAND (t, 1)
4769 = convert (type, TREE_OPERAND (arg0, 1));
4773 /* If C1 is C2 + 1, this is min(A, C2). */
4774 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4775 && operand_equal_p (TREE_OPERAND (arg0, 1),
4776 const_binop (PLUS_EXPR, arg2,
4777 integer_one_node, 0), 1))
4778 return pedantic_non_lvalue
4779 (fold (build (MIN_EXPR, type, arg1, arg2)));
4783 /* If C1 is C2 - 1, this is min(A, C2). */
4784 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4785 && operand_equal_p (TREE_OPERAND (arg0, 1),
4786 const_binop (MINUS_EXPR, arg2,
4787 integer_one_node, 0), 1))
4788 return pedantic_non_lvalue
4789 (fold (build (MIN_EXPR, type, arg1, arg2)));
4793 /* If C1 is C2 - 1, this is max(A, C2). */
4794 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4795 && operand_equal_p (TREE_OPERAND (arg0, 1),
4796 const_binop (MINUS_EXPR, arg2,
4797 integer_one_node, 0), 1))
4798 return pedantic_non_lvalue
4799 (fold (build (MAX_EXPR, type, arg1, arg2)));
4803 /* If C1 is C2 + 1, this is max(A, C2). */
4804 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4805 && operand_equal_p (TREE_OPERAND (arg0, 1),
4806 const_binop (PLUS_EXPR, arg2,
4807 integer_one_node, 0), 1))
4808 return pedantic_non_lvalue
4809 (fold (build (MAX_EXPR, type, arg1, arg2)));
4814 /* Convert A ? 1 : 0 to simply A. */
4815 if (integer_onep (TREE_OPERAND (t, 1))
4816 && integer_zerop (TREE_OPERAND (t, 2))
4817 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
4818 call to fold will try to move the conversion inside
4819 a COND, which will recurse. In that case, the COND_EXPR
4820 is probably the best choice, so leave it alone. */
4821 && type == TREE_TYPE (arg0))
4822 return pedantic_non_lvalue (arg0);
4825 /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
4826 operation is simply A & 2. */
4828 if (integer_zerop (TREE_OPERAND (t, 2))
4829 && TREE_CODE (arg0) == NE_EXPR
4830 && integer_zerop (TREE_OPERAND (arg0, 1))
4831 && integer_pow2p (arg1)
4832 && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
4833 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
4835 return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
4840 /* When pedantic, a compound expression can be neither an lvalue
4841 nor an integer constant expression. */
4842 if (TREE_SIDE_EFFECTS (arg0) || pedantic)
4844 /* Don't let (0, 0) be null pointer constant. */
4845 if (integer_zerop (arg1))
4846 return non_lvalue (arg1);
4851 return build_complex (arg0, arg1);
4855 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
4857 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
4858 return omit_one_operand (type, TREE_OPERAND (arg0, 0),
4859 TREE_OPERAND (arg0, 1));
4860 else if (TREE_CODE (arg0) == COMPLEX_CST)
4861 return TREE_REALPART (arg0);
4862 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
4863 return fold (build (TREE_CODE (arg0), type,
4864 fold (build1 (REALPART_EXPR, type,
4865 TREE_OPERAND (arg0, 0))),
4866 fold (build1 (REALPART_EXPR,
4867 type, TREE_OPERAND (arg0, 1)))));
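/* Taking the real part distributes over addition and subtraction:
   REALPART_EXPR of (a + b) becomes REALPART_EXPR (a) + REALPART_EXPR (b).
   The IMAGPART_EXPR case below does the same for the imaginary parts.  */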
4871 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
4872 return convert (type, integer_zero_node);
4873 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
4874 return omit_one_operand (type, TREE_OPERAND (arg0, 1),
4875 TREE_OPERAND (arg0, 0));
4876 else if (TREE_CODE (arg0) == COMPLEX_CST)
4877 return TREE_IMAGPART (arg0);
4878 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
4879 return fold (build (TREE_CODE (arg0), type,
4880 fold (build1 (IMAGPART_EXPR, type,
4881 TREE_OPERAND (arg0, 0))),
4882 fold (build1 (IMAGPART_EXPR, type,
4883 TREE_OPERAND (arg0, 1)))));
4888 } /* switch (code) */