1 /* Fold a constant sub-tree into a single node for the C compiler
2 Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
20 /*@@ This file should be rewritten to use an arbitrary precision
21 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
22 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
23 @@ The routines that translate from the ap rep should
24 @@ warn if precision etc. is lost.
25 @@ This would also make life easier when this technology is used
26 @@ for cross-compilers. */
29 /* The entry points in this file are fold, size_int and size_binop.
31 fold takes a tree as argument and returns a simplified tree.
33 size_binop takes a tree code for an arithmetic operation
34 and two operands that are trees, and produces a tree for the
35 result, assuming the type comes from `sizetype'.
37 size_int takes an integer value, and creates a tree constant
38 with type from `sizetype'. */
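/* An illustrative sketch, not part of the original sources: a caller
that wants the combined size of a 4-byte and an 8-byte field could
write the following (the function name is hypothetical). */
#if 0
static tree
example_size_binop ()
{
tree size1 = size_int (4); /* INTEGER_CST 4, type sizetype */
tree size2 = size_int (8); /* INTEGER_CST 8, type sizetype */
/* Both operands are INTEGER_CSTs, so size_binop folds this
immediately to the constant that size_int would give for 12. */
return size_binop (PLUS_EXPR, size1, size2);
}
#endif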
46 /* Handle floating overflow for `const_binop'. */
47 static jmp_buf float_error;
49 static void encode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT, HOST_WIDE_INT));
50 static void decode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *, HOST_WIDE_INT *));
51 int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
52 HOST_WIDE_INT, HOST_WIDE_INT,
53 HOST_WIDE_INT, HOST_WIDE_INT *,
54 HOST_WIDE_INT *, HOST_WIDE_INT *,
55 HOST_WIDE_INT *));
56 static int split_tree PROTO((tree, enum tree_code, tree *, tree *, int *));
57 static tree const_binop PROTO((enum tree_code, tree, tree, int));
58 static tree fold_convert PROTO((tree, tree));
59 static enum tree_code invert_tree_comparison PROTO((enum tree_code));
60 static enum tree_code swap_tree_comparison PROTO((enum tree_code));
61 static int truth_value_p PROTO((enum tree_code));
62 static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
63 static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
64 static tree eval_subst PROTO((tree, tree, tree, tree, tree));
65 static tree omit_one_operand PROTO((tree, tree, tree));
66 static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
67 static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
68 static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
69 tree, tree));
70 static tree decode_field_reference PROTO((tree, int *, int *,
71 enum machine_mode *, int *,
72 int *, tree *));
73 static int all_ones_mask_p PROTO((tree, int));
74 static int simple_operand_p PROTO((tree));
75 static tree range_test PROTO((enum tree_code, tree, enum tree_code,
76 enum tree_code, tree, tree, tree));
77 static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
78 static tree strip_compound_expr PROTO((tree, tree));
84 /* Yield nonzero if a signed left shift of A by B bits overflows. */
85 #define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))
87 /* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
88 Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
89 Then this yields nonzero if overflow occurred during the addition.
90 Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
91 Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
92 #define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
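/* A minimal sketch, not part of the original sources, exercising the
macro: adding 1 to the largest positive HOST_WIDE_INT wraps to the
most negative value, so the addends agree in sign but the sum does
not, and the macro yields nonzero. */
#if 0
static int
example_overflow_sum_sign ()
{
HOST_WIDE_INT a = ~((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1));
HOST_WIDE_INT b = 1;
HOST_WIDE_INT sum = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) a + b);
return overflow_sum_sign (a, b, sum); /* nonzero: overflow */
}
#endif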
94 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
95 We do that by representing the two-word integer in 4 words, with only
96 HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number. */
98 #define LOWPART(x) \
99 ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
100 #define HIGHPART(x) \
101 ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
102 #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
104 /* Unpack a two-word integer into 4 words.
105 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
106 WORDS points to the array of HOST_WIDE_INTs. */
109 encode (words, low, hi)
110 HOST_WIDE_INT *words;
111 HOST_WIDE_INT low, hi;
113 words[0] = LOWPART (low);
114 words[1] = HIGHPART (low);
115 words[2] = LOWPART (hi);
116 words[3] = HIGHPART (hi);
119 /* Pack an array of 4 words into a two-word integer.
120 WORDS points to the array of words.
121 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
124 decode (words, low, hi)
125 HOST_WIDE_INT *words;
126 HOST_WIDE_INT *low, *hi;
128 *low = words[0] | words[1] * BASE;
129 *hi = words[2] | words[3] * BASE;
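/* Round-trip sketch, not part of the original sources: with a 32-bit
HOST_WIDE_INT, encode splits the doubleword 0x123456789ABCDEF0 into
the four 16-bit digits { 0xDEF0, 0x9ABC, 0x5678, 0x1234 }, least
significant first, and decode reassembles the original pieces. */
#if 0
static void
example_encode_decode ()
{
HOST_WIDE_INT words[4], low, hi;
encode (words, (HOST_WIDE_INT) 0x9ABCDEF0, (HOST_WIDE_INT) 0x12345678);
decode (words, &low, &hi); /* low == 0x9ABCDEF0, hi == 0x12345678 */
}
#endif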
132 /* Make the integer constant T valid for its type
133 by setting to 0 or 1 all the bits in the constant
134 that don't belong in the type.
135 Yield 1 if a signed overflow occurs, 0 otherwise.
136 If OVERFLOW is nonzero, a signed overflow has already occurred
137 in calculating T, so propagate it.
139 Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
143 force_fit_type (t, overflow)
147 HOST_WIDE_INT low, high;
150 if (TREE_CODE (t) == REAL_CST)
152 #ifdef CHECK_FLOAT_VALUE
153 CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
159 else if (TREE_CODE (t) != INTEGER_CST)
162 low = TREE_INT_CST_LOW (t);
163 high = TREE_INT_CST_HIGH (t);
165 if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
168 prec = TYPE_PRECISION (TREE_TYPE (t));
170 /* First clear all bits that are beyond the type's precision. */
172 if (prec == 2 * HOST_BITS_PER_WIDE_INT)
174 else if (prec > HOST_BITS_PER_WIDE_INT)
176 TREE_INT_CST_HIGH (t)
177 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
181 TREE_INT_CST_HIGH (t) = 0;
182 if (prec < HOST_BITS_PER_WIDE_INT)
183 TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
186 /* Unsigned types do not suffer sign extension or overflow. */
187 if (TREE_UNSIGNED (TREE_TYPE (t)))
190 /* If the value's sign bit is set, extend the sign. */
191 if (prec != 2 * HOST_BITS_PER_WIDE_INT
192 && (prec > HOST_BITS_PER_WIDE_INT
193 ? (TREE_INT_CST_HIGH (t)
194 & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
195 : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
197 /* Value is negative:
198 set to 1 all the bits that are outside this type's precision. */
199 if (prec > HOST_BITS_PER_WIDE_INT)
201 TREE_INT_CST_HIGH (t)
202 |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
206 TREE_INT_CST_HIGH (t) = -1;
207 if (prec < HOST_BITS_PER_WIDE_INT)
208 TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
212 /* Yield nonzero if signed overflow occurred. */
213 return
214 ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
215 != 0);
218 /* Add two doubleword integers with doubleword result.
219 Each argument is given as two `HOST_WIDE_INT' pieces.
220 One argument is L1 and H1; the other, L2 and H2.
221 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
224 add_double (l1, h1, l2, h2, lv, hv)
225 HOST_WIDE_INT l1, h1, l2, h2;
226 HOST_WIDE_INT *lv, *hv;
231 h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);
235 return overflow_sum_sign (h1, h2, h);
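/* Usage sketch, not part of the original sources: the carry out of
the low word propagates into the high word, and the return value is
the signed-overflow flag, not the carry. */
#if 0
static void
example_add_double ()
{
HOST_WIDE_INT lo, hi;
int overflow = add_double ((HOST_WIDE_INT) -1, (HOST_WIDE_INT) 0,
(HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
&lo, &hi);
/* The all-ones low word plus 1 carries: lo == 0, hi == 1, and
overflow == 0 because the doubleword sum is representable. */
}
#endif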
238 /* Negate a doubleword integer with doubleword result.
239 Return nonzero if the operation overflows, assuming it's signed.
240 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
241 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
244 neg_double (l1, h1, lv, hv)
245 HOST_WIDE_INT l1, h1;
246 HOST_WIDE_INT *lv, *hv;
252 return (*hv & h1) < 0;
262 /* Multiply two doubleword integers with doubleword result.
263 Return nonzero if the operation overflows, assuming it's signed.
264 Each argument is given as two `HOST_WIDE_INT' pieces.
265 One argument is L1 and H1; the other, L2 and H2.
266 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
269 mul_double (l1, h1, l2, h2, lv, hv)
270 HOST_WIDE_INT l1, h1, l2, h2;
271 HOST_WIDE_INT *lv, *hv;
273 HOST_WIDE_INT arg1[4];
274 HOST_WIDE_INT arg2[4];
275 HOST_WIDE_INT prod[4 * 2];
276 register unsigned HOST_WIDE_INT carry;
277 register int i, j, k;
278 HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
280 encode (arg1, l1, h1);
281 encode (arg2, l2, h2);
283 bzero ((char *) prod, sizeof prod);
285 for (i = 0; i < 4; i++)
288 for (j = 0; j < 4; j++)
291 /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
292 carry += arg1[i] * arg2[j];
293 /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
295 prod[k] = LOWPART (carry);
296 carry = HIGHPART (carry);
301 decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */
303 /* Check for overflow by calculating the top half of the answer in full;
304 it should agree with the low half's sign bit. */
305 decode (prod+4, &toplow, &tophigh);
308 neg_double (l2, h2, &neglow, &neghigh);
309 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
313 neg_double (l1, h1, &neglow, &neghigh);
314 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
316 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
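/* Sketch of the check above, not part of the original sources: on a
host with a 32-bit HOST_WIDE_INT, squaring 2^32 (low word 0, high
word 1) produces 2^64, whose entire value lands in the discarded top
half, so mul_double reports overflow. */
#if 0
static int
example_mul_double_overflow ()
{
HOST_WIDE_INT lo, hi;
return mul_double ((HOST_WIDE_INT) 0, (HOST_WIDE_INT) 1,
(HOST_WIDE_INT) 0, (HOST_WIDE_INT) 1,
&lo, &hi); /* nonzero; lo == hi == 0 */
}
#endif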
319 /* Shift the doubleword integer in L1, H1 left by COUNT places
320 keeping only PREC bits of result.
321 Shift right if COUNT is negative.
322 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
323 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
326 lshift_double (l1, h1, count, prec, lv, hv, arith)
327 HOST_WIDE_INT l1, h1, count;
329 HOST_WIDE_INT *lv, *hv;
334 rshift_double (l1, h1, - count, prec, lv, hv, arith);
339 count = (unsigned HOST_WIDE_INT) count % prec;
341 if (count >= HOST_BITS_PER_WIDE_INT)
343 *hv = (unsigned HOST_WIDE_INT) l1 << count - HOST_BITS_PER_WIDE_INT;
348 *hv = (((unsigned HOST_WIDE_INT) h1 << count)
349 | ((unsigned HOST_WIDE_INT) l1 >> HOST_BITS_PER_WIDE_INT - count - 1 >> 1));
350 *lv = (unsigned HOST_WIDE_INT) l1 << count;
354 /* Shift the doubleword integer in L1, H1 right by COUNT places
355 keeping only PREC bits of result. COUNT must be positive.
356 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
357 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
360 rshift_double (l1, h1, count, prec, lv, hv, arith)
361 HOST_WIDE_INT l1, h1, count;
363 HOST_WIDE_INT *lv, *hv;
366 unsigned HOST_WIDE_INT signmask;
368 ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
372 count = (unsigned HOST_WIDE_INT) count % prec;
374 if (count >= HOST_BITS_PER_WIDE_INT)
377 *lv = ((signmask << 2 * HOST_BITS_PER_WIDE_INT - count - 1 << 1)
378 | ((unsigned HOST_WIDE_INT) h1 >> count - HOST_BITS_PER_WIDE_INT));
382 *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
383 | ((unsigned HOST_WIDE_INT) h1 << HOST_BITS_PER_WIDE_INT - count - 1 << 1));
384 *hv = ((signmask << HOST_BITS_PER_WIDE_INT - count)
385 | ((unsigned HOST_WIDE_INT) h1 >> count));
389 /* Rotate the doubleword integer in L1, H1 left by COUNT places
390 keeping only PREC bits of result.
391 Rotate right if COUNT is negative.
392 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
395 lrotate_double (l1, h1, count, prec, lv, hv)
396 HOST_WIDE_INT l1, h1, count;
398 HOST_WIDE_INT *lv, *hv;
400 HOST_WIDE_INT arg1[4];
406 rrotate_double (l1, h1, - count, prec, lv, hv);
410 encode (arg1, l1, h1);
415 carry = arg1[4 - 1] >> (HOST_BITS_PER_WIDE_INT / 2 - 1);
418 for (i = 0; i < 4; i++)
420 carry += arg1[i] << 1;
421 arg1[i] = LOWPART (carry);
422 carry = HIGHPART (carry);
427 decode (arg1, lv, hv);
430 /* Rotate the doubleword integer in L1, H1 right by COUNT places
431 keeping only PREC bits of result. COUNT must be positive.
432 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
435 rrotate_double (l1, h1, count, prec, lv, hv)
436 HOST_WIDE_INT l1, h1, count;
438 HOST_WIDE_INT *lv, *hv;
440 HOST_WIDE_INT arg1[4];
444 encode (arg1, l1, h1);
452 for (i = 4 - 1; i >= 0; i--)
456 arg1[i] = LOWPART (carry >> 1);
461 decode (arg1, lv, hv);
464 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
465 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
466 CODE is a tree code for a kind of division, one of
467 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
469 It controls how the quotient is rounded to an integer.
470 Return nonzero if the operation overflows.
471 UNS nonzero says do unsigned division. */
474 div_and_round_double (code, uns,
475 lnum_orig, hnum_orig, lden_orig, hden_orig,
476 lquo, hquo, lrem, hrem)
479 HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
480 HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
481 HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
484 HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
485 HOST_WIDE_INT den[4], quo[4];
487 unsigned HOST_WIDE_INT work;
488 register int carry = 0;
489 HOST_WIDE_INT lnum = lnum_orig;
490 HOST_WIDE_INT hnum = hnum_orig;
491 HOST_WIDE_INT lden = lden_orig;
492 HOST_WIDE_INT hden = hden_orig;
495 if ((hden == 0) && (lden == 0))
498 /* calculate quotient sign and convert operands to unsigned. */
504 /* (minimum integer) / (-1) is the only overflow case. */
505 if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
511 neg_double (lden, hden, &lden, &hden);
515 if (hnum == 0 && hden == 0)
516 { /* single precision */
518 /* This unsigned division rounds toward zero. */
519 *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
524 { /* trivial case: dividend < divisor */
525 /* hden != 0 already checked. */
532 bzero ((char *) quo, sizeof quo);
534 bzero ((char *) num, sizeof num); /* to zero the extra scaling element */
535 bzero ((char *) den, sizeof den);
537 encode (num, lnum, hnum);
538 encode (den, lden, hden);
540 /* Special code for when the divisor < BASE. */
541 if (hden == 0 && lden < BASE)
543 /* hnum != 0 already checked. */
544 for (i = 4 - 1; i >= 0; i--)
546 work = num[i] + carry * BASE;
547 quo[i] = work / (unsigned HOST_WIDE_INT) lden;
548 carry = work % (unsigned HOST_WIDE_INT) lden;
553 /* Full double precision division,
554 with thanks to Don Knuth's "Seminumerical Algorithms". */
555 int quo_est, scale, num_hi_sig, den_hi_sig;
557 /* Find the highest non-zero divisor digit. */
558 for (i = 4 - 1; ; i--)
564 /* Ensure that the first digit of the divisor is at least BASE/2.
565 This is required by the quotient digit estimation algorithm. */
567 scale = BASE / (den[den_hi_sig] + 1);
568 if (scale > 1) { /* scale divisor and dividend */
570 for (i = 0; i <= 4 - 1; i++) {
571 work = (num[i] * scale) + carry;
572 num[i] = LOWPART (work);
573 carry = HIGHPART (work);
576 for (i = 0; i <= 4 - 1; i++) {
577 work = (den[i] * scale) + carry;
578 den[i] = LOWPART (work);
579 carry = HIGHPART (work);
580 if (den[i] != 0) den_hi_sig = i;
587 for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
588 /* guess the next quotient digit, quo_est, by dividing the first
589 two remaining dividend digits by the high order quotient digit.
590 quo_est is never low and is at most 2 high. */
591 unsigned HOST_WIDE_INT tmp;
593 num_hi_sig = i + den_hi_sig + 1;
594 work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
595 if (num[num_hi_sig] != den[den_hi_sig])
596 quo_est = work / den[den_hi_sig];
600 /* refine quo_est so it's usually correct, and at most one high. */
601 tmp = work - quo_est * den[den_hi_sig];
603 && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
606 /* Try QUO_EST as the quotient digit, by multiplying the
607 divisor by QUO_EST and subtracting from the remaining dividend.
608 Keep in mind that QUO_EST is the (I - 1)st digit.
611 for (j = 0; j <= den_hi_sig; j++)
613 work = quo_est * den[j] + carry;
614 carry = HIGHPART (work);
615 work = num[i + j] - LOWPART (work);
616 num[i + j] = LOWPART (work);
617 carry += HIGHPART (work) != 0;
620 /* if quo_est was high by one, then num[i] went negative and
621 we need to correct things. */
623 if (num[num_hi_sig] < carry)
626 carry = 0; /* add divisor back in */
627 for (j = 0; j <= den_hi_sig; j++)
629 work = num[i + j] + den[j] + carry;
630 carry = HIGHPART (work);
631 num[i + j] = LOWPART (work);
633 num [num_hi_sig] += carry;
636 /* store the quotient digit. */
641 decode (quo, lquo, hquo);
644 /* if result is negative, make it so. */
646 neg_double (*lquo, *hquo, lquo, hquo);
648 /* compute trial remainder: rem = num - (quo * den) */
649 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
650 neg_double (*lrem, *hrem, lrem, hrem);
651 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
656 case TRUNC_MOD_EXPR: /* round toward zero */
657 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
661 case FLOOR_MOD_EXPR: /* round toward negative infinity */
662 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
665 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
668 else return overflow;
672 case CEIL_MOD_EXPR: /* round toward positive infinity */
673 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
675 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
678 else return overflow;
682 case ROUND_MOD_EXPR: /* round to closest integer */
684 HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
685 HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
687 /* get absolute values */
688 if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
689 if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
691 /* if (2 * abs (lrem) >= abs (lden)) */
692 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
693 labs_rem, habs_rem, <wice, &htwice);
694 if (((unsigned HOST_WIDE_INT) habs_den
695 < (unsigned HOST_WIDE_INT) htwice)
696 || (((unsigned HOST_WIDE_INT) habs_den
697 == (unsigned HOST_WIDE_INT) htwice)
698 && ((unsigned HOST_WIDE_INT) labs_den
699 < (unsigned HOST_WIDE_INT) ltwice)))
703 add_double (*lquo, *hquo,
704 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
707 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
710 else return overflow;
718 /* compute true remainder: rem = num - (quo * den) */
719 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
720 neg_double (*lrem, *hrem, lrem, hrem);
721 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
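/* Worked sketch, not part of the original sources, of the rounding
corrections above for -7 divided by 2 (trial quotient -3,
remainder -1):
TRUNC_DIV_EXPR quo == -3, rem == -1 (toward zero, no change)
FLOOR_DIV_EXPR quo == -4, rem == 1 (toward negative infinity)
CEIL_DIV_EXPR quo == -3, rem == -1 (toward positive infinity)
ROUND_DIV_EXPR quo == -4, rem == 1 (2*|rem| >= |den|, so the
half is rounded away)
The remainder is always recomputed as num - quo * den after the
quotient is adjusted. */
#if 0
static void
example_floor_division ()
{
HOST_WIDE_INT lquo, hquo, lrem, hrem;
div_and_round_double (FLOOR_DIV_EXPR, 0,
(HOST_WIDE_INT) -7, (HOST_WIDE_INT) -1,
(HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
&lquo, &hquo, &lrem, &hrem);
/* lquo == -4 (hquo is its sign extension), lrem == 1, hrem == 0. */
}
#endif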
725 #ifndef REAL_ARITHMETIC
726 /* Effectively truncate a real value to represent the nearest possible value
727 in a narrower mode. The result is actually represented in the same data
728 type as the argument, but its value is usually different.
730 A trap may occur during the FP operations and it is the responsibility
731 of the calling function to have a handler established. */
734 real_value_truncate (mode, arg)
735 enum machine_mode mode;
738 return REAL_VALUE_TRUNCATE (mode, arg);
741 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
743 /* Check for infinity in an IEEE double precision number. */
749 /* The IEEE 64-bit double format. */
754 unsigned exponent : 11;
755 unsigned mantissa1 : 20;
760 unsigned mantissa1 : 20;
761 unsigned exponent : 11;
767 if (u.big_endian.sign == 1)
770 return (u.big_endian.exponent == 2047
771 && u.big_endian.mantissa1 == 0
772 && u.big_endian.mantissa2 == 0);
777 return (u.little_endian.exponent == 2047
778 && u.little_endian.mantissa1 == 0
779 && u.little_endian.mantissa2 == 0);
783 /* Check whether an IEEE double precision number is a NaN. */
789 /* The IEEE 64-bit double format. */
794 unsigned exponent : 11;
795 unsigned mantissa1 : 20;
800 unsigned mantissa1 : 20;
801 unsigned exponent : 11;
807 if (u.big_endian.sign == 1)
810 return (u.big_endian.exponent == 2047
811 && (u.big_endian.mantissa1 != 0
812 || u.big_endian.mantissa2 != 0));
817 return (u.little_endian.exponent == 2047
818 && (u.little_endian.mantissa1 != 0
819 || u.little_endian.mantissa2 != 0));
823 /* Check for a negative IEEE double precision number. */
829 /* The IEEE 64-bit double format. */
834 unsigned exponent : 11;
835 unsigned mantissa1 : 20;
840 unsigned mantissa1 : 20;
841 unsigned exponent : 11;
847 if (u.big_endian.sign == 1)
850 return u.big_endian.sign;
855 return u.little_endian.sign;
858 #else /* Target not IEEE */
860 /* Let's assume other float formats don't have infinity.
861 (This can be overridden by redefining REAL_VALUE_ISINF.) */
869 /* Let's assume other float formats don't have NaNs.
870 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
878 /* Let's assume other float formats don't have minus zero.
879 (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
886 #endif /* Target not IEEE */
887 #endif /* no REAL_ARITHMETIC */
889 /* Split a tree IN into a constant and a variable part
890 that could be combined with CODE to make IN.
891 CODE must be a commutative arithmetic operation.
892 Store the constant part into *CONP and the variable in *VARP.
893 Return 1 if this was done; zero means the tree IN did not decompose
896 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
897 Therefore, we must tell the caller whether the variable part
898 was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
899 The value stored is the coefficient for the variable term.
900 The constant term we return should always be added;
901 we negate it if necessary. */
904 split_tree (in, code, varp, conp, varsignp)
910 register tree outtype = TREE_TYPE (in);
914 /* Strip any conversions that don't change the machine mode. */
915 while ((TREE_CODE (in) == NOP_EXPR
916 || TREE_CODE (in) == CONVERT_EXPR)
917 && (TYPE_MODE (TREE_TYPE (in))
918 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
919 in = TREE_OPERAND (in, 0);
921 if (TREE_CODE (in) == code
922 || (! FLOAT_TYPE_P (TREE_TYPE (in))
923 /* We can associate addition and subtraction together
924 (even though the C standard doesn't say so)
925 for integers because the value is not affected.
926 For reals, the value might be affected, so we can't. */
927 && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
928 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
930 enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
931 if (code == INTEGER_CST)
933 *conp = TREE_OPERAND (in, 0);
934 *varp = TREE_OPERAND (in, 1);
935 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
936 && TREE_TYPE (*varp) != outtype)
937 *varp = convert (outtype, *varp);
938 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
941 if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
943 *conp = TREE_OPERAND (in, 1);
944 *varp = TREE_OPERAND (in, 0);
946 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
947 && TREE_TYPE (*varp) != outtype)
948 *varp = convert (outtype, *varp);
949 if (TREE_CODE (in) == MINUS_EXPR)
951 /* If operation is subtraction and constant is second,
952 must negate it to get an additive constant.
953 And this cannot be done unless it is a manifest constant.
954 It could also be the address of a static variable.
955 We cannot negate that, so give up. */
956 if (TREE_CODE (*conp) == INTEGER_CST)
957 /* Subtracting from integer_zero_node loses for long long. */
958 *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
964 if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
966 *conp = TREE_OPERAND (in, 0);
967 *varp = TREE_OPERAND (in, 1);
968 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
969 && TREE_TYPE (*varp) != outtype)
970 *varp = convert (outtype, *varp);
971 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
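/* Worked sketch, not part of the original sources: for IN = `x - 5'
with CODE = PLUS_EXPR, *VARP gets `x', the constant is negated so
*CONP gets -5, and *VARSIGNP is 1: IN == (+1)*x + (-5). For
IN = `5 - x', *VARP gets `x', *CONP gets 5, and *VARSIGNP is -1:
IN == (-1)*x + 5. */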
978 /* Combine two constants ARG1 and ARG2 under operation CODE
979 to produce a new constant.
980 We assume ARG1 and ARG2 have the same data type,
981 or at least are the same kind of constant and the same machine mode.
983 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
986 const_binop (code, arg1, arg2, notrunc)
988 register tree arg1, arg2;
991 if (TREE_CODE (arg1) == INTEGER_CST)
993 register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
994 register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
995 HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
996 HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
997 HOST_WIDE_INT low, hi;
998 HOST_WIDE_INT garbagel, garbageh;
1000 int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
1006 t = build_int_2 (int1l | int2l, int1h | int2h);
1010 t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
1014 t = build_int_2 (int1l & int2l, int1h & int2h);
1017 case BIT_ANDTC_EXPR:
1018 t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
1024 /* It's unclear from the C standard whether shifts can overflow.
1025 The following code ignores overflow; perhaps a C standard
1026 interpretation ruling is needed. */
1027 lshift_double (int1l, int1h, int2l,
1028 TYPE_PRECISION (TREE_TYPE (arg1)),
1031 t = build_int_2 (low, hi);
1032 TREE_TYPE (t) = TREE_TYPE (arg1);
1034 force_fit_type (t, 0);
1035 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
1036 TREE_CONSTANT_OVERFLOW (t)
1037 = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
1043 lrotate_double (int1l, int1h, int2l,
1044 TYPE_PRECISION (TREE_TYPE (arg1)),
1046 t = build_int_2 (low, hi);
1053 if ((unsigned HOST_WIDE_INT) int2l < int1l)
1056 overflow = int2h < hi;
1058 t = build_int_2 (int2l, int2h);
1064 if ((unsigned HOST_WIDE_INT) int1l < int2l)
1067 overflow = int1h < hi;
1069 t = build_int_2 (int1l, int1h);
1072 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
1073 t = build_int_2 (low, hi);
1077 if (int2h == 0 && int2l == 0)
1079 t = build_int_2 (int1l, int1h);
1082 neg_double (int2l, int2h, &low, &hi);
1083 add_double (int1l, int1h, low, hi, &low, &hi);
1084 overflow = overflow_sum_sign (hi, int2h, int1h);
1085 t = build_int_2 (low, hi);
1089 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1090 t = build_int_2 (low, hi);
1093 case TRUNC_DIV_EXPR:
1094 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1095 case EXACT_DIV_EXPR:
1096 /* This is a shortcut for a common special case.
1097 It reduces the number of tree nodes generated
1099 if (int2h == 0 && int2l > 0
1100 && TREE_TYPE (arg1) == sizetype
1101 && int1h == 0 && int1l >= 0)
1103 if (code == CEIL_DIV_EXPR)
1105 return size_int (int1l / int2l);
1107 case ROUND_DIV_EXPR:
1108 if (int2h == 0 && int2l == 1)
1110 t = build_int_2 (int1l, int1h);
1113 if (int1l == int2l && int1h == int2h)
1115 if ((int1l | int1h) == 0)
1117 t = build_int_2 (1, 0);
1120 overflow = div_and_round_double (code, uns,
1121 int1l, int1h, int2l, int2h,
1122 &low, &hi, &garbagel, &garbageh);
1123 t = build_int_2 (low, hi);
1126 case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
1127 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
1128 overflow = div_and_round_double (code, uns,
1129 int1l, int1h, int2l, int2h,
1130 &garbagel, &garbageh, &low, &hi);
1131 t = build_int_2 (low, hi);
1138 low = (((unsigned HOST_WIDE_INT) int1h
1139 < (unsigned HOST_WIDE_INT) int2h)
1140 || (((unsigned HOST_WIDE_INT) int1h
1141 == (unsigned HOST_WIDE_INT) int2h)
1142 && ((unsigned HOST_WIDE_INT) int1l
1143 < (unsigned HOST_WIDE_INT) int2l)));
1147 low = ((int1h < int2h)
1148 || ((int1h == int2h)
1149 && ((unsigned HOST_WIDE_INT) int1l
1150 < (unsigned HOST_WIDE_INT) int2l)));
1152 if (low == (code == MIN_EXPR))
1153 t = build_int_2 (int1l, int1h);
1155 t = build_int_2 (int2l, int2h);
1162 TREE_TYPE (t) = TREE_TYPE (arg1);
1164 = ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
1165 | TREE_OVERFLOW (arg1)
1166 | TREE_OVERFLOW (arg2));
1167 TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
1168 | TREE_CONSTANT_OVERFLOW (arg1)
1169 | TREE_CONSTANT_OVERFLOW (arg2));
1172 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1173 if (TREE_CODE (arg1) == REAL_CST)
1178 REAL_VALUE_TYPE value;
1181 d1 = TREE_REAL_CST (arg1);
1182 d2 = TREE_REAL_CST (arg2);
1184 /* If either operand is a NaN, just return it. Otherwise, set up
1185 for floating-point trap; we return an overflow. */
1186 if (REAL_VALUE_ISNAN (d1))
1188 else if (REAL_VALUE_ISNAN (d2))
1190 else if (setjmp (float_error))
1192 t = copy_node (arg1);
1197 set_float_handler (float_error);
1199 #ifdef REAL_ARITHMETIC
1200 REAL_ARITHMETIC (value, code, d1, d2);
1217 #ifndef REAL_INFINITY
1226 value = MIN (d1, d2);
1230 value = MAX (d1, d2);
1236 #endif /* no REAL_ARITHMETIC */
1237 t = build_real (TREE_TYPE (arg1),
1238 real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
1240 set_float_handler (NULL_PTR);
1243 = (force_fit_type (t, overflow)
1244 | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
1245 TREE_CONSTANT_OVERFLOW (t)
1247 | TREE_CONSTANT_OVERFLOW (arg1)
1248 | TREE_CONSTANT_OVERFLOW (arg2);
1251 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1252 if (TREE_CODE (arg1) == COMPLEX_CST)
1254 register tree r1 = TREE_REALPART (arg1);
1255 register tree i1 = TREE_IMAGPART (arg1);
1256 register tree r2 = TREE_REALPART (arg2);
1257 register tree i2 = TREE_IMAGPART (arg2);
1263 t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
1264 const_binop (PLUS_EXPR, i1, i2, notrunc));
1268 t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
1269 const_binop (MINUS_EXPR, i1, i2, notrunc));
1273 t = build_complex (const_binop (MINUS_EXPR,
1274 const_binop (MULT_EXPR,
1276 const_binop (MULT_EXPR,
1279 const_binop (PLUS_EXPR,
1280 const_binop (MULT_EXPR,
1282 const_binop (MULT_EXPR,
1289 register tree magsquared
1290 = const_binop (PLUS_EXPR,
1291 const_binop (MULT_EXPR, r2, r2, notrunc),
1292 const_binop (MULT_EXPR, i2, i2, notrunc),
1296 (const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1297 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1298 const_binop (PLUS_EXPR,
1299 const_binop (MULT_EXPR, r1, r2,
1301 const_binop (MULT_EXPR, i1, i2,
1304 magsquared, notrunc),
1305 const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1306 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1307 const_binop (MINUS_EXPR,
1308 const_binop (MULT_EXPR, i1, r2,
1310 const_binop (MULT_EXPR, r1, i2,
1313 magsquared, notrunc));
1320 TREE_TYPE (t) = TREE_TYPE (arg1);
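/* Usage sketch, not part of the original sources, folding 6 * 7 at
compile time with the tree-building primitives from tree.c. */
#if 0
static tree
example_const_binop ()
{
tree a = build_int_2 (6, 0);
tree b = build_int_2 (7, 0);
TREE_TYPE (a) = TREE_TYPE (b) = integer_type_node;
/* Returns an INTEGER_CST with TREE_INT_CST_LOW == 42; NOTRUNC == 0
asks for the result to be fitted to the type. */
return const_binop (MULT_EXPR, a, b, 0);
}
#endif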
1326 /* Return an INTEGER_CST with value V and type from `sizetype'. */
1330 unsigned int number;
1333 /* Type-size nodes already made for small sizes. */
1334 static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];
1336 if (number < 2*HOST_BITS_PER_WIDE_INT + 1
1337 && size_table[number] != 0)
1338 return size_table[number];
1339 if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
1341 push_obstacks_nochange ();
1342 /* Make this a permanent node. */
1343 end_temporary_allocation ();
1344 t = build_int_2 (number, 0);
1345 TREE_TYPE (t) = sizetype;
1346 size_table[number] = t;
1351 t = build_int_2 (number, 0);
1352 TREE_TYPE (t) = sizetype;
1357 /* Combine operands ARG0 and ARG1 with arithmetic operation CODE.
1358 CODE is a tree code. Data type is taken from `sizetype'.
1359 If the operands are constant, so is the result. */
1362 size_binop (code, arg0, arg1)
1363 enum tree_code code;
1366 /* Handle the special case of two integer constants faster. */
1367 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1369 /* And some specific cases even faster than that. */
1370 if (code == PLUS_EXPR
1371 && TREE_INT_CST_LOW (arg0) == 0
1372 && TREE_INT_CST_HIGH (arg0) == 0)
1374 if (code == MINUS_EXPR
1375 && TREE_INT_CST_LOW (arg1) == 0
1376 && TREE_INT_CST_HIGH (arg1) == 0)
1378 if (code == MULT_EXPR
1379 && TREE_INT_CST_LOW (arg0) == 1
1380 && TREE_INT_CST_HIGH (arg0) == 0)
1382 /* Handle general case of two integer constants. */
1383 return const_binop (code, arg0, arg1, 1);
1386 if (arg0 == error_mark_node || arg1 == error_mark_node)
1387 return error_mark_node;
1389 return fold (build (code, sizetype, arg0, arg1));
1392 /* Given T, a tree representing type conversion of ARG1, a constant,
1393 return a constant tree representing the result of conversion. */
1396 fold_convert (t, arg1)
1400 register tree type = TREE_TYPE (t);
1403 if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
1405 if (TREE_CODE (arg1) == INTEGER_CST)
1407 /* Given an integer constant, make new constant with new type,
1408 appropriately sign-extended or truncated. */
1409 t = build_int_2 (TREE_INT_CST_LOW (arg1),
1410 TREE_INT_CST_HIGH (arg1));
1411 TREE_TYPE (t) = type;
1412 /* Indicate an overflow if (1) ARG1 already overflowed,
1413 or (2) force_fit_type indicates an overflow.
1414 Tell force_fit_type that an overflow has already occurred
1415 if ARG1 is a too-large unsigned value and T is signed. */
1417 = (TREE_OVERFLOW (arg1)
1418 | force_fit_type (t,
1419 (TREE_INT_CST_HIGH (arg1) < 0
1420 & (TREE_UNSIGNED (type)
1421 < TREE_UNSIGNED (TREE_TYPE (arg1))))));
1422 TREE_CONSTANT_OVERFLOW (t)
1423 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1425 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1426 else if (TREE_CODE (arg1) == REAL_CST)
1428 /* Don't initialize these, use assignments.
1429 Initialized local aggregates don't work on old compilers. */
1434 x = TREE_REAL_CST (arg1);
1435 l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
1436 u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
1437 /* See if X will be in range after truncation towards 0.
1438 To compensate for truncation, move the bounds away from 0,
1439 but reject if X exactly equals the adjusted bounds. */
1440 #ifdef REAL_ARITHMETIC
1441 REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
1442 REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
1447 /* If X is a NaN, use zero instead and show we have an overflow.
1448 Otherwise, range check. */
1449 if (REAL_VALUE_ISNAN (x))
1450 overflow = 1, x = dconst0;
1451 else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
1454 #ifndef REAL_ARITHMETIC
1456 HOST_WIDE_INT low, high;
1457 HOST_WIDE_INT half_word
1458 = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
1463 high = (HOST_WIDE_INT) (x / half_word / half_word);
1464 x -= (REAL_VALUE_TYPE) high * half_word * half_word;
1465 if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
1467 low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
1468 low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
1471 low = (HOST_WIDE_INT) x;
1472 if (TREE_REAL_CST (arg1) < 0)
1473 neg_double (low, high, &low, &high);
1474 t = build_int_2 (low, high);
1478 HOST_WIDE_INT low, high;
1479 REAL_VALUE_TO_INT (&low, &high, x);
1480 t = build_int_2 (low, high);
1483 TREE_TYPE (t) = type;
1485 = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
1486 TREE_CONSTANT_OVERFLOW (t)
1487 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1489 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1490 TREE_TYPE (t) = type;
1492 else if (TREE_CODE (type) == REAL_TYPE)
1494 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1495 if (TREE_CODE (arg1) == INTEGER_CST)
1496 return build_real_from_int_cst (type, arg1);
1497 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1498 if (TREE_CODE (arg1) == REAL_CST)
1500 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
1502 else if (setjmp (float_error))
1505 t = copy_node (arg1);
1508 set_float_handler (float_error);
1510 t = build_real (type, real_value_truncate (TYPE_MODE (type),
1511 TREE_REAL_CST (arg1)));
1512 set_float_handler (NULL_PTR);
1516 = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
1517 TREE_CONSTANT_OVERFLOW (t)
1518 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1522 TREE_CONSTANT (t) = 1;
1526 /* Return an expr equal to X but certainly not valid as an lvalue.
1527 Also make sure it is not valid as a null pointer constant. */
1535 /* These things are certainly not lvalues. */
1536 if (TREE_CODE (x) == NON_LVALUE_EXPR
1537 || TREE_CODE (x) == INTEGER_CST
1538 || TREE_CODE (x) == REAL_CST
1539 || TREE_CODE (x) == STRING_CST
1540 || TREE_CODE (x) == ADDR_EXPR)
1542 if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
1544 /* Use NOP_EXPR instead of NON_LVALUE_EXPR
1545 so convert_for_assignment won't strip it.
1546 This is so this 0 won't be treated as a null pointer constant. */
1547 result = build1 (NOP_EXPR, TREE_TYPE (x), x);
1548 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1554 result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
1555 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1559 /* When pedantic, return an expr equal to X but certainly not valid as a
1560 pedantic lvalue. Otherwise, return X. */
1563 pedantic_non_lvalue (x)
1567 return non_lvalue (x);
1572 /* Given a tree comparison code, return the code that is the logical inverse
1573 of the given code. It is not safe to do this for floating-point
1574 comparisons, except for NE_EXPR and EQ_EXPR. */
1576 static enum tree_code
1577 invert_tree_comparison (code)
1578 enum tree_code code;
1599 /* Similar, but return the comparison that results if the operands are
1600 swapped. This is safe for floating-point. */
1602 static enum tree_code
1603 swap_tree_comparison (code)
1604 enum tree_code code;
1624 /* Return nonzero if CODE is a tree code that represents a truth value. */
1627 truth_value_p (code)
1628 enum tree_code code;
1630 return (TREE_CODE_CLASS (code) == '<'
1631 || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
1632 || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
1633 || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
1636 /* Return nonzero if two operands are necessarily equal.
1637 If ONLY_CONST is non-zero, only return non-zero for constants.
1638 This function tests whether the operands are indistinguishable;
1639 it does not test whether they are equal using C's == operation.
1640 The distinction is important for IEEE floating point, because
1641 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
1642 (2) two NaNs may be indistinguishable, but NaN!=NaN. */
1645 operand_equal_p (arg0, arg1, only_const)
1649 /* If both types don't have the same signedness, then we can't consider
1650 them equal. We must check this before the STRIP_NOPS calls
1651 because they may change the signedness of the arguments. */
1652 if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
1658 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
1659 We don't care about side effects in that case because the SAVE_EXPR
1660 takes care of that for us. */
1661 if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
1662 return ! only_const;
1664 if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
1667 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1668 && TREE_CODE (arg0) == ADDR_EXPR
1669 && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
1672 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1673 && TREE_CODE (arg0) == INTEGER_CST
1674 && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
1675 && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
1678 /* Detect when real constants are equal. */
1679 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1680 && TREE_CODE (arg0) == REAL_CST)
1681 return !bcmp ((char *) &TREE_REAL_CST (arg0),
1682 (char *) &TREE_REAL_CST (arg1),
1683 sizeof (REAL_VALUE_TYPE));
1691 if (TREE_CODE (arg0) != TREE_CODE (arg1))
1693 /* This is needed for conversions and for COMPONENT_REF.
1694 Might as well play it safe and always test this. */
1695 if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
1698 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
1701 /* Two conversions are equal only if signedness and modes match. */
1702 if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
1703 && (TREE_UNSIGNED (TREE_TYPE (arg0))
1704 != TREE_UNSIGNED (TREE_TYPE (arg1))))
1707 return operand_equal_p (TREE_OPERAND (arg0, 0),
1708 TREE_OPERAND (arg1, 0), 0);
1712 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1713 TREE_OPERAND (arg1, 0), 0)
1714 && operand_equal_p (TREE_OPERAND (arg0, 1),
1715 TREE_OPERAND (arg1, 1), 0));
1718 switch (TREE_CODE (arg0))
1721 return operand_equal_p (TREE_OPERAND (arg0, 0),
1722 TREE_OPERAND (arg1, 0), 0);
1726 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1727 TREE_OPERAND (arg1, 0), 0)
1728 && operand_equal_p (TREE_OPERAND (arg0, 1),
1729 TREE_OPERAND (arg1, 1), 0));
1732 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1733 TREE_OPERAND (arg1, 0), 0)
1734 && operand_equal_p (TREE_OPERAND (arg0, 1),
1735 TREE_OPERAND (arg1, 1), 0)
1736 && operand_equal_p (TREE_OPERAND (arg0, 2),
1737 TREE_OPERAND (arg1, 2), 0));
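/* Note, not part of the original sources: the bitwise REAL_CST
comparison earlier in this function is what keeps 0.0 and -0.0
distinct here. As C expressions they compare equal, but their
REAL_VALUE_TYPE images differ in the sign bit, so bcmp reports a
mismatch and operand_equal_p correctly returns 0. */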
1745 /* Similar to operand_equal_p, but see if ARG0 might have been made by
1746 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
1748 When in doubt, return 0. */
1751 operand_equal_for_comparison_p (arg0, arg1, other)
1755 int unsignedp1, unsignedpo;
1756 tree primarg1, primother;
1757 unsigned correct_width;
1759 if (operand_equal_p (arg0, arg1, 0))
1762 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
1765 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
1766 actual comparison operand, ARG0.
1768 First throw away any conversions to wider types
1769 already present in the operands. */
1771 primarg1 = get_narrower (arg1, &unsignedp1);
1772 primother = get_narrower (other, &unsignedpo);
1774 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
1775 if (unsignedp1 == unsignedpo
1776 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
1777 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
1779 tree type = TREE_TYPE (arg0);
1781 /* Make sure shorter operand is extended the right way
1782 to match the longer operand. */
1783 primarg1 = convert (signed_or_unsigned_type (unsignedp1,
1784 TREE_TYPE (primarg1)),
1787 if (operand_equal_p (arg0, convert (type, primarg1), 0))
1794 /* See if ARG is an expression that is either a comparison or is performing
1795 arithmetic on comparisons. The comparisons must only be comparing
1796 two different values, which will be stored in *CVAL1 and *CVAL2; if
1797 they are non-zero it means that some operands have already been found.
1798 No variables may be used anywhere else in the expression except in the
1799 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
1800 the expression and save_expr needs to be called with CVAL1 and CVAL2.
1802 If this is true, return 1. Otherwise, return zero. */
1805 twoval_comparison_p (arg, cval1, cval2, save_p)
1807 tree *cval1, *cval2;
1810 enum tree_code code = TREE_CODE (arg);
1811 char class = TREE_CODE_CLASS (code);
1813 /* We can handle some of the 'e' cases here. */
1814 if (class == 'e' && code == TRUTH_NOT_EXPR)
1816 else if (class == 'e'
1817 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
1818 || code == COMPOUND_EXPR))
1821 /* ??? Disable this since the SAVE_EXPR might already be in use outside
1822 the expression. There may be no way to make this work, but it needs
1823 to be looked at again for 2.6. */
1825 else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
1827 /* If we've already found a CVAL1 or CVAL2, this expression is
1828 too complex to handle. */
1829 if (*cval1 || *cval2)
1840 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
1843 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
1844 && twoval_comparison_p (TREE_OPERAND (arg, 1),
1845 cval1, cval2, save_p));
1851 if (code == COND_EXPR)
1852 return (twoval_comparison_p (TREE_OPERAND (arg, 0),
1853 cval1, cval2, save_p)
1854 && twoval_comparison_p (TREE_OPERAND (arg, 1),
1855 cval1, cval2, save_p)
1856 && twoval_comparison_p (TREE_OPERAND (arg, 2),
1857 cval1, cval2, save_p));
1861 /* First see if we can handle the first operand, then the second. For
1862 the second operand, we know *CVAL1 can't be zero. It must be that
1863 one side of the comparison is each of the values; test for the
1864 case where this isn't true by failing if the two operands
1867 if (operand_equal_p (TREE_OPERAND (arg, 0),
1868 TREE_OPERAND (arg, 1), 0))
1872 *cval1 = TREE_OPERAND (arg, 0);
1873 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
1875 else if (*cval2 == 0)
1876 *cval2 = TREE_OPERAND (arg, 0);
1877 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
1882 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
1884 else if (*cval2 == 0)
1885 *cval2 = TREE_OPERAND (arg, 1);
1886 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
1897 /* ARG is a tree that is known to contain just arithmetic operations and
1898 comparisons. Evaluate the operations in the tree substituting NEW0 for
1899 any occurrence of OLD0 as an operand of a comparison and likewise for
1903 eval_subst (arg, old0, new0, old1, new1)
1905 tree old0, new0, old1, new1;
1907 tree type = TREE_TYPE (arg);
1908 enum tree_code code = TREE_CODE (arg);
1909 char class = TREE_CODE_CLASS (code);
1911 /* We can handle some of the 'e' cases here. */
1912 if (class == 'e' && code == TRUTH_NOT_EXPR)
1914 else if (class == 'e'
1915 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
1921 return fold (build1 (code, type,
1922 eval_subst (TREE_OPERAND (arg, 0),
1923 old0, new0, old1, new1)));
1926 return fold (build (code, type,
1927 eval_subst (TREE_OPERAND (arg, 0),
1928 old0, new0, old1, new1),
1929 eval_subst (TREE_OPERAND (arg, 1),
1930 old0, new0, old1, new1)));
1936 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
1939 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
1942 return fold (build (code, type,
1943 eval_subst (TREE_OPERAND (arg, 0),
1944 old0, new0, old1, new1),
1945 eval_subst (TREE_OPERAND (arg, 1),
1946 old0, new0, old1, new1),
1947 eval_subst (TREE_OPERAND (arg, 2),
1948 old0, new0, old1, new1)));
1953 tree arg0 = TREE_OPERAND (arg, 0);
1954 tree arg1 = TREE_OPERAND (arg, 1);
1956 /* We need to check both for exact equality and tree equality. The
1957 former will be true if the operand has a side-effect. In that
1958 case, we know the operand occurred exactly once. */
1960 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
1962 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
1965 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
1967 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
1970 return fold (build (code, type, arg0, arg1));
1977 /* Return a tree for the case when the result of an expression is RESULT
1978 converted to TYPE and OMITTED was previously an operand of the expression
1979 but is now not needed (e.g., we folded OMITTED * 0).
1981 If OMITTED has side effects, we must evaluate it. Otherwise, just do
1982 the conversion of RESULT to TYPE. */
1985 omit_one_operand (type, result, omitted)
1986 tree type, result, omitted;
1988 tree t = convert (type, result);
1990 if (TREE_SIDE_EFFECTS (omitted))
1991 return build (COMPOUND_EXPR, type, omitted, t);
1993 return non_lvalue (t);
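/* Worked sketch, not part of the original sources: when fold reduces
`f () * 0', the call's side effects must survive, so
omit_one_operand (type, integer_zero_node, the-call) builds the
COMPOUND_EXPR `(f (), 0)', keeping the call while yielding the
folded value; a side-effect-free OMITTED is simply dropped and the
result returned as a non-lvalue. */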
1996 /* Return a simplified tree node for the truth-negation of ARG. This
1997 never alters ARG itself. We assume that ARG is an operation that
1998 returns a truth value (0 or 1). */
2001 invert_truthvalue (arg)
2004 tree type = TREE_TYPE (arg);
2005 enum tree_code code = TREE_CODE (arg);
2007 if (code == ERROR_MARK)
2010 /* If this is a comparison, we can simply invert it, except for
2011 floating-point non-equality comparisons, in which case we just
2012 enclose a TRUTH_NOT_EXPR around what we have. */
2014 if (TREE_CODE_CLASS (code) == '<')
2016 if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
2017 && code != NE_EXPR && code != EQ_EXPR)
2018 return build1 (TRUTH_NOT_EXPR, type, arg);
2020 return build (invert_tree_comparison (code), type,
2021 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
2027 return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
2028 && TREE_INT_CST_HIGH (arg) == 0, 0));
2030 case TRUTH_AND_EXPR:
2031 return build (TRUTH_OR_EXPR, type,
2032 invert_truthvalue (TREE_OPERAND (arg, 0)),
2033 invert_truthvalue (TREE_OPERAND (arg, 1)));
2036 return build (TRUTH_AND_EXPR, type,
2037 invert_truthvalue (TREE_OPERAND (arg, 0)),
2038 invert_truthvalue (TREE_OPERAND (arg, 1)));
2040 case TRUTH_XOR_EXPR:
2041 /* Here we can invert either operand. We invert the first operand
2042 unless the second operand is a TRUTH_NOT_EXPR in which case our
2043 result is the XOR of the first operand with the inside of the
2044 negation of the second operand. */
2046 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
2047 return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
2048 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
2050 return build (TRUTH_XOR_EXPR, type,
2051 invert_truthvalue (TREE_OPERAND (arg, 0)),
2052 TREE_OPERAND (arg, 1));
2054 case TRUTH_ANDIF_EXPR:
2055 return build (TRUTH_ORIF_EXPR, type,
2056 invert_truthvalue (TREE_OPERAND (arg, 0)),
2057 invert_truthvalue (TREE_OPERAND (arg, 1)));
2059 case TRUTH_ORIF_EXPR:
2060 return build (TRUTH_ANDIF_EXPR, type,
2061 invert_truthvalue (TREE_OPERAND (arg, 0)),
2062 invert_truthvalue (TREE_OPERAND (arg, 1)));
2064 case TRUTH_NOT_EXPR:
2065 return TREE_OPERAND (arg, 0);
2068 return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
2069 invert_truthvalue (TREE_OPERAND (arg, 1)),
2070 invert_truthvalue (TREE_OPERAND (arg, 2)));
2073 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
2074 invert_truthvalue (TREE_OPERAND (arg, 1)));
2076 case NON_LVALUE_EXPR:
2077 return invert_truthvalue (TREE_OPERAND (arg, 0));
2082 return build1 (TREE_CODE (arg), type,
2083 invert_truthvalue (TREE_OPERAND (arg, 0)));
2086 if (!integer_onep (TREE_OPERAND (arg, 1)))
2088 return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
2091 return build1 (TRUTH_NOT_EXPR, type, arg);
2093 if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
2095 return build1 (TRUTH_NOT_EXPR, type, arg);
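/* Worked sketch, not part of the original sources: the recursion
turns `!((a < b) && (c != 0))' into `(a >= b) || (c == 0)' via the
TRUTH_ANDIF case and comparison inversion above. If `a' and `b'
were floating-point, `a < b' would instead be wrapped in
TRUTH_NOT_EXPR, because !(a < b) is not a >= b when a NaN is
involved. */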
2098 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2099 operands are another bit-wise operation with a common input. If so,
2100 distribute the bit operations to save an operation and possibly two if
2101 constants are involved. For example, convert
2102 (A | B) & (A | C) into A | (B & C)
2103 Further simplification will occur if B and C are constants.
2105 If this optimization cannot be done, 0 will be returned. */
2108 distribute_bit_expr (code, type, arg0, arg1)
2109 enum tree_code code;
2116 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2117 || TREE_CODE (arg0) == code
2118 || (TREE_CODE (arg0) != BIT_AND_EXPR
2119 && TREE_CODE (arg0) != BIT_IOR_EXPR))
2122 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
2124 common = TREE_OPERAND (arg0, 0);
2125 left = TREE_OPERAND (arg0, 1);
2126 right = TREE_OPERAND (arg1, 1);
2128 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
2130 common = TREE_OPERAND (arg0, 0);
2131 left = TREE_OPERAND (arg0, 1);
2132 right = TREE_OPERAND (arg1, 0);
2134 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
2136 common = TREE_OPERAND (arg0, 1);
2137 left = TREE_OPERAND (arg0, 0);
2138 right = TREE_OPERAND (arg1, 1);
2140 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
2142 common = TREE_OPERAND (arg0, 1);
2143 left = TREE_OPERAND (arg0, 0);
2144 right = TREE_OPERAND (arg1, 0);
2149 return fold (build (TREE_CODE (arg0), type, common,
2150 fold (build (code, type, left, right))));
2153 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
2154 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
2157 make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
2160 int bitsize, bitpos;
2163 tree result = build (BIT_FIELD_REF, type, inner,
2164 size_int (bitsize), size_int (bitpos));
2166 TREE_UNSIGNED (result) = unsignedp;
2171 /* Optimize a bit-field compare.
2173 There are two cases: First is a compare against a constant and the
2174 second is a comparison of two items where the fields are at the same
2175 bit position relative to the start of a chunk (byte, halfword, word)
2176 large enough to contain it. In these cases we can avoid the shift
2177 implicit in bitfield extractions.
2179 For constants, we emit a compare of the shifted constant with the
2180 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2181 compared. For two fields at the same position, we do the ANDs with the
2182 similar mask and compare the result of the ANDs.
2184 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2185 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2186 are the left and right operands of the comparison, respectively.
2188 If the optimization described above can be done, we return the resulting
2189 tree. Otherwise we return zero. */
2192 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2193 enum tree_code code;
2197 int lbitpos, lbitsize, rbitpos, rbitsize;
2198 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
2199 tree type = TREE_TYPE (lhs);
2200 tree signed_type, unsigned_type;
2201 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2202 enum machine_mode lmode, rmode, lnmode, rnmode;
2203 int lunsignedp, runsignedp;
2204 int lvolatilep = 0, rvolatilep = 0;
2205 tree linner, rinner;
2209 /* Get all the information about the extractions being done. If the bit size
2210 is the same as the size of the underlying object, we aren't doing an
2211 extraction at all and so can do nothing. */
2212 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2213 &lunsignedp, &lvolatilep);
2214 if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2220 /* If this is not a constant, we can only do something if bit positions,
2221 sizes, and signedness are the same. */
2222 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
2223 &rmode, &runsignedp, &rvolatilep);
2225 if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
2226 || lunsignedp != runsignedp || offset != 0)
2230 /* See if we can find a mode to refer to this field. We should be able to,
2231 but fail if we can't. */
2232 lnmode = get_best_mode (lbitsize, lbitpos,
2233 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
2235 if (lnmode == VOIDmode)
2238 /* Set signed and unsigned types of the precision of this mode for the
2240 signed_type = type_for_mode (lnmode, 0);
2241 unsigned_type = type_for_mode (lnmode, 1);
2245 rnmode = get_best_mode (rbitsize, rbitpos,
2246 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
2248 if (rnmode == VOIDmode)
2252 /* Compute the bit position and size for the new reference and our offset
2253 within it. If the new reference is the same size as the original, we
2254 won't optimize anything, so return zero. */
2255 lnbitsize = GET_MODE_BITSIZE (lnmode);
2256 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2257 lbitpos -= lnbitpos;
2258 if (lnbitsize == lbitsize)
2263 rnbitsize = GET_MODE_BITSIZE (rnmode);
2264 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2265 rbitpos -= rnbitpos;
2266 if (rnbitsize == rbitsize)
2270 #if BYTES_BIG_ENDIAN
2271 lbitpos = lnbitsize - lbitsize - lbitpos;
2274 /* Make the mask to be used against the extracted field. */
2275 mask = build_int_2 (~0, ~0);
2276 TREE_TYPE (mask) = unsigned_type;
2277 force_fit_type (mask, 0);
2278 mask = convert (unsigned_type, mask);
2279 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2280 mask = const_binop (RSHIFT_EXPR, mask,
2281 size_int (lnbitsize - lbitsize - lbitpos), 0);
2284 /* If not comparing with constant, just rework the comparison
2286 return build (code, compare_type,
2287 build (BIT_AND_EXPR, unsigned_type,
2288 make_bit_field_ref (linner, unsigned_type,
2289 lnbitsize, lnbitpos, 1),
2291 build (BIT_AND_EXPR, unsigned_type,
2292 make_bit_field_ref (rinner, unsigned_type,
2293 rnbitsize, rnbitpos, 1),
2296 /* Otherwise, we are handling the constant case. See if the constant is too
2297 big for the field. Warn and return a tree for 0 (false) if so. We do
2298 this not only for its own sake, but to avoid having to test for this
2299 error case below. If we didn't, we might generate wrong code.
2301 For unsigned fields, the constant shifted right by the field length should
2302 be all zero. For signed fields, the high-order bits should agree with
2307 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2308 convert (unsigned_type, rhs),
2309 size_int (lbitsize), 0)))
2311 warning ("comparison is always %s due to width of bitfield",
2312 code == NE_EXPR ? "one" : "zero");
2313 return convert (compare_type,
2315 ? integer_one_node : integer_zero_node));
2320 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2321 size_int (lbitsize - 1), 0);
2322 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2324 warning ("comparison is always %s due to width of bitfield",
2325 code == NE_EXPR ? "one" : "zero");
2326 return convert (compare_type,
2328 ? integer_one_node : integer_zero_node));
2332 /* Single-bit compares should always be against zero. */
2333 if (lbitsize == 1 && ! integer_zerop (rhs))
2335 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2336 rhs = convert (type, integer_zero_node);
2339 /* Make a new bitfield reference, shift the constant over the
2340 appropriate number of bits and mask it with the computed mask
2341 (in case this was a signed field). If we changed it, make a new one. */
2342 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2345 TREE_SIDE_EFFECTS (lhs) = 1;
2346 TREE_THIS_VOLATILE (lhs) = 1;
2349 rhs = fold (const_binop (BIT_AND_EXPR,
2350 const_binop (LSHIFT_EXPR,
2351 convert (unsigned_type, rhs),
2352 size_int (lbitpos), 0),
2355 return build (code, compare_type,
2356 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2360 /* Subroutine for fold_truthop: decode a field reference.
2362 If EXP is a comparison reference, we return the innermost reference.
2364 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2365 set to the starting bit number.
2367 If the innermost field can be completely contained in a mode-sized
2368 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2370 *PVOLATILEP is set to 1 if any expression encountered is volatile;
2371 otherwise it is not changed.
2373 *PUNSIGNEDP is set to the signedness of the field.
2375 *PMASK is set to the mask used. This is either contained in a
2376 BIT_AND_EXPR or derived from the width of the field.
2378 Return 0 if this is not a component reference or is one that we can't
2379 do anything with. */
2382 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2385 int *pbitsize, *pbitpos;
2386 enum machine_mode *pmode;
2387 int *punsignedp, *pvolatilep;
2391 tree mask, inner, offset;
2395 /* All the optimizations using this function assume integer fields.
2396 There are problems with FP fields since the type_for_size call
2397 below can fail for, e.g., XFmode. */
2398 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
2403 if (TREE_CODE (exp) == BIT_AND_EXPR)
2405 and_mask = TREE_OPERAND (exp, 1);
2406 exp = TREE_OPERAND (exp, 0);
2407 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
2408 if (TREE_CODE (and_mask) != INTEGER_CST)
2412 if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
2413 && TREE_CODE (exp) != BIT_FIELD_REF)
2416 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2417 punsignedp, pvolatilep);
2418 if (inner == exp || *pbitsize < 0 || offset != 0)
2421 /* Compute the mask to access the bitfield. */
2422 unsigned_type = type_for_size (*pbitsize, 1);
2423 precision = TYPE_PRECISION (unsigned_type);
2425 mask = build_int_2 (~0, ~0);
2426 TREE_TYPE (mask) = unsigned_type;
2427 force_fit_type (mask, 0);
2428 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2429 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2431 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
2433 mask = fold (build (BIT_AND_EXPR, unsigned_type,
2434 convert (unsigned_type, and_mask), mask));
2440 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order bits. */
2444 all_ones_mask_p (mask, size)
2448 tree type = TREE_TYPE (mask);
2449 int precision = TYPE_PRECISION (type);
2452 tmask = build_int_2 (~0, ~0);
2453 TREE_TYPE (tmask) = signed_type (type);
2454 force_fit_type (tmask, 0);
2456 operand_equal_p (mask,
2457 const_binop (RSHIFT_EXPR,
2458 const_binop (LSHIFT_EXPR, tmask,
2459 size_int (precision - size), 0),
2460 size_int (precision - size), 0),
2464 /* Subroutine for fold_truthop: determine if an operand is simple enough
2465 to be evaluated unconditionally. */
2468 simple_operand_p (exp)
2471 /* Strip any conversions that don't change the machine mode. */
2472 while ((TREE_CODE (exp) == NOP_EXPR
2473 || TREE_CODE (exp) == CONVERT_EXPR)
2474 && (TYPE_MODE (TREE_TYPE (exp))
2475 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2476 exp = TREE_OPERAND (exp, 0);
2478 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2479 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2480 && ! TREE_ADDRESSABLE (exp)
2481 && ! TREE_THIS_VOLATILE (exp)
2482 && ! DECL_NONLOCAL (exp)
2483 /* Don't regard global variables as simple. They may be
2484 allocated in ways unknown to the compiler (shared memory,
2485 #pragma weak, etc). */
2486 && ! TREE_PUBLIC (exp)
2487 && ! DECL_EXTERNAL (exp)
2488 /* Loading a static variable is unduly expensive, but global
2489 registers aren't expensive. */
2490 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2493 /* Subroutine for fold_truthop: try to optimize a range test.
2495 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2497 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2498 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2499 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of the result.
2502 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2503 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2504 larger than HI_CST (they may be equal).
2506 We return the simplified tree or 0 if no optimization is possible. */
2509 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2510 enum tree_code jcode, lo_code, hi_code;
2511 tree type, var, lo_cst, hi_cst;
2514 enum tree_code rcode;
2516 /* See if this is a range test and normalize the constant terms. */
2518 if (jcode == TRUTH_AND_EXPR)
2523 /* See if we have VAR != CST && VAR != CST+1. */
2524 if (! (hi_code == NE_EXPR
2525 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2526 && tree_int_cst_equal (integer_one_node,
2527 const_binop (MINUS_EXPR,
2528 hi_cst, lo_cst, 0))))
2536 if (hi_code == LT_EXPR)
2537 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2538 else if (hi_code != LE_EXPR)
2541 if (lo_code == GT_EXPR)
2542 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2544 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
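      /* E.g. `i > 1 && i < 10' has been normalized to `i >= 2 && i <= 9'
	 here, which the code below turns into `(unsigned) (i - 2) <= 7'.  */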
2557 /* See if we have VAR == CST || VAR == CST+1. */
2558 if (! (hi_code == EQ_EXPR
2559 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2560 && tree_int_cst_equal (integer_one_node,
2561 const_binop (MINUS_EXPR,
2562 hi_cst, lo_cst, 0))))
2570 if (hi_code == GE_EXPR)
2571 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2572 else if (hi_code != GT_EXPR)
2575 if (lo_code == LE_EXPR)
2576 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2578 /* We now have VAR < LO_CST || VAR > HI_CST. */
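      /* E.g. `i < 2 || i > 9' arrives here unchanged and becomes
	 `(unsigned) (i - 2) > 7', the complement of the AND case.  */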
2587 /* When normalizing, it is possible to both increment the smaller constant
2588 and decrement the larger constant. See if they are still ordered. */
2589 if (tree_int_cst_lt (hi_cst, lo_cst))
2592 /* Fail if VAR isn't an integer. */
2593 utype = TREE_TYPE (var);
2594 if (! INTEGRAL_TYPE_P (utype))
2597 /* The range test is invalid if subtracting the two constants results
2598 in overflow. This can happen in traditional mode. */
2599 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2600 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2603 if (! TREE_UNSIGNED (utype))
2605 utype = unsigned_type (utype);
2606 var = convert (utype, var);
2607 lo_cst = convert (utype, lo_cst);
2608 hi_cst = convert (utype, hi_cst);
2611 return fold (convert (type,
2612 build (rcode, utype,
2613 build (MINUS_EXPR, utype, var, lo_cst),
2614 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2617 /* Find ways of folding logical expressions of LHS and RHS:
2618 Try to merge two comparisons to the same innermost item.
2619 Look for range tests like "ch >= '0' && ch <= '9'".
2620 Look for combinations of simple terms on machines with expensive branches
2621 and evaluate the RHS unconditionally.
2623 For example, if we have p->a == 2 && p->b == 4 and we can make an
2624 object large enough to span both A and B, we can do this with a comparison
2625 against the object ANDed with a mask.
2627 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2628 operations to do this with one comparison.
2630 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2631 function and the one above.
2633 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2634 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2636 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its two operands.
2639 We return the simplified tree or 0 if no optimization is possible. */
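/* For illustration only (a hypothetical layout, not taken from the code
   below): given

	struct s { unsigned a : 4, b : 4; } *p;

   the test `p->a == 2 && p->b == 4' can be folded into one byte-sized
   compare, roughly `(*(unsigned char *) p & 0xff) == 0x42' if bits are
   allocated low-to-high (0x24 if high-to-low).  */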
2642 fold_truthop (code, truth_type, lhs, rhs)
2643 enum tree_code code;
2644 tree truth_type, lhs, rhs;
2646 /* If this is the "or" of two comparisons, we can do something if
2647 the comparisons are NE_EXPR. If this is the "and", we can do something
2648 if the comparisons are EQ_EXPR. I.e.,
2649 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2651 WANTED_CODE is this operation code. For single bit fields, we can
2652 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2653 comparison for one-bit fields. */
2655 enum tree_code wanted_code;
2656 enum tree_code lcode, rcode;
2657 tree ll_arg, lr_arg, rl_arg, rr_arg;
2658 tree ll_inner, lr_inner, rl_inner, rr_inner;
2659 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2660 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2661 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2662 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2663 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2664 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2665 enum machine_mode lnmode, rnmode;
2666 tree ll_mask, lr_mask, rl_mask, rr_mask;
2667 tree l_const, r_const;
2669 int first_bit, end_bit;
2672 /* Start by getting the comparison codes and seeing if this looks like
2673 a range test. Fail if anything is volatile. If one operand is a
2674 BIT_AND_EXPR with the constant one, treat it as if it were surrounded by a NE_EXPR against zero. */
2677 if (TREE_SIDE_EFFECTS (lhs)
2678 || TREE_SIDE_EFFECTS (rhs))
2681 lcode = TREE_CODE (lhs);
2682 rcode = TREE_CODE (rhs);
2684 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2685 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2687 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2688 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
2690 if (TREE_CODE_CLASS (lcode) != '<'
2691 || TREE_CODE_CLASS (rcode) != '<')
2694 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2695 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2697 ll_arg = TREE_OPERAND (lhs, 0);
2698 lr_arg = TREE_OPERAND (lhs, 1);
2699 rl_arg = TREE_OPERAND (rhs, 0);
2700 rr_arg = TREE_OPERAND (rhs, 1);
2702 if (TREE_CODE (lr_arg) == INTEGER_CST
2703 && TREE_CODE (rr_arg) == INTEGER_CST
2704 && operand_equal_p (ll_arg, rl_arg, 0))
2706 if (tree_int_cst_lt (lr_arg, rr_arg))
2707 result = range_test (code, truth_type, lcode, rcode,
2708 ll_arg, lr_arg, rr_arg);
2710 result = range_test (code, truth_type, rcode, lcode,
2711 ll_arg, rr_arg, lr_arg);
2713 /* If this isn't a range test, it also isn't a comparison that
2714 can be merged. However, it wins to evaluate the RHS unconditionally
2715 on machines with expensive branches. */
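  /* E.g. `p->f == 2 || p->f == 7' is not a range test, but on such a
     machine it becomes a TRUTH_OR_EXPR of the two comparisons, with P->F
     wrapped in a SAVE_EXPR: both halves are evaluated and no conditional
     branch is needed.  */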
2717 if (result == 0 && BRANCH_COST >= 2)
2719 if (TREE_CODE (ll_arg) != VAR_DECL
2720 && TREE_CODE (ll_arg) != PARM_DECL)
2722 /* Avoid evaluating the variable part twice. */
2723 ll_arg = save_expr (ll_arg);
2724 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2725 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2727 return build (code, truth_type, lhs, rhs);
2732 /* If the RHS can be evaluated unconditionally and its operands are
2733 simple, it wins to evaluate the RHS unconditionally on machines
2734 with expensive branches. In this case, this isn't a comparison
2735 that can be merged. */
2737 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2738 are with zero (tmw). */
2740 if (BRANCH_COST >= 2
2741 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2742 && simple_operand_p (rl_arg)
2743 && simple_operand_p (rr_arg))
2744 return build (code, truth_type, lhs, rhs);
2746 /* See if the comparisons can be merged. Then get all the parameters for
2749 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2750 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2754 ll_inner = decode_field_reference (ll_arg,
2755 &ll_bitsize, &ll_bitpos, &ll_mode,
2756 &ll_unsignedp, &volatilep, &ll_mask);
2757 lr_inner = decode_field_reference (lr_arg,
2758 &lr_bitsize, &lr_bitpos, &lr_mode,
2759 &lr_unsignedp, &volatilep, &lr_mask);
2760 rl_inner = decode_field_reference (rl_arg,
2761 &rl_bitsize, &rl_bitpos, &rl_mode,
2762 &rl_unsignedp, &volatilep, &rl_mask);
2763 rr_inner = decode_field_reference (rr_arg,
2764 &rr_bitsize, &rr_bitpos, &rr_mode,
2765 &rr_unsignedp, &volatilep, &rr_mask);
2767 /* The inner operation on the lhs of each comparison must be the
2768 same if we are to be able to do anything. Then see if we have
2769 constants. If not, the same must be true for the rhs. */
2771 if (volatilep || ll_inner == 0 || rl_inner == 0
2772 || ! operand_equal_p (ll_inner, rl_inner, 0))
2775 if (TREE_CODE (lr_arg) == INTEGER_CST
2776 && TREE_CODE (rr_arg) == INTEGER_CST)
2777 l_const = lr_arg, r_const = rr_arg;
2778 else if (lr_inner == 0 || rr_inner == 0
2779 || ! operand_equal_p (lr_inner, rr_inner, 0))
2782 l_const = r_const = 0;
2784 /* If either comparison code is not correct for our logical operation,
2785 fail. However, we can convert a one-bit comparison against zero into
2786 the opposite comparison against that bit being set in the field. */
2788 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2789 if (lcode != wanted_code)
2791 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2797 if (rcode != wanted_code)
2799 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2805 /* See if we can find a mode that contains both fields being compared on
2806 the left. If we can't, fail. Otherwise, update all constants and masks
2807 to be relative to a field of that size. */
2808 first_bit = MIN (ll_bitpos, rl_bitpos);
2809 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2810 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2811 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2813 if (lnmode == VOIDmode)
2816 lnbitsize = GET_MODE_BITSIZE (lnmode);
2817 lnbitpos = first_bit & ~ (lnbitsize - 1);
2818 type = type_for_size (lnbitsize, 1);
2819 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2821 #if BYTES_BIG_ENDIAN
2822 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2823 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2826 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2827 size_int (xll_bitpos), 0);
2828 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2829 size_int (xrl_bitpos), 0);
2831 /* Make sure the constants are interpreted as unsigned, so we
2832 don't have sign bits outside the range of their type. */
2836 l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
2837 l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
2838 size_int (xll_bitpos), 0);
2839 l_const = const_binop (BIT_AND_EXPR, l_const, ll_mask, 0);
2843 r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
2844 r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
2845 size_int (xrl_bitpos), 0);
2846 r_const = const_binop (BIT_AND_EXPR, r_const, rl_mask, 0);
2849 /* If the right sides are not constant, do the same for them. Also,
2850 disallow this optimization if a size or signedness mismatch occurs
2851 between the left and right sides. */
2854 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2855 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2856 /* Make sure the two fields on the right
2857 correspond to the left without being swapped. */
2858 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2861 first_bit = MIN (lr_bitpos, rr_bitpos);
2862 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2863 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2864 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2866 if (rnmode == VOIDmode)
2869 rnbitsize = GET_MODE_BITSIZE (rnmode);
2870 rnbitpos = first_bit & ~ (rnbitsize - 1);
2871 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2873 #if BYTES_BIG_ENDIAN
2874 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2875 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2878 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2879 size_int (xlr_bitpos), 0);
2880 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2881 size_int (xrr_bitpos), 0);
2883 /* Make a mask that corresponds to both fields being compared.
2884 Do this for both items being compared. If the masks agree,
2885 we can do this by masking both and comparing the masked results. */
2887 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2888 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2889 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2891 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2892 ll_unsignedp || rl_unsignedp);
2893 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2894 lr_unsignedp || rr_unsignedp);
2895 if (! all_ones_mask_p (ll_mask, lnbitsize))
2897 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2898 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2900 return build (wanted_code, truth_type, lhs, rhs);
2903 /* There is still another way we can do something: If both pairs of
2904 fields being compared are adjacent, we may be able to make a wider
2905 field containing them both. */
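  /* E.g. with `unsigned a : 4, b : 4;' present in both *P and *Q, the
     test `p->a == q->a && p->b == q->b' can become a single comparison
     of the two 8-bit containers.  */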
2906 if ((ll_bitsize + ll_bitpos == rl_bitpos
2907 && lr_bitsize + lr_bitpos == rr_bitpos)
2908 || (ll_bitpos == rl_bitpos + rl_bitsize
2909 && lr_bitpos == rr_bitpos + rr_bitsize))
2910 return build (wanted_code, truth_type,
2911 make_bit_field_ref (ll_inner, type,
2912 ll_bitsize + rl_bitsize,
2913 MIN (ll_bitpos, rl_bitpos),
2915 make_bit_field_ref (lr_inner, type,
2916 lr_bitsize + rr_bitsize,
2917 MIN (lr_bitpos, rr_bitpos),
2923 /* Handle the case of comparisons with constants. If there is something in
2924 common between the masks, those bits of the constants must be the same.
2925 If not, the condition is always false. Test for this to avoid generating
2926 incorrect code below. */
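  /* E.g. `p->a == 1 && p->a == 2' is such a case: the masks coincide but
     the constants disagree, so the code below folds it to zero and warns.  */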
2927 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2928 if (! integer_zerop (result)
2929 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2930 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2932 if (wanted_code == NE_EXPR)
2934 warning ("`or' of unmatched not-equal tests is always 1");
2935 return convert (truth_type, integer_one_node);
2939 warning ("`and' of mutually exclusive equal-tests is always zero");
2940 return convert (truth_type, integer_zero_node);
2944 /* Construct the expression we will return. First get the component
2945 reference we will make. Unless the mask is all ones for the width of
2946 that field, perform the mask operation. Then compare with the merged constant. */
2948 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2949 ll_unsignedp || rl_unsignedp);
2951 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2952 if (! all_ones_mask_p (ll_mask, lnbitsize))
2953 result = build (BIT_AND_EXPR, type, result, ll_mask);
2955 return build (wanted_code, truth_type, result,
2956 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
2959 /* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
2960 S, a SAVE_EXPR, return the expression actually being evaluated. Note
2961 that we may sometimes modify the tree. */
2964 strip_compound_expr (t, s)
2968 tree type = TREE_TYPE (t);
2969 enum tree_code code = TREE_CODE (t);
2971 /* See if this is the COMPOUND_EXPR we want to eliminate. */
2972 if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
2973 && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
2974 return TREE_OPERAND (t, 1);
2976 /* See if this is a COND_EXPR or a simple arithmetic operator. We
2977 don't bother handling any other types. */
2978 else if (code == COND_EXPR)
2980 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
2981 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
2982 TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
2984 else if (TREE_CODE_CLASS (code) == '1')
2985 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
2986 else if (TREE_CODE_CLASS (code) == '<'
2987 || TREE_CODE_CLASS (code) == '2')
2989 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
2990 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
2996 /* Perform constant folding and related simplification of EXPR.
2997 The related simplifications include x*1 => x, x*0 => 0, etc.,
2998 and application of the associative law.
2999 NOP_EXPR conversions may be removed freely (as long as we
3000 are careful not to change the C type of the overall expression).
3001 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3002 but we can constant-fold them if they have constant operands. */
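/* For example, fold applied to the tree for `2 + 3' yields a single
   INTEGER_CST of value 5, while `i * 0' folds to the zero constant
   (I is still evaluated if it has side effects); front ends call fold
   on most expressions they construct.  */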
3008 register tree t = expr;
3009 tree t1 = NULL_TREE;
3011 tree type = TREE_TYPE (expr);
3012 register tree arg0, arg1;
3013 register enum tree_code code = TREE_CODE (t);
3017 /* WINS will be nonzero when the switch is done
3018 if all operands are constant. */
3022 /* Don't try to process an RTL_EXPR since its operands aren't trees. */
3023 if (code == RTL_EXPR)
3026 /* Return right away if already constant. */
3027 if (TREE_CONSTANT (t))
3029 if (code == CONST_DECL)
3030 return DECL_INITIAL (t);
3034 kind = TREE_CODE_CLASS (code);
3035 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3039 /* Special case for conversion ops that can have fixed point args. */
3040 arg0 = TREE_OPERAND (t, 0);
3042 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3044 STRIP_TYPE_NOPS (arg0);
3046 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3047 subop = TREE_REALPART (arg0);
3051 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3052 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3053 && TREE_CODE (subop) != REAL_CST
3054 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3056 /* Note that TREE_CONSTANT isn't enough:
3057 static var addresses are constant but we can't
3058 do arithmetic on them. */
3061 else if (kind == 'e' || kind == '<'
3062 || kind == '1' || kind == '2' || kind == 'r')
3064 register int len = tree_code_length[(int) code];
3066 for (i = 0; i < len; i++)
3068 tree op = TREE_OPERAND (t, i);
3072 continue; /* Valid for CALL_EXPR, at least. */
3074 if (kind == '<' || code == RSHIFT_EXPR)
3076 /* Signedness matters here. Perhaps we can refine this later. */
3078 STRIP_TYPE_NOPS (op);
3082 /* Strip any conversions that don't change the mode. */
3086 if (TREE_CODE (op) == COMPLEX_CST)
3087 subop = TREE_REALPART (op);
3091 if (TREE_CODE (subop) != INTEGER_CST
3092 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3093 && TREE_CODE (subop) != REAL_CST
3094 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3096 /* Note that TREE_CONSTANT isn't enough:
3097 static var addresses are constant but we can't
3098 do arithmetic on them. */
3108 /* If this is a commutative operation, and ARG0 is a constant, move it
3109 to ARG1 to reduce the number of tests below. */
3110 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3111 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3112 || code == BIT_AND_EXPR)
3113 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3115 tem = arg0; arg0 = arg1; arg1 = tem;
3117 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3118 TREE_OPERAND (t, 1) = tem;
3121 /* Now WINS is set as described above,
3122 ARG0 is the first operand of EXPR,
3123 and ARG1 is the second operand (if it has more than one operand).
3125 First check for cases where an arithmetic operation is applied to a
3126 compound, conditional, or comparison operation. Push the arithmetic
3127 operation inside the compound or conditional to see if any folding
3128 can then be done. Convert comparison to conditional for this purpose.
3129 This also optimizes non-constant cases that used to be done in expand_expr.
3132 Before we do that, see if this is a BIT_AND_EXPR or a BIT_IOR_EXPR,
3133 one of the operands is a comparison and the other is a comparison, a
3134 BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
3135 code below would make the expression more complex. Change it to a
3136 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
3137 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
3139 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
3140 || code == EQ_EXPR || code == NE_EXPR)
3141 && ((truth_value_p (TREE_CODE (arg0))
3142 && (truth_value_p (TREE_CODE (arg1))
3143 || (TREE_CODE (arg1) == BIT_AND_EXPR
3144 && integer_onep (TREE_OPERAND (arg1, 1)))))
3145 || (truth_value_p (TREE_CODE (arg1))
3146 && (truth_value_p (TREE_CODE (arg0))
3147 || (TREE_CODE (arg0) == BIT_AND_EXPR
3148 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3150 t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
3151 : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
3155 if (code == EQ_EXPR)
3156 t = invert_truthvalue (t);
3161 if (TREE_CODE_CLASS (code) == '1')
3163 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3164 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3165 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3166 else if (TREE_CODE (arg0) == COND_EXPR)
3168 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3169 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3170 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3172 /* If this was a conversion, and all we did was to move it
3173 inside the COND_EXPR, bring it back out. But leave it if
3174 it is a conversion from integer to integer and the
3175 result precision is no wider than a word since such a
3176 conversion is cheap and may be optimized away by combine,
3177 while it couldn't if it were outside the COND_EXPR. Then return
3178 so we don't get into an infinite recursion loop taking the
3179 conversion out and then back in. */
3181 if ((code == NOP_EXPR || code == CONVERT_EXPR
3182 || code == NON_LVALUE_EXPR)
3183 && TREE_CODE (t) == COND_EXPR
3184 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3185 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3186 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3187 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0)))
3188 && ! (INTEGRAL_TYPE_P (TREE_TYPE (t))
3189 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)))
3190 && TYPE_PRECISION (TREE_TYPE (t)) <= BITS_PER_WORD))
3191 t = build1 (code, type,
3193 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3194 TREE_OPERAND (t, 0),
3195 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3196 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3199 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3200 return fold (build (COND_EXPR, type, arg0,
3201 fold (build1 (code, type, integer_one_node)),
3202 fold (build1 (code, type, integer_zero_node))));
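      /* E.g. `- (p ? x : y)' was rewritten above as `p ? -x : -y', and a
	 unary operator applied to a bare comparison becomes
	 `(a < b) ? op 1 : op 0' so that each arm can fold.  */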
3204 else if (TREE_CODE_CLASS (code) == '2'
3205 || TREE_CODE_CLASS (code) == '<')
3207 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3208 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3209 fold (build (code, type,
3210 arg0, TREE_OPERAND (arg1, 1))));
3211 else if (TREE_CODE (arg1) == COND_EXPR
3212 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3214 tree test, true_value, false_value;
3216 if (TREE_CODE (arg1) == COND_EXPR)
3218 test = TREE_OPERAND (arg1, 0);
3219 true_value = TREE_OPERAND (arg1, 1);
3220 false_value = TREE_OPERAND (arg1, 2);
3225 true_value = integer_one_node;
3226 false_value = integer_zero_node;
3229 /* If ARG0 is complex we want to make sure we only evaluate
3230 it once. Though this is only required if it is volatile, it
3231 might be more efficient even if it is not. However, if we
3232 succeed in folding one part to a constant, we do not need
3233 to make this SAVE_EXPR. Since we do this optimization
3234 primarily to see if we do end up with a constant and this
3235 SAVE_EXPR interferes with later optimizations, suppressing
3236 it when we can is important. */
3238 if (TREE_CODE (arg0) != SAVE_EXPR
3239 && ((TREE_CODE (arg0) != VAR_DECL
3240 && TREE_CODE (arg0) != PARM_DECL)
3241 || TREE_SIDE_EFFECTS (arg0)))
3243 tree lhs = fold (build (code, type, arg0, true_value));
3244 tree rhs = fold (build (code, type, arg0, false_value));
3246 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3247 return fold (build (COND_EXPR, type, test, lhs, rhs));
3249 arg0 = save_expr (arg0);
3252 test = fold (build (COND_EXPR, type, test,
3253 fold (build (code, type, arg0, true_value)),
3254 fold (build (code, type, arg0, false_value))));
3255 if (TREE_CODE (arg0) == SAVE_EXPR)
3256 return build (COMPOUND_EXPR, type,
3257 convert (void_type_node, arg0),
3258 strip_compound_expr (test, arg0));
3260 return convert (type, test);
3263 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3264 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3265 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3266 else if (TREE_CODE (arg0) == COND_EXPR
3267 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3269 tree test, true_value, false_value;
3271 if (TREE_CODE (arg0) == COND_EXPR)
3273 test = TREE_OPERAND (arg0, 0);
3274 true_value = TREE_OPERAND (arg0, 1);
3275 false_value = TREE_OPERAND (arg0, 2);
3280 true_value = integer_one_node;
3281 false_value = integer_zero_node;
3284 if (TREE_CODE (arg1) != SAVE_EXPR
3285 && ((TREE_CODE (arg1) != VAR_DECL
3286 && TREE_CODE (arg1) != PARM_DECL)
3287 || TREE_SIDE_EFFECTS (arg1)))
3289 tree lhs = fold (build (code, type, true_value, arg1));
3290 tree rhs = fold (build (code, type, false_value, arg1));
3292 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs)
3293 || TREE_CONSTANT (arg1))
3294 return fold (build (COND_EXPR, type, test, lhs, rhs));
3296 arg1 = save_expr (arg1);
3299 test = fold (build (COND_EXPR, type, test,
3300 fold (build (code, type, true_value, arg1)),
3301 fold (build (code, type, false_value, arg1))));
3302 if (TREE_CODE (arg1) == SAVE_EXPR)
3303 return build (COMPOUND_EXPR, type,
3304 convert (void_type_node, arg1),
3305 strip_compound_expr (test, arg1));
3307 return convert (type, test);
3310 else if (TREE_CODE_CLASS (code) == '<'
3311 && TREE_CODE (arg0) == COMPOUND_EXPR)
3312 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3313 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3314 else if (TREE_CODE_CLASS (code) == '<'
3315 && TREE_CODE (arg1) == COMPOUND_EXPR)
3316 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3317 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3329 return fold (DECL_INITIAL (t));
3334 case FIX_TRUNC_EXPR:
3335 /* Other kinds of FIX are not handled properly by fold_convert. */
3337 if (TREE_TYPE (TREE_OPERAND (t, 0)) == TREE_TYPE (t))
3338 return TREE_OPERAND (t, 0);
3340 /* In addition to the cases of two conversions in a row
3341 handled below, if we are converting something to its own
3342 type via an object of identical or wider precision, neither
3343 conversion is needed. */
3344 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3345 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3346 && TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == TREE_TYPE (t)
3347 && ((INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
3348 && INTEGRAL_TYPE_P (TREE_TYPE (t)))
3349 || (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
3350 && FLOAT_TYPE_P (TREE_TYPE (t))))
3351 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3352 >= TYPE_PRECISION (TREE_TYPE (t))))
3353 return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
3355 /* Two conversions in a row are not needed unless:
3356 - the intermediate type is narrower than both initial and final, or
3357 - the intermediate type and innermost type differ in signedness,
3358 and the outermost type is wider than the intermediate, or
3359 - the initial type is a pointer type and the precisions of the
3360 intermediate and final types differ, or
3361 - the final type is a pointer type and the precisions of the
3362 initial and intermediate types differ. */
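      /* E.g. (int) (long) i, with I an int and long wider, folds to I
	 itself, while (int) (char) i must keep both conversions because
	 the intermediate type is narrower than both.  */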
3363 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3364 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3365 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3366 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3368 TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3369 > TYPE_PRECISION (TREE_TYPE (t)))
3370 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3372 && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
3374 && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3375 != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3376 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3377 < TYPE_PRECISION (TREE_TYPE (t))))
3378 && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3379 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3380 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
3382 (TREE_UNSIGNED (TREE_TYPE (t))
3383 && (TYPE_PRECISION (TREE_TYPE (t))
3384 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3385 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3387 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3388 != TYPE_PRECISION (TREE_TYPE (t))))
3389 && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
3390 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3391 != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3392 return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3394 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3395 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3396 /* Detect assigning a bitfield. */
3397 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3398 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3400 /* Don't leave an assignment inside a conversion
3401 unless assigning a bitfield. */
3402 tree prev = TREE_OPERAND (t, 0);
3403 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3404 /* First do the assignment, then return converted constant. */
3405 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3411 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3414 return fold_convert (t, arg0);
3416 #if 0 /* This loses on &"foo"[0]. */
3421 /* Fold an expression like: "foo"[2] */
3422 if (TREE_CODE (arg0) == STRING_CST
3423 && TREE_CODE (arg1) == INTEGER_CST
3424 && !TREE_INT_CST_HIGH (arg1)
3425 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3427 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3428 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3429 force_fit_type (t, 0);
3436 if (TREE_CODE (arg0) == CONSTRUCTOR)
3438 tree m = purpose_member (arg1, CONSTRUCTOR_ELTS (arg0));
3445 TREE_CONSTANT (t) = wins;
3451 if (TREE_CODE (arg0) == INTEGER_CST)
3453 HOST_WIDE_INT low, high;
3454 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3455 TREE_INT_CST_HIGH (arg0),
3457 t = build_int_2 (low, high);
3458 TREE_TYPE (t) = type;
3460 = (TREE_OVERFLOW (arg0)
3461 | force_fit_type (t, overflow));
3462 TREE_CONSTANT_OVERFLOW (t)
3463 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3465 else if (TREE_CODE (arg0) == REAL_CST)
3466 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3467 TREE_TYPE (t) = type;
3469 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3470 return TREE_OPERAND (arg0, 0);
3472 /* Convert - (a - b) to (b - a) for non-floating-point. */
3473 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3474 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3475 TREE_OPERAND (arg0, 0));
3482 if (TREE_CODE (arg0) == INTEGER_CST)
3484 if (! TREE_UNSIGNED (type)
3485 && TREE_INT_CST_HIGH (arg0) < 0)
3487 HOST_WIDE_INT low, high;
3488 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3489 TREE_INT_CST_HIGH (arg0),
3491 t = build_int_2 (low, high);
3492 TREE_TYPE (t) = type;
3494 = (TREE_OVERFLOW (arg0)
3495 | force_fit_type (t, overflow));
3496 TREE_CONSTANT_OVERFLOW (t)
3497 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3500 else if (TREE_CODE (arg0) == REAL_CST)
3502 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3503 t = build_real (type,
3504 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3506 TREE_TYPE (t) = type;
3508 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3509 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3513 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
3515 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
3516 return build (COMPLEX_EXPR, TREE_TYPE (arg0),
3517 TREE_OPERAND (arg0, 0),
3518 fold (build1 (NEGATE_EXPR,
3519 TREE_TYPE (TREE_TYPE (arg0)),
3520 TREE_OPERAND (arg0, 1))));
3521 else if (TREE_CODE (arg0) == COMPLEX_CST)
3522 return build_complex (TREE_OPERAND (arg0, 0),
3523 fold (build1 (NEGATE_EXPR,
3524 TREE_TYPE (TREE_TYPE (arg0)),
3525 TREE_OPERAND (arg0, 1))));
3526 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
3527 return fold (build (TREE_CODE (arg0), type,
3528 fold (build1 (CONJ_EXPR, type,
3529 TREE_OPERAND (arg0, 0))),
3530 fold (build1 (CONJ_EXPR,
3531 type, TREE_OPERAND (arg0, 1)))));
3532 else if (TREE_CODE (arg0) == CONJ_EXPR)
3533 return TREE_OPERAND (arg0, 0);
3539 if (TREE_CODE (arg0) == INTEGER_CST)
3540 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3541 ~ TREE_INT_CST_HIGH (arg0));
3542 TREE_TYPE (t) = type;
3543 force_fit_type (t, 0);
3544 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
3545 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3547 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3548 return TREE_OPERAND (arg0, 0);
3552 /* A + (-B) -> A - B */
3553 if (TREE_CODE (arg1) == NEGATE_EXPR)
3554 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3555 else if (! FLOAT_TYPE_P (type))
3557 if (integer_zerop (arg1))
3558 return non_lvalue (convert (type, arg0));
3560 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3561 with a constant, and the two constants have no bits in common,
3562 we should treat this as a BIT_IOR_EXPR since this may produce more simplifications. */
3564 if (TREE_CODE (arg0) == BIT_AND_EXPR
3565 && TREE_CODE (arg1) == BIT_AND_EXPR
3566 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3567 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3568 && integer_zerop (const_binop (BIT_AND_EXPR,
3569 TREE_OPERAND (arg0, 1),
3570 TREE_OPERAND (arg1, 1), 0)))
3572 code = BIT_IOR_EXPR;
3576 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3577 about the case where C is a constant, just try one of the
3578 four possibilities. */
3580 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3581 && operand_equal_p (TREE_OPERAND (arg0, 1),
3582 TREE_OPERAND (arg1, 1), 0))
3583 return fold (build (MULT_EXPR, type,
3584 fold (build (PLUS_EXPR, type,
3585 TREE_OPERAND (arg0, 0),
3586 TREE_OPERAND (arg1, 0))),
3587 TREE_OPERAND (arg0, 1)));
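	  /* E.g. `a * 4 + b * 4' becomes `(a + b) * 4' here; constants
	     will normally have been moved to the second operand when each
	     MULT_EXPR was itself folded.  */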
3589 /* In IEEE floating point, x+0 may not equal x. */
3590 else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3592 && real_zerop (arg1))
3593 return non_lvalue (convert (type, arg0));
3595 /* In most languages, we can't associate operations on floats
3596 through parentheses. Rather than remember where the parentheses
3597 were, we don't associate floats at all. It shouldn't matter much.
3598 However, associating multiplications is only very slightly
3599 inaccurate, so do that if -ffast-math is specified. */
3600 if (FLOAT_TYPE_P (type)
3601 && ! (flag_fast_math && code == MULT_EXPR))
3604 /* The varsign == -1 cases happen only for addition and subtraction.
3605 It says that the arg that was split was really CON minus VAR.
3606 The rest of the code applies to all associative operations. */
3612 if (split_tree (arg0, code, &var, &con, &varsign))
3616 /* EXPR is (CON-VAR) +- ARG1. */
3617 /* If it is + and VAR==ARG1, return just CONST. */
3618 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3619 return convert (TREE_TYPE (t), con);
3621 /* If ARG0 is a constant, don't change things around;
3622 instead keep all the constant computations together. */
3624 if (TREE_CONSTANT (arg0))
3627 /* Otherwise return (CON +- ARG1) - VAR. */
3628 TREE_SET_CODE (t, MINUS_EXPR);
3629 TREE_OPERAND (t, 1) = var;
3631 = fold (build (code, TREE_TYPE (t), con, arg1));
3635 /* EXPR is (VAR+CON) +- ARG1. */
3636 /* If it is - and VAR==ARG1, return just CONST. */
3637 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3638 return convert (TREE_TYPE (t), con);
3640 /* If ARG0 is a constant, don't change things around;
3641 instead keep all the constant computations together. */
3643 if (TREE_CONSTANT (arg0))
3646 /* Otherwise return VAR +- (ARG1 +- CON). */
3647 TREE_OPERAND (t, 1) = tem
3648 = fold (build (code, TREE_TYPE (t), arg1, con));
3649 TREE_OPERAND (t, 0) = var;
3650 if (integer_zerop (tem)
3651 && (code == PLUS_EXPR || code == MINUS_EXPR))
3652 return convert (type, var);
3653 /* If we have x +/- (c - d) [c an explicit integer]
3654 change it to x -/+ (d - c) since if d is relocatable
3655 then the latter can be a single immediate insn
3656 and the former cannot. */
3657 if (TREE_CODE (tem) == MINUS_EXPR
3658 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3660 tree tem1 = TREE_OPERAND (tem, 1);
3661 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3662 TREE_OPERAND (tem, 0) = tem1;
3664 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3670 if (split_tree (arg1, code, &var, &con, &varsign))
3672 if (TREE_CONSTANT (arg1))
3677 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3679 /* EXPR is ARG0 +- (CON +- VAR). */
3680 if (TREE_CODE (t) == MINUS_EXPR
3681 && operand_equal_p (var, arg0, 0))
3683 /* If VAR and ARG0 cancel, return just CON or -CON. */
3684 if (code == PLUS_EXPR)
3685 return convert (TREE_TYPE (t), con);
3686 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3687 convert (TREE_TYPE (t), con)));
3691 = fold (build (code, TREE_TYPE (t), arg0, con));
3692 TREE_OPERAND (t, 1) = var;
3693 if (integer_zerop (TREE_OPERAND (t, 0))
3694 && TREE_CODE (t) == PLUS_EXPR)
3695 return convert (TREE_TYPE (t), var);
3700 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3701 if (TREE_CODE (arg1) == REAL_CST)
3703 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3705 t1 = const_binop (code, arg0, arg1, 0);
3706 if (t1 != NULL_TREE)
3708 /* The return value should always have
3709 the same type as the original expression. */
3710 TREE_TYPE (t1) = TREE_TYPE (t);
3716 if (! FLOAT_TYPE_P (type))
3718 if (! wins && integer_zerop (arg0))
3719 return build1 (NEGATE_EXPR, type, arg1);
3720 if (integer_zerop (arg1))
3721 return non_lvalue (convert (type, arg0));
3723 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3724 about the case where C is a constant, just try one of the
3725 four possibilities. */
3727 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3728 && operand_equal_p (TREE_OPERAND (arg0, 1),
3729 TREE_OPERAND (arg1, 1), 0))
3730 return fold (build (MULT_EXPR, type,
3731 fold (build (MINUS_EXPR, type,
3732 TREE_OPERAND (arg0, 0),
3733 TREE_OPERAND (arg1, 0))),
3734 TREE_OPERAND (arg0, 1)));
3736 /* Convert A - (-B) to A + B. */
3737 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3738 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3740 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3743 /* Except with IEEE floating point, 0-x equals -x. */
3744 if (! wins && real_zerop (arg0))
3745 return build1 (NEGATE_EXPR, type, arg1);
3746 /* Except with IEEE floating point, x-0 equals x. */
3747 if (real_zerop (arg1))
3748 return non_lvalue (convert (type, arg0));
3751 /* Fold &x - &x. This can happen from &x.foo - &x.
3752 This is unsafe for certain floats even in non-IEEE formats.
3753 In IEEE, it is unsafe because it does wrong for NaNs.
3754 Also note that operand_equal_p is always false if an operand is volatile. */
3757 if ((! FLOAT_TYPE_P (type) || flag_fast_math)
3758 && operand_equal_p (arg0, arg1, 0))
3759 return convert (type, integer_zero_node);
3764 if (! FLOAT_TYPE_P (type))
3766 if (integer_zerop (arg1))
3767 return omit_one_operand (type, arg1, arg0);
3768 if (integer_onep (arg1))
3769 return non_lvalue (convert (type, arg0));
3771 /* ((A / C) * C) is A if the division is an
3772 EXACT_DIV_EXPR. Since C is normally a constant,
3773 just check for one of the four possibilities. */
3775 if (TREE_CODE (arg0) == EXACT_DIV_EXPR
3776 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3777 return TREE_OPERAND (arg0, 0);
3779 /* (a * (1 << b)) is (a << b) */
3780 if (TREE_CODE (arg1) == LSHIFT_EXPR
3781 && integer_onep (TREE_OPERAND (arg1, 0)))
3782 return fold (build (LSHIFT_EXPR, type, arg0,
3783 TREE_OPERAND (arg1, 1)));
3784 if (TREE_CODE (arg0) == LSHIFT_EXPR
3785 && integer_onep (TREE_OPERAND (arg0, 0)))
3786 return fold (build (LSHIFT_EXPR, type, arg1,
3787 TREE_OPERAND (arg0, 1)));
3791 /* x*0 is 0, except for IEEE floating point. */
3792 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3794 && real_zerop (arg1))
3795 return omit_one_operand (type, arg1, arg0);
3796 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3797 However, ANSI says we can drop signals,
3798 so we can do this anyway. */
3799 if (real_onep (arg1))
3800 return non_lvalue (convert (type, arg0));
3802 if (! wins && real_twop (arg1))
3804 tree arg = save_expr (arg0);
3805 return build (PLUS_EXPR, type, arg, arg);
3812 if (integer_all_onesp (arg1))
3813 return omit_one_operand (type, arg1, arg0);
3814 if (integer_zerop (arg1))
3815 return non_lvalue (convert (type, arg0));
3816 t1 = distribute_bit_expr (code, type, arg0, arg1);
3817 if (t1 != NULL_TREE)
3820 /* (a << C1) | (a >> C2), where A is unsigned and C1+C2 is the size of A,
3821 is a rotate of A by C1 bits. */
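      /* E.g. for a 32-bit unsigned X, `(x << 3) | (x >> 29)' becomes an
	 LROTATE_EXPR of X by 3 bits.  */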
3823 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3824 || TREE_CODE (arg0) == LSHIFT_EXPR)
3825 && (TREE_CODE (arg1) == RSHIFT_EXPR
3826 || TREE_CODE (arg1) == LSHIFT_EXPR)
3827 && TREE_CODE (arg0) != TREE_CODE (arg1)
3828 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3829 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3830 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3831 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3832 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3833 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3834 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3835 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3836 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3837 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3838 TREE_CODE (arg0) == LSHIFT_EXPR
3839 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
3844 if (integer_zerop (arg1))
3845 return non_lvalue (convert (type, arg0));
3846 if (integer_all_onesp (arg1))
3847 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3852 if (integer_all_onesp (arg1))
3853 return non_lvalue (convert (type, arg0));
3854 if (integer_zerop (arg1))
3855 return omit_one_operand (type, arg1, arg0);
3856 t1 = distribute_bit_expr (code, type, arg0, arg1);
3857 if (t1 != NULL_TREE)
3859 /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char. */
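      /* E.g. with `unsigned char c', the expression `(int) c & 0377'
	 folds to `(int) c': zero-extension leaves no bits above the mask,
	 so the AND is a no-op.  */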
3860 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3861 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3863 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3864 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3865 && (~TREE_INT_CST_LOW (arg0)
3866 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3867 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3869 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3870 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3872 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3873 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3874 && (~TREE_INT_CST_LOW (arg1)
3875 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3876 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3880 case BIT_ANDTC_EXPR:
3881 if (integer_all_onesp (arg0))
3882 return non_lvalue (convert (type, arg1));
3883 if (integer_zerop (arg0))
3884 return omit_one_operand (type, arg0, arg1);
3885 if (TREE_CODE (arg1) == INTEGER_CST)
3887 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3888 code = BIT_AND_EXPR;
3894 /* In most cases, do nothing with a divide by zero. */
3895 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3896 #ifndef REAL_INFINITY
3897 if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
3900 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3902 /* In IEEE floating point, x/1 is not equivalent to x for snans.
3903 However, ANSI says we can drop signals, so we can do this anyway. */
3904 if (real_onep (arg1))
3905 return non_lvalue (convert (type, arg0));
3907 /* If ARG1 is a constant, we can convert this to a multiply by the
3908 reciprocal. This does not have the same rounding properties,
3909 so only do this if -ffast-math. We can actually always safely
3910 do it if ARG1 is a power of two, but it's hard to tell if it is
3911 or not in a portable manner. */
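      /* E.g. with -ffast-math, `x / 4.0' becomes `x * 0.25' below; the
	 reciprocal is exact for powers of two but may round differently
	 for other constants, hence the flag check.  */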
3912 if (TREE_CODE (arg1) == REAL_CST && flag_fast_math
3913 && 0 != (tem = const_binop (code, build_real (type, dconst1),
3915 return fold (build (MULT_EXPR, type, arg0, tem));
3919 case TRUNC_DIV_EXPR:
3920 case ROUND_DIV_EXPR:
3921 case FLOOR_DIV_EXPR:
3923 case EXACT_DIV_EXPR:
3924 if (integer_onep (arg1))
3925 return non_lvalue (convert (type, arg0));
3926 if (integer_zerop (arg1))
3929 /* If we have ((a / C1) / C2) where both divisions are the same type, try
3930 to simplify. First see if C1 * C2 overflows or not. */
3931 if (TREE_CODE (arg0) == code && TREE_CODE (arg1) == INTEGER_CST
3932 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
3936 new_divisor = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 1), arg1, 0);
3937 tem = const_binop (FLOOR_DIV_EXPR, new_divisor, arg1, 0);
3939 if (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_LOW (tem)
3940 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_HIGH (tem))
3942 /* If no overflow, divide by C1*C2. */
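	      /* E.g. `(a / 4) / 5' becomes `a / 20' this way.  */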
3943 return fold (build (code, type, TREE_OPERAND (arg0, 0), new_divisor));
3947 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
3948 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
3949 expressions, which often appear in the offsets or sizes of
3950 objects with a varying size. Only deal with positive divisors
3951 and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
3953 Look for NOPs and SAVE_EXPRs inside. */
3955 if (TREE_CODE (arg1) == INTEGER_CST
3956 && tree_int_cst_sgn (arg1) >= 0)
3958 int have_save_expr = 0;
3959 tree c2 = integer_zero_node;
3962 if (TREE_CODE (xarg0) == SAVE_EXPR)
3963 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3967 if (TREE_CODE (xarg0) == PLUS_EXPR
3968 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3969 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
3970 else if (TREE_CODE (xarg0) == MINUS_EXPR
3971 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
3972 /* If we are doing this computation unsigned, the negate is incorrect. */
3974 && ! TREE_UNSIGNED (type))
3976 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
3977 xarg0 = TREE_OPERAND (xarg0, 0);
3980 if (TREE_CODE (xarg0) == SAVE_EXPR)
3981 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3985 if (TREE_CODE (xarg0) == MULT_EXPR
3986 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
3987 && tree_int_cst_sgn (TREE_OPERAND (xarg0, 1)) >= 0
3988 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
3989 TREE_OPERAND (xarg0, 1), arg1, 1))
3990 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
3991 TREE_OPERAND (xarg0, 1), 1)))
3992 && (tree_int_cst_sgn (c2) >= 0
3993 || integer_zerop (const_binop (TRUNC_MOD_EXPR, c2,
3996 tree outer_div = integer_one_node;
3997 tree c1 = TREE_OPERAND (xarg0, 1);
4000 /* If C3 > C1, set them equal and do a divide by
4001 C3/C1 at the end of the operation. */
4002 if (tree_int_cst_lt (c1, c3))
4003 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
4005 /* The result is A * (C1/C3) + (C2/C3). */
4006 t = fold (build (PLUS_EXPR, type,
4007 fold (build (MULT_EXPR, type,
4008 TREE_OPERAND (xarg0, 0),
4009 const_binop (code, c1, c3, 1))),
4010 const_binop (code, c2, c3, 1)));
4012 if (! integer_onep (outer_div))
4013 t = fold (build (code, type, t, convert (type, outer_div)));
4025 case FLOOR_MOD_EXPR:
4026 case ROUND_MOD_EXPR:
4027 case TRUNC_MOD_EXPR:
4028 if (integer_onep (arg1))
4029 return omit_one_operand (type, integer_zero_node, arg0);
4030 if (integer_zerop (arg1))
4033 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
4034 where C1 % C3 == 0. Handle similarly to the division case,
4035 but don't bother with SAVE_EXPRs. */
4037 if (TREE_CODE (arg1) == INTEGER_CST
4038 && ! integer_zerop (arg1))
4040 tree c2 = integer_zero_node;
4043 if (TREE_CODE (xarg0) == PLUS_EXPR
4044 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4045 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4046 else if (TREE_CODE (xarg0) == MINUS_EXPR
4047 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4048 && ! TREE_UNSIGNED (type))
4050 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4051 xarg0 = TREE_OPERAND (xarg0, 0);
4056 if (TREE_CODE (xarg0) == MULT_EXPR
4057 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4058 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
4059 TREE_OPERAND (xarg0, 1),
4061 && tree_int_cst_sgn (c2) >= 0)
4062 /* The result is (C2%C3). */
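	  /* E.g. `(a * 8 + 3) % 4' folds to 3, since a * 8 is always a
	     multiple of 4; omit_one_operand still arranges for A to be
	     evaluated if it has side effects.  */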
4063 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
4064 TREE_OPERAND (xarg0, 0));
4073 if (integer_zerop (arg1))
4074 return non_lvalue (convert (type, arg0));
4075 /* Since a negative shift count is not well-defined,
4076 don't try to compute it in the compiler. */
4077 if (tree_int_cst_sgn (arg1) < 0)
4082 if (operand_equal_p (arg0, arg1, 0))
4084 if (INTEGRAL_TYPE_P (type)
4085 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
4086 return omit_one_operand (type, arg1, arg0);
4090 if (operand_equal_p (arg0, arg1, 0))
4092 if (INTEGRAL_TYPE_P (type)
4093 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
4094 return omit_one_operand (type, arg1, arg0);
4097 case TRUTH_NOT_EXPR:
4098 /* Note that the operand of this must be an int
4099 and its values must be 0 or 1.
4100 ("true" is a fixed value perhaps depending on the language,
4101 but we don't handle values other than 1 correctly yet.) */
4102 return invert_truthvalue (arg0);
4104 case TRUTH_ANDIF_EXPR:
4105 /* Note that the operands of this must be ints
4106 and their values must be 0 or 1.
4107 ("true" is a fixed value perhaps depending on the language.) */
4108 /* If first arg is constant zero, return it. */
4109 if (integer_zerop (arg0))
4111 case TRUTH_AND_EXPR:
4112 /* If either arg is constant true, drop it. */
4113 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4114 return non_lvalue (arg1);
4115 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4116 return non_lvalue (arg0);
4117 /* If second arg is constant zero, result is zero, but first arg
4118 must be evaluated. */
4119 if (integer_zerop (arg1))
4120 return omit_one_operand (type, arg1, arg0);
4123 /* We only do these simplifications if we are optimizing. */
4127 /* Check for things like (A || B) && (A || C). We can convert this
4128 to A || (B && C). Note that either operator can be any of the four
4129 truth and/or operations and the transformation will still be
4130 valid. Also note that we only care about order for the
4131 ANDIF and ORIF operators. */
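      /* E.g. `(a || b) && (a || c)' becomes `a || (b && c)', saving one
	 test of A.  */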
4132 if (TREE_CODE (arg0) == TREE_CODE (arg1)
4133 && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
4134 || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
4135 || TREE_CODE (arg0) == TRUTH_AND_EXPR
4136 || TREE_CODE (arg0) == TRUTH_OR_EXPR))
4138 tree a00 = TREE_OPERAND (arg0, 0);
4139 tree a01 = TREE_OPERAND (arg0, 1);
4140 tree a10 = TREE_OPERAND (arg1, 0);
4141 tree a11 = TREE_OPERAND (arg1, 1);
4142 int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
4143 || TREE_CODE (arg0) == TRUTH_AND_EXPR)
4144 && (code == TRUTH_AND_EXPR
4145 || code == TRUTH_OR_EXPR));
4147 if (operand_equal_p (a00, a10, 0))
4148 return fold (build (TREE_CODE (arg0), type, a00,
4149 fold (build (code, type, a01, a11))));
4150 else if (commutative && operand_equal_p (a00, a11, 0))
4151 return fold (build (TREE_CODE (arg0), type, a00,
4152 fold (build (code, type, a01, a10))));
4153 else if (commutative && operand_equal_p (a01, a10, 0))
4154 return fold (build (TREE_CODE (arg0), type, a01,
4155 fold (build (code, type, a00, a11))));
4157 /* This case is tricky because we must either have commutative
4158 operators or else A10 must not have side-effects. */
4160 else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
4161 && operand_equal_p (a01, a11, 0))
4162 return fold (build (TREE_CODE (arg0), type,
4163 fold (build (code, type, a00, a10)),
      /* Check for the possibility of merging component references.  If our
	 lhs is another similar operation, try to merge its rhs with our
	 rhs.  Then try to merge our lhs and rhs.  */
      if (TREE_CODE (arg0) == code
	  && 0 != (tem = fold_truthop (code, type,
				       TREE_OPERAND (arg0, 1), arg1)))
	return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));

      if ((tem = fold_truthop (code, type, arg0, arg1)) != 0)
	return tem;

      return t;
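      /* fold_truthop (used above) can combine adjacent bit-field tests;
	 e.g. with a and b adjacent fields of x, `x.a == 1 && x.b == 2'
	 may become a single masked comparison of the containing word.
	 (Illustrative; fold_truthop checks the precise conditions.)  */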
    case TRUTH_ORIF_EXPR:
      /* Note that the operands of this must be ints
	 and their values must be 0 or true.
	 ("true" is a fixed value perhaps depending on the language.)  */
      /* If first arg is constant true, return it.  */
      if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
	return non_lvalue (arg0);

    case TRUTH_OR_EXPR:
      /* If either arg is constant zero, drop it.  */
      if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
	return non_lvalue (arg1);
      if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
	return non_lvalue (arg0);
      /* If second arg is constant true, result is true, but we must
	 evaluate first arg.  */
      if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
	return omit_one_operand (type, arg1, arg0);
      goto truth_andor;
    case TRUTH_XOR_EXPR:
      /* If either arg is constant zero, drop it.  */
      if (integer_zerop (arg0))
	return non_lvalue (arg1);
      if (integer_zerop (arg1))
	return non_lvalue (arg0);
      /* If either arg is constant true, this is a logical inversion.  */
      if (integer_onep (arg0))
	return non_lvalue (invert_truthvalue (arg1));
      if (integer_onep (arg1))
	return non_lvalue (invert_truthvalue (arg0));
      return t;
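      /* E.g., `x ^ 0' is x, and `x ^ 1' is !x, for 0/1 truth values.  */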
    case EQ_EXPR:
    case NE_EXPR:
    case LT_EXPR:
    case GT_EXPR:
    case LE_EXPR:
    case GE_EXPR:
      /* If one arg is a constant integer, put it last.  */
      if (TREE_CODE (arg0) == INTEGER_CST
	  && TREE_CODE (arg1) != INTEGER_CST)
	{
	  TREE_OPERAND (t, 0) = arg1;
	  TREE_OPERAND (t, 1) = arg0;
	  arg0 = TREE_OPERAND (t, 0);
	  arg1 = TREE_OPERAND (t, 1);
	  code = swap_tree_comparison (code);
	  TREE_SET_CODE (t, code);
	}
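      /* E.g., `3 < x' becomes `x > 3', so the folds below only need to
	 look for a constant as the second operand.  */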
      /* Convert foo++ == CONST into ++foo == CONST + INCR.
	 First, see if one arg is constant; find the constant arg
	 and the other one.  */
      {
	tree constop = 0, varop;
	tree *constoploc;

	if (TREE_CONSTANT (arg1))
	  constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
	if (TREE_CONSTANT (arg0))
	  constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;

	if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
	  {
	    /* This optimization is invalid for ordered comparisons
	       if CONST+INCR overflows or if foo+incr might overflow.
	       This optimization is invalid for floating point due to rounding.
	       For pointer types we assume overflow doesn't happen.  */
	    if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
		|| (! FLOAT_TYPE_P (TREE_TYPE (varop))
		    && (code == EQ_EXPR || code == NE_EXPR)))
	      {
		tree newconst
		  = fold (build (PLUS_EXPR, TREE_TYPE (varop),
				 constop, TREE_OPERAND (varop, 1)));
		TREE_SET_CODE (varop, PREINCREMENT_EXPR);
		*constoploc = newconst;
		return t;
	      }
	  }
	else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
	  {
	    if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
		|| (! FLOAT_TYPE_P (TREE_TYPE (varop))
		    && (code == EQ_EXPR || code == NE_EXPR)))
	      {
		tree newconst
		  = fold (build (MINUS_EXPR, TREE_TYPE (varop),
				 constop, TREE_OPERAND (varop, 1)));
		TREE_SET_CODE (varop, PREDECREMENT_EXPR);
		*constoploc = newconst;
		return t;
	      }
	  }
      }
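      /* E.g., `foo++ == 5' becomes `++foo == 6': with the increment done
	 first, the comparison must be against CONST + INCR.  */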
      /* Change X >= CST to X > (CST - 1) if CST is positive.  */
      if (TREE_CODE (arg1) == INTEGER_CST
	  && TREE_CODE (arg0) != INTEGER_CST
	  && tree_int_cst_sgn (arg1) > 0)
	{
	  switch (TREE_CODE (t))
	    {
	    case GE_EXPR:
	      code = GT_EXPR;
	      TREE_SET_CODE (t, code);
	      arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
	      TREE_OPERAND (t, 1) = arg1;
	      break;

	    case LT_EXPR:
	      code = LE_EXPR;
	      TREE_SET_CODE (t, code);
	      arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
	      TREE_OPERAND (t, 1) = arg1;
	      break;
	    }
	}
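      /* E.g., `x >= 5' becomes `x > 4' and `x < 5' becomes `x <= 4',
	 reducing the number of distinct comparison forms seen below.  */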
      /* If this is an EQ or NE comparison with zero and ARG0 is
	 (1 << foo) & bar, convert it to (bar >> foo) & 1.  Both require
	 two operations, but the latter can be done in one less insn
	 on machines that have only two-operand insns or on which a
	 constant cannot be the first operand.  */
      if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
	  && TREE_CODE (arg0) == BIT_AND_EXPR)
	{
	  if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
	      && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
	    return
	      fold (build (code, type,
			   build (BIT_AND_EXPR, TREE_TYPE (arg0),
				  build (RSHIFT_EXPR,
					 TREE_TYPE (TREE_OPERAND (arg0, 0)),
					 TREE_OPERAND (arg0, 1),
					 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
				  convert (TREE_TYPE (arg0),
					   integer_one_node)),
			   arg1));
	  else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
		   && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
	    return
	      fold (build (code, type,
			   build (BIT_AND_EXPR, TREE_TYPE (arg0),
				  build (RSHIFT_EXPR,
					 TREE_TYPE (TREE_OPERAND (arg0, 1)),
					 TREE_OPERAND (arg0, 0),
					 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
				  convert (TREE_TYPE (arg0),
					   integer_one_node)),
			   arg1));
	}
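      /* E.g., `((1 << n) & bar) != 0' becomes `((bar >> n) & 1) != 0';
	 both test bit N of BAR, but the second form needs no constant as
	 a first operand.  */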
      /* If this is an NE or EQ comparison of zero against the result of a
	 signed MOD operation whose second operand is a power of 2, make
	 the MOD operation unsigned since it is simpler and equivalent.  */
      if ((code == NE_EXPR || code == EQ_EXPR)
	  && integer_zerop (arg1)
	  && ! TREE_UNSIGNED (TREE_TYPE (arg0))
	  && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
	      || TREE_CODE (arg0) == CEIL_MOD_EXPR
	      || TREE_CODE (arg0) == FLOOR_MOD_EXPR
	      || TREE_CODE (arg0) == ROUND_MOD_EXPR)
	  && integer_pow2p (TREE_OPERAND (arg0, 1)))
	{
	  tree newtype = unsigned_type (TREE_TYPE (arg0));
	  tree newmod = build (TREE_CODE (arg0), newtype,
			       convert (newtype, TREE_OPERAND (arg0, 0)),
			       convert (newtype, TREE_OPERAND (arg0, 1)));

	  return build (code, type, newmod, convert (newtype, arg1));
	}
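      /* E.g., `(int) x % 4 == 0' becomes `(unsigned) x % 4 == 0': for a
	 power-of-2 divisor the remainder is zero in one form exactly when
	 it is zero in the other, and the unsigned MOD is a simple mask.  */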
      /* If this is an NE comparison of zero with an AND of one, remove the
	 comparison since the AND will give the correct value.  */
      if (code == NE_EXPR && integer_zerop (arg1)
	  && TREE_CODE (arg0) == BIT_AND_EXPR
	  && integer_onep (TREE_OPERAND (arg0, 1)))
	return convert (type, arg0);
      /* If we have (A & C) == C where C is a power of 2, convert this into
	 (A & C) != 0.  Similarly for NE_EXPR.  */
      if ((code == EQ_EXPR || code == NE_EXPR)
	  && TREE_CODE (arg0) == BIT_AND_EXPR
	  && integer_pow2p (TREE_OPERAND (arg0, 1))
	  && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
	return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
		      arg0, integer_zero_node);
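      /* E.g., `(a & 8) == 8' becomes `(a & 8) != 0': with a single-bit C
	 the only nonzero value (A & C) can take is C itself.  */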
      /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
	 and similarly for >= into !=.  */
      if ((code == LT_EXPR || code == GE_EXPR)
	  && TREE_UNSIGNED (TREE_TYPE (arg0))
	  && TREE_CODE (arg1) == LSHIFT_EXPR
	  && integer_onep (TREE_OPERAND (arg1, 0)))
	return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
		      build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
			     TREE_OPERAND (arg1, 1)),
		      convert (TREE_TYPE (arg0), integer_zero_node));

      else if ((code == LT_EXPR || code == GE_EXPR)
	       && TREE_UNSIGNED (TREE_TYPE (arg0))
	       && (TREE_CODE (arg1) == NOP_EXPR
		   || TREE_CODE (arg1) == CONVERT_EXPR)
	       && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
	       && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
	return
	  build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
		 convert (TREE_TYPE (arg0),
			  build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
				 TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
		 convert (TREE_TYPE (arg0), integer_zero_node));
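      /* E.g., for unsigned x, `x < (1 << y)' becomes `(x >> y) == 0':
	 x is below 2**y exactly when no bits at position y or above are
	 set.  */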
      /* Simplify comparison of something with itself.  (For IEEE
	 floating-point, we can only do some of these simplifications.)  */
      if (operand_equal_p (arg0, arg1, 0))
	{
	  switch (code)
	    {
	    case EQ_EXPR:
	    case GE_EXPR:
	    case LE_EXPR:
	      if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
		{
		  t = build_int_2 (1, 0);
		  TREE_TYPE (t) = type;
		  return t;
		}
	      code = EQ_EXPR;
	      TREE_SET_CODE (t, code);
	      break;

	    case NE_EXPR:
	      /* For NE, we can only do this simplification if integer.  */
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
		break;
	      /* ... fall through ...  */
	    case GT_EXPR:
	    case LT_EXPR:
	      t = build_int_2 (0, 0);
	      TREE_TYPE (t) = type;
	      return t;
	    }
	}
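      /* E.g., integer `x == x' folds to 1 and `x < x' to 0.  For IEEE
	 floats only the ordered part is simplified (x <= x becomes
	 x == x), since all of <, <=, and == are false when x is a NaN
	 but != is true.  */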
      /* An unsigned comparison against 0 can be simplified.  */
      if (integer_zerop (arg1)
	  && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
	      || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
	  && TREE_UNSIGNED (TREE_TYPE (arg1)))
	{
	  switch (TREE_CODE (t))
	    {
	    case GT_EXPR:
	      code = NE_EXPR;
	      TREE_SET_CODE (t, NE_EXPR);
	      break;
	    case LE_EXPR:
	      code = EQ_EXPR;
	      TREE_SET_CODE (t, EQ_EXPR);
	      break;
	    case GE_EXPR:
	      return omit_one_operand (type,
				       convert (type, integer_one_node),
				       arg0);
	    case LT_EXPR:
	      return omit_one_operand (type,
				       convert (type, integer_zero_node),
				       arg0);
	    }
	}
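      /* E.g., for unsigned x, `x > 0' becomes `x != 0' and `x <= 0'
	 becomes `x == 0', while `x >= 0' is always 1 and `x < 0' always
	 0 (still evaluating x for side effects).  */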
      /* If we are comparing an expression that just has comparisons
	 of two integer values, arithmetic expressions of those comparisons,
	 and constants, we can simplify it.  There are only three cases
	 to check: the two values can either be equal, the first can be
	 greater, or the second can be greater.  Fold the expression for
	 those three values.  Since each value must be 0 or 1, we have
	 eight possibilities, each of which corresponds to the constant 0
	 or 1 or one of the six possible comparisons.

	 This handles common cases like (a > b) == 0 but also handles
	 expressions like ((x > y) - (y > x)) > 0, which supposedly
	 occur in macroized code.  */

      if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
	{
	  tree cval1 = 0, cval2 = 0;
	  int save_p = 0;

	  if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
	      /* Don't handle degenerate cases here; they should already
		 have been handled anyway.  */
	      && cval1 != 0 && cval2 != 0
	      && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
	      && TREE_TYPE (cval1) == TREE_TYPE (cval2)
	      && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
	      && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
				    TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
	    {
	      tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
	      tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
	      tree high_result, equal_result, low_result;

	      /* We can't just pass T to eval_subst in case cval1 or cval2
		 was the same as ARG1.  */

	      high_result
		= fold (build (code, type,
			       eval_subst (arg0, cval1, maxval, cval2, minval),
			       arg1));
	      equal_result
		= fold (build (code, type,
			       eval_subst (arg0, cval1, maxval, cval2, maxval),
			       arg1));
	      low_result
		= fold (build (code, type,
			       eval_subst (arg0, cval1, minval, cval2, maxval),
			       arg1));

	      /* All three of these results should be 0 or 1.  Confirm they
		 are.  Then use those values to select the proper code
		 to use.  */

	      if ((integer_zerop (high_result)
		   || integer_onep (high_result))
		  && (integer_zerop (equal_result)
		      || integer_onep (equal_result))
		  && (integer_zerop (low_result)
		      || integer_onep (low_result)))
		{
		  /* Make a 3-bit mask with the high-order bit being the
		     value for `>', the next for '=', and the low for '<'.  */
		  switch ((integer_onep (high_result) * 4)
			  + (integer_onep (equal_result) * 2)
			  + integer_onep (low_result))
		    {
		    case 0:
		      /* Always false.  */
		      return omit_one_operand (type, integer_zero_node, arg0);
		    case 1:
		      code = LT_EXPR;
		      break;
		    case 2:
		      code = EQ_EXPR;
		      break;
		    case 3:
		      code = LE_EXPR;
		      break;
		    case 4:
		      code = GT_EXPR;
		      break;
		    case 5:
		      code = NE_EXPR;
		      break;
		    case 6:
		      code = GE_EXPR;
		      break;
		    case 7:
		      /* Always true.  */
		      return omit_one_operand (type, integer_one_node, arg0);
		    }

		  t = build (code, type, cval1, cval2);
		  if (save_p)
		    return save_expr (t);
		  else
		    return fold (t);
		}
	    }
	}
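      /* E.g., `(a > b) == 0' folds to `a <= b': substituting the three
	 orderings of a and b gives 0, 1, 1 for >, =, <, i.e. mask 3,
	 which is LE_EXPR.  */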
      /* If this is a comparison of a field, we may be able to simplify it.  */
      if ((TREE_CODE (arg0) == COMPONENT_REF
	   || TREE_CODE (arg0) == BIT_FIELD_REF)
	  && (code == EQ_EXPR || code == NE_EXPR)
	  /* Handle the constant case even without -O
	     to make sure the warnings are given.  */
	  && (optimize || TREE_CODE (arg1) == INTEGER_CST))
	{
	  t1 = optimize_bit_field_compare (code, type, arg0, arg1);
	  return t1 ? t1 : t;
	}
      /* If this is a comparison of complex values and either or both
	 sides are a COMPLEX_EXPR, it is best to split up the comparisons
	 and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR.  This
	 may prevent needless evaluations.  */
      if ((code == EQ_EXPR || code == NE_EXPR)
	  && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
	  && (TREE_CODE (arg0) == COMPLEX_EXPR
	      || TREE_CODE (arg1) == COMPLEX_EXPR))
	{
	  tree subtype = TREE_TYPE (TREE_TYPE (arg0));
	  tree real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
	  tree imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
	  tree real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
	  tree imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));

	  return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
			       : TRUTH_ORIF_EXPR),
			      type,
			      fold (build (code, type, real0, real1)),
			      fold (build (code, type, imag0, imag1))));
	}
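      /* E.g., an EQ comparison against COMPLEX_EXPR (a, b) becomes
	 `realpart == a && imagpart == b', so the imaginary parts are not
	 compared when the real parts already differ.  */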
      /* From here on, the only cases we handle are when the result is
	 known to be a constant.

	 To compute GT, swap the arguments and do LT.
	 To compute GE, do LT and invert the result.
	 To compute LE, swap the arguments, do LT and invert the result.
	 To compute NE, do EQ and invert the result.

	 Therefore, the code below must handle only EQ and LT.  */

      if (code == LE_EXPR || code == GT_EXPR)
	{
	  tem = arg0, arg0 = arg1, arg1 = tem;
	  code = swap_tree_comparison (code);
	}

      /* Note that it is safe to invert for real values here because we
	 will check below in the one case that it matters.  */

      invert = 0;
      if (code == NE_EXPR || code == GE_EXPR)
	{
	  invert = 1;
	  code = invert_tree_comparison (code);
	}

      /* Compute a result for LT or EQ if args permit;
	 otherwise return T.  */
      if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
	{
	  if (code == EQ_EXPR)
	    t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
			       == TREE_INT_CST_LOW (arg1))
			      && (TREE_INT_CST_HIGH (arg0)
				  == TREE_INT_CST_HIGH (arg1)),
			      0);
	  else
	    t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
			       ? INT_CST_LT_UNSIGNED (arg0, arg1)
			       : INT_CST_LT (arg0, arg1)),
			      0);
	}

      /* Assume a nonexplicit constant cannot equal an explicit one,
	 since such code would be undefined anyway.
	 Exception: on sysvr4, using #pragma weak,
	 a label can come out as 0.  */
      else if (TREE_CODE (arg1) == INTEGER_CST
	       && !integer_zerop (arg1)
	       && TREE_CONSTANT (arg0)
	       && TREE_CODE (arg0) == ADDR_EXPR
	       && code == EQ_EXPR)
	t1 = build_int_2 (0, 0);

      /* Two real constants can be compared explicitly.  */
      else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
	{
	  /* If either operand is a NaN, the result is false with two
	     exceptions: First, an NE_EXPR is true on NaNs, but that case
	     is already handled correctly since we will be inverting the
	     result for NE_EXPR.  Second, if we had inverted a LE_EXPR
	     or a GE_EXPR into a LT_EXPR, we must return true so that it
	     will be inverted into false.  */

	  if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
	      || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
	    t1 = build_int_2 (invert && code == LT_EXPR, 0);

	  else if (code == EQ_EXPR)
	    t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
						 TREE_REAL_CST (arg1)),
			      0);
	  else
	    t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
						TREE_REAL_CST (arg1)),
			      0);
	}

      if (t1 == NULL_TREE)
	return t;

      if (invert)
	TREE_INT_CST_LOW (t1) ^= 1;

      TREE_TYPE (t1) = type;
      return t1;
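      /* In the code above, e.g. `3 <= 7' is computed by swapping to a GE,
	 inverting that to `7 < 3' (false), then flipping the result,
	 giving 1.  For a NaN operand the LT result is forced to true here
	 precisely so the final inversion yields false.  */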
    case COND_EXPR:
      /* Pedantic ANSI C says that a conditional expression is never an lvalue,
	 so all simple results must be passed through pedantic_non_lvalue.  */
      if (TREE_CODE (arg0) == INTEGER_CST)
	return pedantic_non_lvalue
	  (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
      else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
	return pedantic_non_lvalue (omit_one_operand (type, arg1, arg0));
      /* If the second operand is zero, invert the comparison and swap
	 the second and third operands.  Likewise if the second operand
	 is constant and the third is not or if the third operand is
	 equivalent to the first operand of the comparison.  */

      if (integer_zerop (arg1)
	  || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
	  || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
	      && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
						 TREE_OPERAND (t, 2),
						 TREE_OPERAND (arg0, 1))))
	{
	  /* See if this can be inverted.  If it can't, possibly because
	     it was a floating-point inequality comparison, don't do
	     anything.  */
	  tem = invert_truthvalue (arg0);

	  if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
	    {
	      arg0 = TREE_OPERAND (t, 0) = tem;
	      TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
	      TREE_OPERAND (t, 2) = arg1;
	      arg1 = TREE_OPERAND (t, 1);
	    }
	}
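      /* E.g., `a > b ? 0 : c' becomes `a <= b ? c : 0'.  For IEEE
	 inequalities invert_truthvalue cannot fold the inversion (NaNs
	 make !(a > b) differ from a <= b), and the tree is left alone.  */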
      /* If we have A op B ? A : C, we may be able to convert this to a
	 simpler expression, depending on the operation and the values
	 of B and C.  IEEE floating point prevents this though,
	 because A or B might be -0.0 or a NaN.  */

      if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
	  && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	      || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
	      || flag_fast_math)
	  && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
					     arg1, TREE_OPERAND (arg0, 1)))
	{
	  tree arg2 = TREE_OPERAND (t, 2);
	  enum tree_code comp_code = TREE_CODE (arg0);

	  /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
	     depending on the comparison operation.  */
	  if (integer_zerop (TREE_OPERAND (arg0, 1))
	      && TREE_CODE (arg2) == NEGATE_EXPR
	      && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		return pedantic_non_lvalue
		  (fold (build1 (NEGATE_EXPR, type, arg1)));
	      case NE_EXPR:
		return pedantic_non_lvalue (convert (type, arg1));
	      case GE_EXPR:
	      case GT_EXPR:
		return pedantic_non_lvalue
		  (fold (build1 (ABS_EXPR, type, arg1)));
	      case LE_EXPR:
	      case LT_EXPR:
		return pedantic_non_lvalue
		  (fold (build1 (NEGATE_EXPR, type,
				 fold (build1 (ABS_EXPR, type, arg1)))));
	      }
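	  /* E.g., `x >= 0 ? x : -x' folds to abs (x), and
	     `x < 0 ? x : -x' folds to -abs (x).  */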
	  /* If this is A != 0 ? A : 0, this is simply A.  For ==, it is
	     always zero.  */

	  if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
	    {
	      if (comp_code == NE_EXPR)
		return pedantic_non_lvalue (convert (type, arg1));
	      else if (comp_code == EQ_EXPR)
		return pedantic_non_lvalue (convert (type, integer_zero_node));
	    }
	  /* If this is A op B ? A : B, this is either A, B, min (A, B),
	     or max (A, B), depending on the operation.  */

	  if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
					      arg2, TREE_OPERAND (arg0, 0)))
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		return pedantic_non_lvalue (convert (type, arg2));
	      case NE_EXPR:
		return pedantic_non_lvalue (convert (type, arg1));
	      case LE_EXPR:
	      case LT_EXPR:
		return pedantic_non_lvalue
		  (fold (build (MIN_EXPR, type, arg1, arg2)));
	      case GE_EXPR:
	      case GT_EXPR:
		return pedantic_non_lvalue
		  (fold (build (MAX_EXPR, type, arg1, arg2)));
	      }
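	  /* E.g., `a < b ? a : b' folds to MIN_EXPR (a, b), and
	     `a > b ? a : b' to MAX_EXPR (a, b).  */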
	  /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
	     we might still be able to simplify this.  For example,
	     if C1 is one less or one more than C2, this might have started
	     out as a MIN or MAX and been transformed by this function.
	     Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE.  */

	  if (INTEGRAL_TYPE_P (type)
	      && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
	      && TREE_CODE (arg2) == INTEGER_CST)
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		/* We can replace A with C1 in this case.  */
		arg1 = TREE_OPERAND (t, 1)
		  = convert (type, TREE_OPERAND (arg0, 1));
		t = fold (t);
		break;

	      case LT_EXPR:
		/* If C1 is C2 + 1, this is min(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (PLUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MIN_EXPR, type, arg1, arg2)));
		break;

	      case LE_EXPR:
		/* If C1 is C2 - 1, this is min(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (MINUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MIN_EXPR, type, arg1, arg2)));
		break;

	      case GT_EXPR:
		/* If C1 is C2 - 1, this is max(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (MINUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MAX_EXPR, type, arg1, arg2)));
		break;

	      case GE_EXPR:
		/* If C1 is C2 + 1, this is max(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (PLUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MAX_EXPR, type, arg1, arg2)));
		break;
	      }
	}
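      /* E.g., `x < 3 ? x : 2' is min (x, 2): here C1 == C2 + 1, a shape
	 the comparison canonicalizations above can produce from an
	 original min or max.  */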
      /* If the second operand is simpler than the third, swap them
	 since that produces better jump optimization results.  */
      if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
	   || TREE_CODE (arg1) == SAVE_EXPR)
	  && ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
		|| TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
		|| TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
	{
	  /* See if this can be inverted.  If it can't, possibly because
	     it was a floating-point inequality comparison, don't do
	     anything.  */
	  tem = invert_truthvalue (arg0);

	  if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
	    {
	      arg0 = TREE_OPERAND (t, 0) = tem;
	      TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
	      TREE_OPERAND (t, 2) = arg1;
	      arg1 = TREE_OPERAND (t, 1);
	    }
	}
      /* Convert A ? 1 : 0 to simply A.  */
      if (integer_onep (TREE_OPERAND (t, 1))
	  && integer_zerop (TREE_OPERAND (t, 2))
	  /* If we try to convert TREE_OPERAND (t, 0) to our type, the
	     call to fold will try to move the conversion inside
	     a COND, which will recurse.  In that case, the COND_EXPR
	     is probably the best choice, so leave it alone.  */
	  && type == TREE_TYPE (arg0))
	return pedantic_non_lvalue (arg0);
      /* Look for expressions of the form A & 2 ? 2 : 0.  The result of this
	 operation is simply A & 2.  */

      if (integer_zerop (TREE_OPERAND (t, 2))
	  && TREE_CODE (arg0) == NE_EXPR
	  && integer_zerop (TREE_OPERAND (arg0, 1))
	  && integer_pow2p (arg1)
	  && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
	  && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
			      arg1, 1))
	return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
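      /* E.g., `(a & 2) != 0 ? 2 : 0' is just `a & 2', since the AND
	 already evaluates to either 2 or 0.  */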
      return t;

    case COMPOUND_EXPR:
      /* When pedantic, a compound expression can be neither an lvalue
	 nor an integer constant expression.  */
      if (TREE_SIDE_EFFECTS (arg0) || pedantic)
	return t;
      /* Don't let (0, 0) be null pointer constant.  */
      if (integer_zerop (arg1))
	return non_lvalue (arg1);
      return arg1;

    case COMPLEX_EXPR:
      if (wins)
	return build_complex (arg0, arg1);
      return t;
    case REALPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
	return t;
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
	return omit_one_operand (type, TREE_OPERAND (arg0, 0),
				 TREE_OPERAND (arg0, 1));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
	return TREE_REALPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build1 (REALPART_EXPR, type,
					  TREE_OPERAND (arg0, 0))),
			    fold (build1 (REALPART_EXPR,
					  type, TREE_OPERAND (arg0, 1)))));
      return t;
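      /* E.g., realpart (z + w) becomes realpart (z) + realpart (w), and
	 similarly for imagpart below: + and - act componentwise.  */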
    case IMAGPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
	return convert (type, integer_zero_node);
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
	return omit_one_operand (type, TREE_OPERAND (arg0, 1),
				 TREE_OPERAND (arg0, 0));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
	return TREE_IMAGPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build1 (IMAGPART_EXPR, type,
					  TREE_OPERAND (arg0, 0))),
			    fold (build1 (IMAGPART_EXPR, type,
					  TREE_OPERAND (arg0, 1)))));
      return t;
    default:
      return t;
    } /* switch (code) */