/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */
/* The entry points in this file are fold, size_int and size_binop.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.  */
/* Handle floating overflow for `const_binop'.  */
static jmp_buf float_error;
static void encode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT, HOST_WIDE_INT));
static void decode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *, HOST_WIDE_INT *));
int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
				HOST_WIDE_INT, HOST_WIDE_INT,
				HOST_WIDE_INT, HOST_WIDE_INT *,
				HOST_WIDE_INT *, HOST_WIDE_INT *,
				HOST_WIDE_INT *));
static int split_tree PROTO((tree, enum tree_code, tree *, tree *, int *));
static tree const_binop PROTO((enum tree_code, tree, tree, int));
static tree fold_convert PROTO((tree, tree));
static enum tree_code invert_tree_comparison PROTO((enum tree_code));
static enum tree_code swap_tree_comparison PROTO((enum tree_code));
static int truth_value_p PROTO((enum tree_code));
static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
static tree eval_subst PROTO((tree, tree, tree, tree, tree));
static tree omit_one_operand PROTO((tree, tree, tree));
static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
					      tree, tree));
static tree decode_field_reference PROTO((tree, int *, int *,
					  enum machine_mode *, int *,
					  int *, tree *));
static int all_ones_mask_p PROTO((tree, int));
static int simple_operand_p PROTO((tree));
static tree range_test PROTO((enum tree_code, tree, enum tree_code,
			      enum tree_code, tree, tree, tree));
static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
static tree strip_compound_expr PROTO((tree, tree));
/* Yield nonzero if a signed left shift of A by B bits overflows.  */
#define left_shift_overflows(a, b)  ((a) != ((a) << (b)) >> (b))

/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
   Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
   Then this yields nonzero if overflow occurred during the addition.
   Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
   Use `^' to test whether signs differ, and `< 0' to isolate the sign.  */
#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
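
/* A minimal illustrative sketch (not part of GNU CC) of how the macro
   above reports signed overflow.  It assumes a host where HOST_WIDE_INT
   is 32 bits, so the most positive value is 0x7fffffff.  */
#if 0
static void
overflow_sum_sign_example ()
{
  HOST_WIDE_INT a = 0x7fffffff, b = 1;
  HOST_WIDE_INT sum = a + b;	/* wraps to the most negative value */
  /* A and B agree in sign but SUM differs, so this is nonzero here.  */
  int overflowed = overflow_sum_sign (a, b, sum);
}
#endif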
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
/* Unpack a two-word integer into 4 words.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   WORDS points to the array of HOST_WIDE_INTs.  */

static void
encode (words, low, hi)
     HOST_WIDE_INT *words;
     HOST_WIDE_INT low, hi;
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}

/* Pack an array of 4 words into a two-word integer.
   WORDS points to the array of words.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (words, low, hi)
     HOST_WIDE_INT *words;
     HOST_WIDE_INT *low, *hi;
{
  *low = words[0] | words[1] * BASE;
  *hi = words[2] | words[3] * BASE;
}
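
/* A minimal illustrative sketch (not part of GNU CC): a round trip
   through encode and decode.  It assumes HOST_BITS_PER_WIDE_INT is 32,
   so each of the 4 words holds 16 bits and BASE is 0x10000.  */
#if 0
static void
encode_decode_example ()
{
  HOST_WIDE_INT words[4], low, hi;

  encode (words, 0x9ABCDEF0, 0x12345678);
  /* words is now { 0xDEF0, 0x9ABC, 0x5678, 0x1234 }.  */
  decode (words, &low, &hi);
  /* low == 0x9ABCDEF0 and hi == 0x12345678 once again.  */
}
#endif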
/* Make the integer constant T valid for its type
   by setting to 0 or 1 all the bits in the constant
   that don't belong in the type.
   Yield 1 if a signed overflow occurs, 0 otherwise.
   If OVERFLOW is nonzero, a signed overflow has already occurred
   in calculating T, so propagate it.

   Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
   if it exists.  */
int
force_fit_type (t, overflow)
     tree t;
     int overflow;
{
  HOST_WIDE_INT low, high;
  register int prec;

  if (TREE_CODE (t) == REAL_CST)
    {
#ifdef CHECK_FLOAT_VALUE
      CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
			 overflow);
#endif
      return overflow;
    }

  /* If T is not an INTEGER_CST, there is nothing more to do.  */
  else if (TREE_CODE (t) != INTEGER_CST)
    return overflow;

  low = TREE_INT_CST_LOW (t);
  high = TREE_INT_CST_HIGH (t);

  if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
    prec = POINTER_SIZE;
  else
    prec = TYPE_PRECISION (TREE_TYPE (t));

  /* First clear all bits that are beyond the type's precision.  */

  if (prec == 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      TREE_INT_CST_HIGH (t)
	&= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
    }
  else
    {
      TREE_INT_CST_HIGH (t) = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
	TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  /* Unsigned types do not suffer sign extension or overflow.  */
  if (TREE_UNSIGNED (TREE_TYPE (t)))
    return 0;

  /* If the value's sign bit is set, extend the sign.  */
  if (prec != 2 * HOST_BITS_PER_WIDE_INT
      && (prec > HOST_BITS_PER_WIDE_INT
	  ? (TREE_INT_CST_HIGH (t)
	     & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
	  : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
    {
      /* Value is negative:
	 set to 1 all the bits that are outside this type's precision.  */
      if (prec > HOST_BITS_PER_WIDE_INT)
	{
	  TREE_INT_CST_HIGH (t)
	    |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
	}
      else
	{
	  TREE_INT_CST_HIGH (t) = -1;
	  if (prec < HOST_BITS_PER_WIDE_INT)
	    TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
	}
    }

  /* Yield nonzero if signed overflow occurred.  */
  return
    ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
     != 0);
}
/* Add two doubleword integers with doubleword result.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
add_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT l, h;

  l = l1 + l2;
  h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);

  *lv = l;
  *hv = h;
  return overflow_sum_sign (h1, h2, h);
}
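
/* A minimal illustrative sketch (not part of GNU CC): adding two
   doubleword values with add_double.  It assumes a 32-bit
   HOST_WIDE_INT, so the pair (low, high) represents a 64-bit value.  */
#if 0
static void
add_double_example ()
{
  HOST_WIDE_INT lv, hv;

  /* (low 0xFFFFFFFF, high 1) + 1: the low word wraps to 0 and the
     carry propagates into the high word; no signed overflow.  */
  int overflow = add_double ((HOST_WIDE_INT) 0xFFFFFFFF, 1, 1, 0, &lv, &hv);
  /* lv == 0, hv == 2, overflow == 0.  */
}
#endif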
/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
neg_double (l1, h1, lv, hv)
     HOST_WIDE_INT l1, h1;
     HOST_WIDE_INT *lv, *hv;
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = - l1;
      *hv = ~ h1;
      return 0;
    }
}
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
mul_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  register unsigned HOST_WIDE_INT carry;
  register int i, j, k;
  HOST_WIDE_INT toplow, tophigh, neglow, neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  bzero ((char *) prod, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
	{
	  k = i + j;
	  /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
	  carry += arg1[i] * arg2[j];
	  /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
	  carry += prod[k];
	  prod[k] = LOWPART (carry);
	  carry = HIGHPART (carry);
	}
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);	/* This ignores prod[4] through prod[4*2-1] */

  /* Check for overflow by calculating the top half of the answer in full;
     it should agree with the low half's sign bit.  */
  decode (prod + 4, &toplow, &tophigh);
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  if (count < 0)
    {
      rshift_double (l1, h1, - count, prec, lv, hv, arith);
      return;
    }

  count = (unsigned HOST_WIDE_INT) count % prec;

  if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = (unsigned HOST_WIDE_INT) l1 << count - HOST_BITS_PER_WIDE_INT;
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
	     | ((unsigned HOST_WIDE_INT) l1 >> HOST_BITS_PER_WIDE_INT - count - 1 >> 1));
      *lv = (unsigned HOST_WIDE_INT) l1 << count;
    }
}
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  unsigned HOST_WIDE_INT signmask;
  signmask = (arith
	      ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
	      : 0);

  count = (unsigned HOST_WIDE_INT) count % prec;

  if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = signmask;
      *lv = ((signmask << 2 * HOST_BITS_PER_WIDE_INT - count - 1 << 1)
	     | ((unsigned HOST_WIDE_INT) h1 >> count - HOST_BITS_PER_WIDE_INT));
    }
  else
    {
      *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
	     | ((unsigned HOST_WIDE_INT) h1 << HOST_BITS_PER_WIDE_INT - count - 1 << 1));
      *hv = ((signmask << HOST_BITS_PER_WIDE_INT - count)
	     | ((unsigned HOST_WIDE_INT) h1 >> count));
    }
}
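
/* A minimal illustrative sketch (not part of GNU CC): arithmetic
   versus logical right shift of a negative doubleword.  It assumes a
   32-bit HOST_WIDE_INT and a 64-bit precision.  */
#if 0
static void
rshift_double_example ()
{
  HOST_WIDE_INT lv, hv;

  /* -2 is (low = -2, high = -1).  Shifting right by 1 arithmetically
     replicates the sign bit and yields -1 in both pieces ...  */
  rshift_double (-2, -1, 1, 64, &lv, &hv, 1);	/* lv == -1, hv == -1 */

  /* ... while a logical shift feeds a zero in from the top.  */
  rshift_double (-2, -1, 1, 64, &lv, &hv, 0);	/* hv == 0x7FFFFFFF */
}
#endif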
/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT arg1[4];
  register int i;
  register int carry;

  if (count < 0)
    {
      rrotate_double (l1, h1, - count, prec, lv, hv);
      return;
    }

  count %= prec;

  encode (arg1, l1, h1);

  while (count > 0)
    {
      carry = arg1[4 - 1] >> 16 - 1;
      carry &= 1;

      for (i = 0; i < 4; i++)
	{
	  carry += arg1[i] << 1;
	  arg1[i] = LOWPART (carry);
	  carry = HIGHPART (carry);
	}

      count--;
    }

  decode (arg1, lv, hv);
}
/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT arg1[4];
  register int i;
  register int carry;

  count %= prec;

  encode (arg1, l1, h1);

  while (count > 0)
    {
      carry = arg1[0] & 1;

      for (i = 4 - 1; i >= 0; i--)
	{
	  carry *= BASE;
	  carry += arg1[i];
	  arg1[i] = LOWPART (carry >> 1);
	}

      count--;
    }

  decode (arg1, lv, hv);
}
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */
int
div_and_round_double (code, uns,
		      lnum_orig, hnum_orig, lden_orig, hden_orig,
		      lquo, hquo, lrem, hrem)
     enum tree_code code;
     int uns;
     HOST_WIDE_INT lnum_orig, hnum_orig;	/* num == numerator == dividend */
     HOST_WIDE_INT lden_orig, hden_orig;	/* den == denominator == divisor */
     HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];	/* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  register int i, j;
  unsigned HOST_WIDE_INT work;
  register int carry = 0;
  HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;
  if ((hden == 0) && (lden == 0))
    abort ();

  /* Calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
	{
	  quo_neg = ~ quo_neg;
	  /* (minimum integer) / (-1) is the only overflow case.  */
	  if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
	    overflow = 1;
	}
      if (hden < 0)
	{
	  quo_neg = ~ quo_neg;
	  neg_double (lden, hden, &lden, &hden);
	}
    }

  if (hnum == 0 && hden == 0)
    {				/* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
      goto finish_up;
    }

  if (hnum == 0)
    {				/* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  bzero ((char *) quo, sizeof quo);

  bzero ((char *) num, sizeof num);	/* to zero the extra element */
  bzero ((char *) den, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < BASE)
    {
      /* hnum != 0 already checked.  */
      for (i = 4 - 1; i >= 0; i--)
	{
	  work = num[i] + carry * BASE;
	  quo[i] = work / (unsigned HOST_WIDE_INT) lden;
	  carry = work % (unsigned HOST_WIDE_INT) lden;
	}
    }
  else
    {
      /* Full double precision division,
	 with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int quo_est, scale, num_hi_sig, den_hi_sig;

      /* Find the highest non-zero divisor digit.  */
      for (i = 4 - 1; ; i--)
	if (den[i] != 0) {
	  den_hi_sig = i;
	  break;
	}

      /* Ensure that the first digit of the divisor is at least BASE/2.
	 This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1) {		/* scale divisor and dividend */
	carry = 0;
	for (i = 0; i <= 4 - 1; i++) {
	  work = (num[i] * scale) + carry;
	  num[i] = LOWPART (work);
	  carry = HIGHPART (work);
	}
	num[4] = carry;

	carry = 0;
	for (i = 0; i <= 4 - 1; i++) {
	  work = (den[i] * scale) + carry;
	  den[i] = LOWPART (work);
	  carry = HIGHPART (work);
	  if (den[i] != 0) den_hi_sig = i;
	}
      }

      num_hi_sig = 4;

      /* Main loop.  */
      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
	/* Guess the next quotient digit, quo_est, by dividing the first
	   two remaining dividend digits by the high order divisor digit.
	   quo_est is never low and is at most 2 high.  */
	unsigned HOST_WIDE_INT tmp;

	num_hi_sig = i + den_hi_sig + 1;
	work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
	if (num[num_hi_sig] != den[den_hi_sig])
	  quo_est = work / den[den_hi_sig];
	else
	  quo_est = BASE - 1;

	/* Refine quo_est so it's usually correct, and at most one high.  */
	tmp = work - quo_est * den[den_hi_sig];
	if (tmp < BASE
	    && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
	  quo_est--;

	/* Try QUO_EST as the quotient digit, by multiplying the
	   divisor by QUO_EST and subtracting from the remaining dividend.
	   Keep in mind that QUO_EST is the I - 1st digit.  */

	carry = 0;
	for (j = 0; j <= den_hi_sig; j++)
	  {
	    work = quo_est * den[j] + carry;
	    carry = HIGHPART (work);
	    work = num[i + j] - LOWPART (work);
	    num[i + j] = LOWPART (work);
	    carry += HIGHPART (work) != 0;
	  }

	/* If quo_est was high by one, then num[i] went negative and
	   we need to correct things.  */

	if (num[num_hi_sig] < carry)
	  {
	    quo_est--;
	    carry = 0;		/* add divisor back in */
	    for (j = 0; j <= den_hi_sig; j++)
	      {
		work = num[i + j] + den[j] + carry;
		carry = HIGHPART (work);
		num[i + j] = LOWPART (work);
	      }
	    num[num_hi_sig] += carry;
	  }

	/* Store the quotient digit.  */
	quo[i] = quo_est;
      }
    }
  decode (quo, lquo, hquo);

 finish_up:
  /* If the result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* Compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:	/* round toward zero */
    case EXACT_DIV_EXPR:	/* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:	/* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
	{
	  /* quo = quo - 1;  */
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
		      lquo, hquo);
	}
      else return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:		/* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
	{
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
		      lquo, hquo);
	}
      else return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:	/* round to closest integer */
      {
	HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
	HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;

	/* Get absolute values.  */
	if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
	if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);

	/* if (2 * abs (lrem) >= abs (lden)) */
	mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
		    labs_rem, habs_rem, &ltwice, &htwice);
	if (((unsigned HOST_WIDE_INT) habs_den
	     < (unsigned HOST_WIDE_INT) htwice)
	    || (((unsigned HOST_WIDE_INT) habs_den
		 == (unsigned HOST_WIDE_INT) htwice)
		&& ((unsigned HOST_WIDE_INT) labs_den
		    < (unsigned HOST_WIDE_INT) ltwice)))
	  {
	    if (*hquo < 0)
	      /* quo = quo - 1;  */
	      add_double (*lquo, *hquo,
			  (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
	    else
	      /* quo = quo + 1;  */
	      add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
			  lquo, hquo);
	  }
	else return overflow;
      }
      break;

    default:
      abort ();
    }
  /* Compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
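
/* A minimal illustrative sketch (not part of GNU CC): how the rounding
   code affects -7 / 2.  It assumes a 32-bit HOST_WIDE_INT, so -7 is
   the doubleword (low -7, high -1).  */
#if 0
static void
div_and_round_double_example ()
{
  HOST_WIDE_INT lquo, hquo, lrem, hrem;

  /* Truncating division rounds toward zero: quotient -3, remainder -1.  */
  div_and_round_double (TRUNC_DIV_EXPR, 0, -7, -1, 2, 0,
			&lquo, &hquo, &lrem, &hrem);

  /* Floor division rounds toward negative infinity: quotient -4,
     remainder 1.  */
  div_and_round_double (FLOOR_DIV_EXPR, 0, -7, -1, 2, 0,
			&lquo, &hquo, &lrem, &hrem);
}
#endif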
#ifndef REAL_ARITHMETIC
/* Effectively truncate a real value to represent the nearest possible value
   in a narrower mode.  The result is actually represented in the same data
   type as the argument, but its value is usually different.

   A trap may occur during the FP operations and it is the responsibility
   of the calling function to have a handler established.  */

REAL_VALUE_TYPE
real_value_truncate (mode, arg)
     enum machine_mode mode;
     REAL_VALUE_TYPE arg;
{
  return REAL_VALUE_TRUNCATE (mode, arg);
}
#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT

/* Check for infinity in an IEEE double precision number.  */

int
target_isinf (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign      :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign      :  1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
	      && u.big_endian.mantissa1 == 0
	      && u.big_endian.mantissa2 == 0);
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
	      && u.little_endian.mantissa1 == 0
	      && u.little_endian.mantissa2 == 0);
    }
}
/* Check whether an IEEE double precision number is a NaN.  */

int
target_isnan (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign      :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign      :  1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
	      && (u.big_endian.mantissa1 != 0
		  || u.big_endian.mantissa2 != 0));
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
	      && (u.little_endian.mantissa1 != 0
		  || u.little_endian.mantissa2 != 0));
    }
}
/* Check for a negative IEEE double precision number.  */

int
target_negative (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign      :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign      :  1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return u.big_endian.sign;
    }
  else
    {
      u.d = x;
      return u.little_endian.sign;
    }
}
#else /* Target not IEEE */

/* Let's assume other float formats don't have infinity.
   (This can be overridden by redefining REAL_VALUE_ISINF.)  */

int
target_isinf (x)
     REAL_VALUE_TYPE x;
{
  return 0;
}

/* Let's assume other float formats don't have NaNs.
   (This can be overridden by redefining REAL_VALUE_ISNAN.)  */

int
target_isnan (x)
     REAL_VALUE_TYPE x;
{
  return 0;
}

/* Let's assume other float formats don't have minus zero.
   (This can be overridden by redefining REAL_VALUE_NEGATIVE.)  */

int
target_negative (x)
     REAL_VALUE_TYPE x;
{
  return 0;
}
#endif /* Target not IEEE */
#endif /* no REAL_ARITHMETIC */
/* Split a tree IN into a constant and a variable part
   that could be combined with CODE to make IN.
   CODE must be a commutative arithmetic operation.
   Store the constant part into *CONP and the variable in *VARP.
   Return 1 if this was done; zero means the tree IN did not decompose
   this way.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
   Therefore, we must tell the caller whether the variable part
   was subtracted.  We do this by storing 1 or -1 into *VARSIGNP.
   The value stored is the coefficient for the variable term.
   The constant term we return should always be added;
   we negate it if necessary.  */
static int
split_tree (in, code, varp, conp, varsignp)
     tree in;
     enum tree_code code;
     tree *varp, *conp;
     int *varsignp;
{
  register tree outtype = TREE_TYPE (in);
  *varp = 0;
  *conp = 0;

  /* Strip any conversions that don't change the machine mode.  */
  while ((TREE_CODE (in) == NOP_EXPR
	  || TREE_CODE (in) == CONVERT_EXPR)
	 && (TYPE_MODE (TREE_TYPE (in))
	     == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
    in = TREE_OPERAND (in, 0);

  if (TREE_CODE (in) == code
      || (! FLOAT_TYPE_P (TREE_TYPE (in))
	  /* We can associate addition and subtraction together
	     (even though the C standard doesn't say so)
	     for integers because the value is not affected.
	     For reals, the value might be affected, so we can't.  */
	  && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
	      || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
    {
      enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
      if (code == INTEGER_CST)
	{
	  *conp = TREE_OPERAND (in, 0);
	  *varp = TREE_OPERAND (in, 1);
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
	  return 1;
	}
      if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
	{
	  *conp = TREE_OPERAND (in, 1);
	  *varp = TREE_OPERAND (in, 0);
	  *varsignp = 1;
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  if (TREE_CODE (in) == MINUS_EXPR)
	    {
	      /* If operation is subtraction and constant is second,
		 must negate it to get an additive constant.
		 And this cannot be done unless it is a manifest constant.
		 It could also be the address of a static variable.
		 We cannot negate that, so give up.  */
	      if (TREE_CODE (*conp) == INTEGER_CST)
		/* Subtracting from integer_zero_node loses for long long.  */
		*conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
	      else
		return 0;
	    }
	  return 1;
	}
      if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
	{
	  *conp = TREE_OPERAND (in, 0);
	  *varp = TREE_OPERAND (in, 1);
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
	  return 1;
	}
    }
  return 0;
}
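
/* A minimal illustrative sketch (not part of GNU CC): what split_tree
   extracts from a tree for `i - 4', assuming the hypothetical
   parameter T already holds that MINUS_EXPR and PLUS_EXPR is the
   requested code.  */
#if 0
static void
split_tree_example (t)
     tree t;
{
  tree var, con;
  int varsign;

  if (split_tree (t, PLUS_EXPR, &var, &con, &varsign))
    {
      /* VAR is the tree for `i', VARSIGN is 1, and CON is the negated
	 constant, so the caller can treat the whole as `i + (-4)'.  */
    }
}
#endif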
/* Combine two constants ARG1 and ARG2 under operation CODE
   to produce a new constant.
   We assume ARG1 and ARG2 have the same data type,
   or at least are the same kind of constant and the same machine mode.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */
static tree
const_binop (code, arg1, arg2, notrunc)
     enum tree_code code;
     register tree arg1, arg2;
     int notrunc;
{
  if (TREE_CODE (arg1) == INTEGER_CST)
    {
      register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
      register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
      HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
      HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
      HOST_WIDE_INT low, hi;
      HOST_WIDE_INT garbagel, garbageh;
      register tree t;
      int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
      int overflow = 0;

      switch (code)
	{
	case BIT_IOR_EXPR:
	  t = build_int_2 (int1l | int2l, int1h | int2h);
	  break;

	case BIT_XOR_EXPR:
	  t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
	  break;

	case BIT_AND_EXPR:
	  t = build_int_2 (int1l & int2l, int1h & int2h);
	  break;

	case BIT_ANDTC_EXPR:
	  t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
	  break;

	case RSHIFT_EXPR:
	  int2l = - int2l;
	case LSHIFT_EXPR:
	  /* It's unclear from the C standard whether shifts can overflow.
	     The following code ignores overflow; perhaps a C standard
	     interpretation ruling is needed.  */
	  lshift_double (int1l, int1h, int2l,
			 TYPE_PRECISION (TREE_TYPE (arg1)),
			 &low, &hi, !uns);
	  t = build_int_2 (low, hi);
	  TREE_TYPE (t) = TREE_TYPE (arg1);
	  if (!notrunc)
	    force_fit_type (t, 0);
	  TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
	  return t;
	case RROTATE_EXPR:
	  int2l = - int2l;
	case LROTATE_EXPR:
	  lrotate_double (int1l, int1h, int2l,
			  TYPE_PRECISION (TREE_TYPE (arg1)),
			  &low, &hi);
	  t = build_int_2 (low, hi);
	  break;

	case PLUS_EXPR:
	  if (int1h == 0)
	    {
	      int2l += int1l;
	      if ((unsigned HOST_WIDE_INT) int2l < int1l)
		{
		  hi = int2h++;
		  overflow = int2h < hi;
		}
	      t = build_int_2 (int2l, int2h);
	      break;
	    }
	  if (int2h == 0)
	    {
	      int1l += int2l;
	      if ((unsigned HOST_WIDE_INT) int1l < int2l)
		{
		  hi = int1h++;
		  overflow = int1h < hi;
		}
	      t = build_int_2 (int1l, int1h);
	      break;
	    }
	  overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
	  t = build_int_2 (low, hi);
	  break;

	case MINUS_EXPR:
	  if (int2h == 0 && int2l == 0)
	    {
	      t = build_int_2 (int1l, int1h);
	      break;
	    }
	  neg_double (int2l, int2h, &low, &hi);
	  add_double (int1l, int1h, low, hi, &low, &hi);
	  overflow = overflow_sum_sign (hi, int2h, int1h);
	  t = build_int_2 (low, hi);
	  break;

	case MULT_EXPR:
	  overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
	  t = build_int_2 (low, hi);
	  break;
	case TRUNC_DIV_EXPR:
	case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
	case EXACT_DIV_EXPR:
	  /* This is a shortcut for a common special case.
	     It reduces the number of tree nodes generated
	     and saves time.  */
	  if (int2h == 0 && int2l > 0
	      && TREE_TYPE (arg1) == sizetype
	      && int1h == 0 && int1l >= 0)
	    {
	      if (code == CEIL_DIV_EXPR)
		int1l += int2l - 1;
	      return size_int (int1l / int2l);
	    }
	case ROUND_DIV_EXPR:
	  if (int2h == 0 && int2l == 1)
	    {
	      t = build_int_2 (int1l, int1h);
	      break;
	    }
	  if (int1l == int2l && int1h == int2h)
	    {
	      if ((int1l | int1h) == 0)
		abort ();
	      t = build_int_2 (1, 0);
	      break;
	    }
	  overflow = div_and_round_double (code, uns,
					   int1l, int1h, int2l, int2h,
					   &low, &hi, &garbagel, &garbageh);
	  t = build_int_2 (low, hi);
	  break;

	case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
	case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
	  overflow = div_and_round_double (code, uns,
					   int1l, int1h, int2l, int2h,
					   &garbagel, &garbageh, &low, &hi);
	  t = build_int_2 (low, hi);
	  break;
	case MIN_EXPR:
	case MAX_EXPR:
	  if (uns)
	    low = (((unsigned HOST_WIDE_INT) int1h
		    < (unsigned HOST_WIDE_INT) int2h)
		   || (((unsigned HOST_WIDE_INT) int1h
			== (unsigned HOST_WIDE_INT) int2h)
		       && ((unsigned HOST_WIDE_INT) int1l
			   < (unsigned HOST_WIDE_INT) int2l)));
	  else
	    low = ((int1h < int2h)
		   || ((int1h == int2h)
		       && ((unsigned HOST_WIDE_INT) int1l
			   < (unsigned HOST_WIDE_INT) int2l)));
	  if (low == (code == MIN_EXPR))
	    t = build_int_2 (int1l, int1h);
	  else
	    t = build_int_2 (int2l, int2h);
	  break;

	default:
	  abort ();
	}

      TREE_TYPE (t) = TREE_TYPE (arg1);
      TREE_OVERFLOW (t)
	= ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
	   | TREE_OVERFLOW (arg1)
	   | TREE_OVERFLOW (arg2));
      TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
				    | TREE_CONSTANT_OVERFLOW (arg1)
				    | TREE_CONSTANT_OVERFLOW (arg2));
      return t;
    }
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (TREE_CODE (arg1) == REAL_CST)
    {
      REAL_VALUE_TYPE d1;
      REAL_VALUE_TYPE d2;
      int overflow = 0;
      REAL_VALUE_TYPE value;
      tree t;

      d1 = TREE_REAL_CST (arg1);
      d2 = TREE_REAL_CST (arg2);

      /* If either operand is a NaN, just return it.  Otherwise, set up
	 for floating-point trap; we return an overflow.  */
      if (REAL_VALUE_ISNAN (d1))
	return arg1;
      else if (REAL_VALUE_ISNAN (d2))
	return arg2;
      else if (setjmp (float_error))
	{
	  t = copy_node (arg1);
	  overflow = 1;
	  goto got_float;
	}

      set_float_handler (float_error);

#ifdef REAL_ARITHMETIC
      REAL_ARITHMETIC (value, code, d1, d2);
#else
      switch (code)
	{
	case PLUS_EXPR:
	  value = d1 + d2;
	  break;

	case MINUS_EXPR:
	  value = d1 - d2;
	  break;

	case MULT_EXPR:
	  value = d1 * d2;
	  break;

	case RDIV_EXPR:
#ifndef REAL_INFINITY
	  if (d2 == 0)
	    abort ();
#endif

	  value = d1 / d2;
	  break;

	case MIN_EXPR:
	  value = MIN (d1, d2);
	  break;

	case MAX_EXPR:
	  value = MAX (d1, d2);
	  break;

	default:
	  abort ();
	}
#endif /* no REAL_ARITHMETIC */
      t = build_real (TREE_TYPE (arg1),
		      real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
    got_float:
      set_float_handler (NULL_PTR);

      TREE_OVERFLOW (t)
	= (force_fit_type (t, overflow)
	   | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
      TREE_CONSTANT_OVERFLOW (t)
	= TREE_OVERFLOW (t)
	  | TREE_CONSTANT_OVERFLOW (arg1)
	  | TREE_CONSTANT_OVERFLOW (arg2);
      return t;
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  if (TREE_CODE (arg1) == COMPLEX_CST)
    {
      register tree r1 = TREE_REALPART (arg1);
      register tree i1 = TREE_IMAGPART (arg1);
      register tree r2 = TREE_REALPART (arg2);
      register tree i2 = TREE_IMAGPART (arg2);
      register tree t;

      switch (code)
	{
	case PLUS_EXPR:
	  t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
			     const_binop (PLUS_EXPR, i1, i2, notrunc));
	  break;

	case MINUS_EXPR:
	  t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
			     const_binop (MINUS_EXPR, i1, i2, notrunc));
	  break;

	case MULT_EXPR:
	  t = build_complex (const_binop (MINUS_EXPR,
					  const_binop (MULT_EXPR,
						       r1, r2, notrunc),
					  const_binop (MULT_EXPR,
						       i1, i2, notrunc),
					  notrunc),
			     const_binop (PLUS_EXPR,
					  const_binop (MULT_EXPR,
						       r1, i2, notrunc),
					  const_binop (MULT_EXPR,
						       i1, r2, notrunc),
					  notrunc));
	  break;

	case RDIV_EXPR:
	  {
	    register tree magsquared
	      = const_binop (PLUS_EXPR,
			     const_binop (MULT_EXPR, r2, r2, notrunc),
			     const_binop (MULT_EXPR, i2, i2, notrunc),
			     notrunc);

	    t = build_complex
	      (const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
			    ? TRUNC_DIV_EXPR : RDIV_EXPR,
			    const_binop (PLUS_EXPR,
					 const_binop (MULT_EXPR, r1, r2,
						      notrunc),
					 const_binop (MULT_EXPR, i1, i2,
						      notrunc),
					 notrunc),
			    magsquared, notrunc),
	       const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
			    ? TRUNC_DIV_EXPR : RDIV_EXPR,
			    const_binop (MINUS_EXPR,
					 const_binop (MULT_EXPR, i1, r2,
						      notrunc),
					 const_binop (MULT_EXPR, r1, i2,
						      notrunc),
					 notrunc),
			    magsquared, notrunc));
	  }
	  break;

	default:
	  abort ();
	}
      TREE_TYPE (t) = TREE_TYPE (arg1);
      return t;
    }
  return 0;
}
/* Return an INTEGER_CST with value V and type from `sizetype'.  */

tree
size_int (number)
     unsigned int number;
{
  register tree t;
  /* Type-size nodes already made for small sizes.  */
  static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];

  if (number < 2*HOST_BITS_PER_WIDE_INT + 1
      && size_table[number] != 0)
    return size_table[number];
  if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
    {
      push_obstacks_nochange ();
      /* Make this a permanent node.  */
      end_temporary_allocation ();
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
      size_table[number] = t;
      pop_obstacks ();
    }
  else
    {
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
    }
  return t;
}
/* Combine operands ARG0 and ARG1 with arithmetic operation CODE.
   CODE is a tree code.  The data type is taken from `sizetype'.
   If the operands are constant, so is the result.  */

tree
size_binop (code, arg0, arg1)
     enum tree_code code;
     tree arg0, arg1;
{
  /* Handle the special case of two integer constants faster.  */
  if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
    {
      /* And some specific cases even faster than that.  */
      if (code == PLUS_EXPR
	  && TREE_INT_CST_LOW (arg0) == 0
	  && TREE_INT_CST_HIGH (arg0) == 0)
	return arg1;
      if (code == MINUS_EXPR
	  && TREE_INT_CST_LOW (arg1) == 0
	  && TREE_INT_CST_HIGH (arg1) == 0)
	return arg0;
      if (code == MULT_EXPR
	  && TREE_INT_CST_LOW (arg0) == 1
	  && TREE_INT_CST_HIGH (arg0) == 0)
	return arg1;
      /* Handle general case of two integer constants.  */
      return const_binop (code, arg0, arg1, 1);
    }

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return error_mark_node;

  return fold (build (code, sizetype, arg0, arg1));
}
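
/* A minimal illustrative sketch (not part of GNU CC): computing a size
   expression with the entry points above.  Both operands carry type
   `sizetype', so the result does too.  */
#if 0
static tree
size_binop_example ()
{
  /* 4 words of 8 bytes: size_int makes the constants, and size_binop
     folds the multiplication to the constant 32 immediately.  */
  return size_binop (MULT_EXPR, size_int (4), size_int (8));
}
#endif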
/* Given T, a tree representing type conversion of ARG1, a constant,
   return a constant tree representing the result of conversion.  */

static tree
fold_convert (t, arg1)
     register tree t;
     register tree arg1;
{
  register tree type = TREE_TYPE (t);
  int overflow = 0;

  if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
	{
	  /* If we would build a constant wider than GCC supports,
	     leave the conversion unfolded.  */
	  if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
	    return t;

	  /* Given an integer constant, make new constant with new type,
	     appropriately sign-extended or truncated.  */
	  t = build_int_2 (TREE_INT_CST_LOW (arg1),
			   TREE_INT_CST_HIGH (arg1));
	  TREE_TYPE (t) = type;
	  /* Indicate an overflow if (1) ARG1 already overflowed,
	     or (2) force_fit_type indicates an overflow.
	     Tell force_fit_type that an overflow has already occurred
	     if ARG1 is a too-large unsigned value and T is signed.  */
	  TREE_OVERFLOW (t)
	    = (TREE_OVERFLOW (arg1)
	       | force_fit_type (t,
				 (TREE_INT_CST_HIGH (arg1) < 0
				  & (TREE_UNSIGNED (type)
				     < TREE_UNSIGNED (TREE_TYPE (arg1))))));
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
	}
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      else if (TREE_CODE (arg1) == REAL_CST)
	{
	  /* Don't initialize these, use assignments.
	     Initialized local aggregates don't work on old compilers.  */
	  REAL_VALUE_TYPE x;
	  REAL_VALUE_TYPE l;
	  REAL_VALUE_TYPE u;

	  x = TREE_REAL_CST (arg1);
	  l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
	  u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
	  /* See if X will be in range after truncation towards 0.
	     To compensate for truncation, move the bounds away from 0,
	     but reject if X exactly equals the adjusted bounds.  */
#ifdef REAL_ARITHMETIC
	  REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
	  REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
#else
	  l--;
	  u++;
#endif
	  /* If X is a NaN, use zero instead and show we have an overflow.
	     Otherwise, range check.  */
	  if (REAL_VALUE_ISNAN (x))
	    overflow = 1, x = dconst0;
	  else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
	    overflow = 1;

#ifndef REAL_ARITHMETIC
	  {
	    HOST_WIDE_INT low, high;
	    HOST_WIDE_INT half_word
	      = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);

	    if (x < 0)
	      x = -x;

	    high = (HOST_WIDE_INT) (x / half_word / half_word);
	    x -= (REAL_VALUE_TYPE) high * half_word * half_word;
	    if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
	      {
		low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
		low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
	      }
	    else
	      low = (HOST_WIDE_INT) x;
	    if (TREE_REAL_CST (arg1) < 0)
	      neg_double (low, high, &low, &high);
	    t = build_int_2 (low, high);
	  }
#else
	  {
	    HOST_WIDE_INT low, high;
	    REAL_VALUE_TO_INT (&low, &high, x);
	    t = build_int_2 (low, high);
	  }
#endif
	  TREE_TYPE (t) = type;
	  TREE_OVERFLOW (t)
	    = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
	}
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      TREE_TYPE (t) = type;
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      if (TREE_CODE (arg1) == INTEGER_CST)
	return build_real_from_int_cst (type, arg1);
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      if (TREE_CODE (arg1) == REAL_CST)
	{
	  if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
	    return arg1;
	  else if (setjmp (float_error))
	    {
	      overflow = 1;
	      t = copy_node (arg1);
	      goto got_it;
	    }
	  set_float_handler (float_error);

	  t = build_real (type, real_value_truncate (TYPE_MODE (type),
						     TREE_REAL_CST (arg1)));
	  set_float_handler (NULL_PTR);

	got_it:
	  TREE_OVERFLOW (t)
	    = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
	  return t;
	}
    }
  TREE_CONSTANT (t) = 1;
  return t;
}
/* Return an expr equal to X but certainly not valid as an lvalue.
   Also make sure it is not valid as a null pointer constant.  */

tree
non_lvalue (x)
     tree x;
{
  register tree result;

  /* These things are certainly not lvalues.  */
  if (TREE_CODE (x) == NON_LVALUE_EXPR
      || TREE_CODE (x) == INTEGER_CST
      || TREE_CODE (x) == REAL_CST
      || TREE_CODE (x) == STRING_CST
      || TREE_CODE (x) == ADDR_EXPR)
    {
      if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
	{
	  /* Use NOP_EXPR instead of NON_LVALUE_EXPR
	     so convert_for_assignment won't strip it.
	     This is so this 0 won't be treated as a null pointer constant.  */
	  result = build1 (NOP_EXPR, TREE_TYPE (x), x);
	  TREE_CONSTANT (result) = TREE_CONSTANT (x);
	  return result;
	}
      return x;
    }

  result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
  TREE_CONSTANT (result) = TREE_CONSTANT (x);
  return result;
}
/* When pedantic, return an expr equal to X but certainly not valid as a
   pedantic lvalue.  Otherwise, return X.  */

tree
pedantic_non_lvalue (x)
     tree x;
{
  if (pedantic)
    return non_lvalue (x);
  else
    return x;
}
/* Given a tree comparison code, return the code that is the logical inverse
   of the given code.  It is not safe to do this for floating-point
   comparisons, except for NE_EXPR and EQ_EXPR.  */

static enum tree_code
invert_tree_comparison (code)
     enum tree_code code;
{
  switch (code)
    {
    case EQ_EXPR:
      return NE_EXPR;
    case NE_EXPR:
      return EQ_EXPR;
    case GT_EXPR:
      return LE_EXPR;
    case GE_EXPR:
      return LT_EXPR;
    case LT_EXPR:
      return GE_EXPR;
    case LE_EXPR:
      return GT_EXPR;
    default:
      abort ();
    }
}
/* Similar, but return the comparison that results if the operands are
   swapped.  This is safe for floating-point.  */

static enum tree_code
swap_tree_comparison (code)
     enum tree_code code;
{
  switch (code)
    {
    case EQ_EXPR:
    case NE_EXPR:
      return code;
    case GT_EXPR:
      return LT_EXPR;
    case GE_EXPR:
      return LE_EXPR;
    case LT_EXPR:
      return GT_EXPR;
    case LE_EXPR:
      return GE_EXPR;
    default:
      abort ();
    }
}
/* Return nonzero if CODE is a tree code that represents a truth value.  */

static int
truth_value_p (code)
     enum tree_code code;
{
  return (TREE_CODE_CLASS (code) == '<'
	  || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
	  || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
	  || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
}
/* Return nonzero if two operands are necessarily equal.
   If ONLY_CONST is non-zero, only return non-zero for constants.
   This function tests whether the operands are indistinguishable;
   it does not test whether they are equal using C's == operation.
   The distinction is important for IEEE floating point, because
   (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
   (2) two NaNs may be indistinguishable, but NaN!=NaN.  */

int
operand_equal_p (arg0, arg1, only_const)
     tree arg0, arg1;
     int only_const;
{
  /* If both types don't have the same signedness, then we can't consider
     them equal.  We must check this before the STRIP_NOPS calls
     because they may change the signedness of the arguments.  */
  if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
    return 0;

  STRIP_NOPS (arg0);
  STRIP_NOPS (arg1);

  /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
     We don't care about side effects in that case because the SAVE_EXPR
     takes care of that for us.  */
  if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
    return ! only_const;

  if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
    return 0;

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == ADDR_EXPR
      && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
    return 1;

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == INTEGER_CST
      && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
      && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
    return 1;

  /* Detect when real constants are equal.  */
  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == REAL_CST)
    return !bcmp ((char *) &TREE_REAL_CST (arg0),
		  (char *) &TREE_REAL_CST (arg1),
		  sizeof (REAL_VALUE_TYPE));
  if (only_const)
    return 0;

  if (TREE_CODE (arg0) != TREE_CODE (arg1))
    return 0;

  /* This is needed for conversions and for COMPONENT_REF.
     Might as well play it safe and always test this.  */
  if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
    return 0;

  switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
    {
    case '1':
      /* Two conversions are equal only if signedness and modes match.  */
      if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
	  && (TREE_UNSIGNED (TREE_TYPE (arg0))
	      != TREE_UNSIGNED (TREE_TYPE (arg1))))
	return 0;

      return operand_equal_p (TREE_OPERAND (arg0, 0),
			      TREE_OPERAND (arg1, 0), 0);

    case '<':
    case '2':
      return (operand_equal_p (TREE_OPERAND (arg0, 0),
			       TREE_OPERAND (arg1, 0), 0)
	      && operand_equal_p (TREE_OPERAND (arg0, 1),
				  TREE_OPERAND (arg1, 1), 0));

    case 'r':
      switch (TREE_CODE (arg0))
	{
	case INDIRECT_REF:
	  return operand_equal_p (TREE_OPERAND (arg0, 0),
				  TREE_OPERAND (arg1, 0), 0);

	case COMPONENT_REF:
	case ARRAY_REF:
	  return (operand_equal_p (TREE_OPERAND (arg0, 0),
				   TREE_OPERAND (arg1, 0), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 1),
				      TREE_OPERAND (arg1, 1), 0));

	case BIT_FIELD_REF:
	  return (operand_equal_p (TREE_OPERAND (arg0, 0),
				   TREE_OPERAND (arg1, 0), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 1),
				      TREE_OPERAND (arg1, 1), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 2),
				      TREE_OPERAND (arg1, 2), 0));
	}
      break;
    }

  return 0;
}
/* Similar to operand_equal_p, but see if ARG0 might have been made by
   shorten_compare from ARG1 when ARG1 was being compared with OTHER.

   When in doubt, return 0.  */

static int
operand_equal_for_comparison_p (arg0, arg1, other)
     tree arg0, arg1;
     tree other;
{
  int unsignedp1, unsignedpo;
  tree primarg1, primother;
  unsigned correct_width;

  if (operand_equal_p (arg0, arg1, 0))
    return 1;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
    return 0;

  /* Duplicate what shorten_compare does to ARG1 and see if that gives the
     actual comparison operand, ARG0.

     First throw away any conversions to wider types
     already present in the operands.  */

  primarg1 = get_narrower (arg1, &unsignedp1);
  primother = get_narrower (other, &unsignedpo);

  correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
  if (unsignedp1 == unsignedpo
      && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
      && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
    {
      tree type = TREE_TYPE (arg0);

      /* Make sure shorter operand is extended the right way
	 to match the longer operand.  */
      primarg1 = convert (signed_or_unsigned_type (unsignedp1,
						   TREE_TYPE (primarg1)),
			  primarg1);

      if (operand_equal_p (arg0, convert (type, primarg1), 0))
	return 1;
    }

  return 0;
}
/* See if ARG is an expression that is either a comparison or is performing
   arithmetic on comparisons.  The comparisons must only be comparing
   two different values, which will be stored in *CVAL1 and *CVAL2; if
   they are non-zero it means that some operands have already been found.
   No variables may be used anywhere else in the expression except in the
   comparisons.  If SAVE_P is true it means we removed a SAVE_EXPR around
   the expression and save_expr needs to be called with CVAL1 and CVAL2.

   If this is true, return 1.  Otherwise, return zero.  */

static int
twoval_comparison_p (arg, cval1, cval2, save_p)
     tree arg;
     tree *cval1, *cval2;
     int *save_p;
{
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
	       || code == COMPOUND_EXPR))
    class = '2';

  /* ??? Disable this since the SAVE_EXPR might already be in use outside
     the expression.  There may be no way to make this work, but it needs
     to be looked at again for 2.6.  */
#if 0
  else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
    {
      /* If we've already found a CVAL1 or CVAL2, this expression is
	 too complex to handle.  */
      if (*cval1 || *cval2)
	return 0;

      class = '1';
      *save_p = 1;
    }
#endif

  switch (class)
    {
    case '1':
      return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);

    case '2':
      return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
	      && twoval_comparison_p (TREE_OPERAND (arg, 1),
				      cval1, cval2, save_p));

    case 'e':
      if (code == COND_EXPR)
	return (twoval_comparison_p (TREE_OPERAND (arg, 0),
				     cval1, cval2, save_p)
		&& twoval_comparison_p (TREE_OPERAND (arg, 1),
					cval1, cval2, save_p)
		&& twoval_comparison_p (TREE_OPERAND (arg, 2),
					cval1, cval2, save_p));
      return 0;

    case '<':
      /* First see if we can handle the first operand, then the second.  For
	 the second operand, we know *CVAL1 can't be zero.  It must be that
	 one side of the comparison is each of the values; test for the
	 case where this isn't true by failing if the two operands
	 are the same.  */

      if (operand_equal_p (TREE_OPERAND (arg, 0),
			   TREE_OPERAND (arg, 1), 0))
	return 0;

      if (*cval1 == 0)
	*cval1 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
	;
      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
	;
      else
	return 0;

      if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
	;
      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 1);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
	;
      else
	return 0;

      return 1;
    }

  return 0;
}
/* ARG is a tree that is known to contain just arithmetic operations and
   comparisons.  Evaluate the operations in the tree substituting NEW0 for
   any occurrence of OLD0 as an operand of a comparison and likewise for
   NEW1 and OLD1.  */

static tree
eval_subst (arg, old0, new0, old1, new1)
     tree arg;
     tree old0, new0, old1, new1;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
    class = '2';

  switch (class)
    {
    case '1':
      return fold (build1 (code, type,
			   eval_subst (TREE_OPERAND (arg, 0),
				       old0, new0, old1, new1)));

    case '2':
      return fold (build (code, type,
			  eval_subst (TREE_OPERAND (arg, 0),
				      old0, new0, old1, new1),
			  eval_subst (TREE_OPERAND (arg, 1),
				      old0, new0, old1, new1)));

    case 'e':
      switch (code)
	{
	case SAVE_EXPR:
	  return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);

	case COMPOUND_EXPR:
	  return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);

	case COND_EXPR:
	  return fold (build (code, type,
			      eval_subst (TREE_OPERAND (arg, 0),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 1),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 2),
					  old0, new0, old1, new1)));
	}

    case '<':
      {
	tree arg0 = TREE_OPERAND (arg, 0);
	tree arg1 = TREE_OPERAND (arg, 1);

	/* We need to check both for exact equality and tree equality.  The
	   former will be true if the operand has a side-effect.  In that
	   case, we know the operand occurred exactly once.  */

	if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
	  arg0 = new0;
	else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
	  arg0 = new1;

	if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
	  arg1 = new0;
	else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
	  arg1 = new1;

	return fold (build (code, type, arg0, arg1));
      }
    }

  return arg;
}
/* Return a tree for the case when the result of an expression is RESULT
   converted to TYPE and OMITTED was previously an operand of the expression
   but is now not needed (e.g., we folded OMITTED * 0).

   If OMITTED has side effects, we must evaluate it.  Otherwise, just do
   the conversion of RESULT to TYPE.  */

static tree
omit_one_operand (type, result, omitted)
     tree type, result, omitted;
{
  tree t = convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build (COMPOUND_EXPR, type, omitted, t);

  return non_lvalue (t);
}
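
/* A minimal illustrative sketch (not part of GNU CC): folding
   `f () * 0'.  The multiplication is dropped but the call must still
   happen, so omit_one_operand wraps it in a COMPOUND_EXPR.  The
   hypothetical parameters TYPE and CALL stand for the expression's
   type and the function-call operand.  */
#if 0
static tree
omit_one_operand_example (type, call)
     tree type, call;
{
  return omit_one_operand (type, integer_zero_node, call);
}
#endif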
/* Return a simplified tree node for the truth-negation of ARG.  This
   never alters ARG itself.  We assume that ARG is an operation that
   returns a truth value (0 or 1).  */

tree
invert_truthvalue (arg)
     tree arg;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);

  if (code == ERROR_MARK)
    return arg;

  /* If this is a comparison, we can simply invert it, except for
     floating-point non-equality comparisons, in which case we just
     enclose a TRUTH_NOT_EXPR around what we have.  */

  if (TREE_CODE_CLASS (code) == '<')
    {
      if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	  && code != NE_EXPR && code != EQ_EXPR)
	return build1 (TRUTH_NOT_EXPR, type, arg);
      else
	return build (invert_tree_comparison (code), type,
		      TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
    }

  switch (code)
    {
    case INTEGER_CST:
      return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
					 && TREE_INT_CST_HIGH (arg) == 0, 0));

    case TRUTH_AND_EXPR:
      return build (TRUTH_OR_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_OR_EXPR:
      return build (TRUTH_AND_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_XOR_EXPR:
      /* Here we can invert either operand.  We invert the first operand
	 unless the second operand is a TRUTH_NOT_EXPR in which case our
	 result is the XOR of the first operand with the inside of the
	 negation of the second operand.  */

      if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
	return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
		      TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
      else
	return build (TRUTH_XOR_EXPR, type,
		      invert_truthvalue (TREE_OPERAND (arg, 0)),
		      TREE_OPERAND (arg, 1));

    case TRUTH_ANDIF_EXPR:
      return build (TRUTH_ORIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_ORIF_EXPR:
      return build (TRUTH_ANDIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_NOT_EXPR:
      return TREE_OPERAND (arg, 0);

    case COND_EXPR:
      return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)),
		    invert_truthvalue (TREE_OPERAND (arg, 2)));

    case COMPOUND_EXPR:
      return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case NON_LVALUE_EXPR:
      return invert_truthvalue (TREE_OPERAND (arg, 0));

    case NOP_EXPR:
    case CONVERT_EXPR:
    case FLOAT_EXPR:
      return build1 (TREE_CODE (arg), type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)));

    case BIT_AND_EXPR:
      if (!integer_onep (TREE_OPERAND (arg, 1)))
	break;
      return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));

    case SAVE_EXPR:
      return build1 (TRUTH_NOT_EXPR, type, arg);
    }
  if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
    abort ();
  return build1 (TRUTH_NOT_EXPR, type, arg);
}
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
	(A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.

   If this optimization cannot be done, 0 will be returned.  */

static tree
distribute_bit_expr (code, type, arg0, arg1)
     enum tree_code code;
     tree type;
     tree arg0, arg1;
{
  tree common;
  tree left, right;

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      || TREE_CODE (arg0) == code
      || (TREE_CODE (arg0) != BIT_AND_EXPR
	  && TREE_CODE (arg0) != BIT_IOR_EXPR))
    return 0;

  if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 0);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 0);
    }
  else
    return 0;

  return fold (build (TREE_CODE (arg0), type, common,
		      fold (build (code, type, left, right))));
}
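
/* A minimal illustrative sketch (not part of GNU CC): the distribution
   above applied to `(a | 3) & (a | 5)', assuming the hypothetical
   parameter T already holds that BIT_AND_EXPR of two BIT_IOR_EXPRs
   over the same variable.  */
#if 0
static tree
distribute_bit_expr_example (t)
     tree t;
{
  /* The common operand is `a', so the result folds to `a | (3 & 5)',
     i.e. `a | 1', saving one runtime operation.  */
  return distribute_bit_expr (BIT_AND_EXPR, TREE_TYPE (t),
			      TREE_OPERAND (t, 0), TREE_OPERAND (t, 1));
}
#endif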
/* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
   starting at BITPOS.  The field is unsigned if UNSIGNEDP is non-zero.  */

static tree
make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
     tree inner;
     tree type;
     int bitsize, bitpos;
     int unsignedp;
{
  tree result = build (BIT_FIELD_REF, type, inner,
		       size_int (bitsize), size_int (bitpos));

  TREE_UNSIGNED (result) = unsignedp;

  return result;
}
/* Optimize a bit-field compare.

   There are two cases:  First is a compare against a constant and the
   second is a comparison of two items where the fields are at the same
   bit position relative to the start of a chunk (byte, halfword, word)
   large enough to contain it.  In these cases we can avoid the shift
   implicit in bitfield extractions.

   For constants, we emit a compare of the shifted constant with the
   BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
   compared.  For two fields at the same position, we do the ANDs with the
   similar mask and compare the result of the ANDs.

   CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
   COMPARE_TYPE is the type of the comparison, and LHS and RHS
   are the left and right operands of the comparison, respectively.

   If the optimization described above can be done, we return the resulting
   tree.  Otherwise we return zero.  */
static tree
optimize_bit_field_compare (code, compare_type, lhs, rhs)
     enum tree_code code;
     tree compare_type;
     tree lhs, rhs;
{
  int lbitpos, lbitsize, rbitpos, rbitsize;
  int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
  tree type = TREE_TYPE (lhs);
  tree signed_type, unsigned_type;
  int const_p = TREE_CODE (rhs) == INTEGER_CST;
  enum machine_mode lmode, rmode, lnmode, rnmode;
  int lunsignedp, runsignedp;
  int lvolatilep = 0, rvolatilep = 0;
  tree linner, rinner;
  tree mask;
  tree offset;

  /* Get all the information about the extractions being done.  If the bit size
     is the same as the size of the underlying object, we aren't doing an
     extraction at all and so can do nothing.  */
  linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
				&lunsignedp, &lvolatilep);
  if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
      || offset != 0)
    return 0;

  if (!const_p)
    {
      /* If this is not a constant, we can only do something if bit positions,
	 sizes, and signedness are the same.  */
      rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
				    &rmode, &runsignedp, &rvolatilep);

      if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
	  || lunsignedp != runsignedp || offset != 0)
	return 0;
    }

  /* See if we can find a mode to refer to this field.  We should be able to,
     but fail if we can't.  */
  lnmode = get_best_mode (lbitsize, lbitpos,
			  TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
			  lvolatilep);
  if (lnmode == VOIDmode)
    return 0;

  /* Set signed and unsigned types of the precision of this mode for the
     shifts below.  */
  signed_type = type_for_mode (lnmode, 0);
  unsigned_type = type_for_mode (lnmode, 1);

  if (! const_p)
    {
      rnmode = get_best_mode (rbitsize, rbitpos,
			      TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
			      rvolatilep);
      if (rnmode == VOIDmode)
	return 0;
    }
2258 within it. If the new reference is the same size as the original, we
2259 won't optimize anything, so return zero. */
2260 lnbitsize = GET_MODE_BITSIZE (lnmode);
2261 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2262 lbitpos -= lnbitpos;
2263 if (lnbitsize == lbitsize)
2268 rnbitsize = GET_MODE_BITSIZE (rnmode);
2269 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2270 rbitpos -= rnbitpos;
2271 if (rnbitsize == rbitsize)
2275 if (BYTES_BIG_ENDIAN)
2276 lbitpos = lnbitsize - lbitsize - lbitpos;
2278 /* Make the mask to be used against the extracted field. */
2279 mask = build_int_2 (~0, ~0);
2280 TREE_TYPE (mask) = unsigned_type;
2281 force_fit_type (mask, 0);
2282 mask = convert (unsigned_type, mask);
2283 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2284 mask = const_binop (RSHIFT_EXPR, mask,
2285 size_int (lnbitsize - lbitsize - lbitpos), 0);
2288 /* If not comparing with constant, just rework the comparison
2290 return build (code, compare_type,
2291 build (BIT_AND_EXPR, unsigned_type,
2292 make_bit_field_ref (linner, unsigned_type,
2293 lnbitsize, lnbitpos, 1),
2295 build (BIT_AND_EXPR, unsigned_type,
2296 make_bit_field_ref (rinner, unsigned_type,
2297 rnbitsize, rnbitpos, 1),
2300 /* Otherwise, we are handling the constant case. See if the constant is too
2301 big for the field. Warn and return a tree of for 0 (false) if so. We do
2302 this not only for its own sake, but to avoid having to test for this
2303 error case below. If we didn't, we might generate wrong code.
2305 For unsigned fields, the constant shifted right by the field length should
2306 be all zero. For signed fields, the high-order bits should agree with
2311 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2312 convert (unsigned_type, rhs),
2313 size_int (lbitsize), 0)))
2315 warning ("comparison is always %s due to width of bitfield",
2316 code == NE_EXPR ? "one" : "zero");
2317 return convert (compare_type,
2319 ? integer_one_node : integer_zero_node));
2324 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2325 size_int (lbitsize - 1), 0);
2326 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2328 warning ("comparison is always %s due to width of bitfield",
2329 code == NE_EXPR ? "one" : "zero");
2330 return convert (compare_type,
2332 ? integer_one_node : integer_zero_node));
2336 /* Single-bit compares should always be against zero. */
2337 if (lbitsize == 1 && ! integer_zerop (rhs))
2339 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2340 rhs = convert (type, integer_zero_node);
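  /* For example, a one-bit field compare `b == 1' is rewritten here as
     `b != 0', so only comparisons against zero need be handled below.  */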
2343 /* Make a new bitfield reference, shift the constant over the
2344 appropriate number of bits and mask it with the computed mask
2345 (in case this was a signed field). If we changed it, make a new one. */
2346 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2349 TREE_SIDE_EFFECTS (lhs) = 1;
2350 TREE_THIS_VOLATILE (lhs) = 1;
2353 rhs = fold (const_binop (BIT_AND_EXPR,
2354 const_binop (LSHIFT_EXPR,
2355 convert (unsigned_type, rhs),
2356 size_int (lbitpos), 0),
2359 return build (code, compare_type,
2360 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2364 /* Subroutine for fold_truthop: decode a field reference.
2366 If EXP is a comparison reference, we return the innermost reference.
2368 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2369 set to the starting bit number.
2371 If the innermost field can be completely contained in a mode-sized
2372 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2374    *PVOLATILEP is set to 1 if any expression encountered is volatile;
2375 otherwise it is not changed.
2377 *PUNSIGNEDP is set to the signedness of the field.
2379 *PMASK is set to the mask used. This is either contained in a
2380 BIT_AND_EXPR or derived from the width of the field.
2382 Return 0 if this is not a component reference or is one that we can't
2383 do anything with. */
2386 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2389 int *pbitsize, *pbitpos;
2390 enum machine_mode *pmode;
2391 int *punsignedp, *pvolatilep;
2395 tree mask, inner, offset;
2399 /* All the optimizations using this function assume integer fields.
2400 There are problems with FP fields since the type_for_size call
2401 below can fail for, e.g., XFmode. */
2402 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
2407 if (TREE_CODE (exp) == BIT_AND_EXPR)
2409 and_mask = TREE_OPERAND (exp, 1);
2410 exp = TREE_OPERAND (exp, 0);
2411 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
2412 if (TREE_CODE (and_mask) != INTEGER_CST)
2416 if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
2417 && TREE_CODE (exp) != BIT_FIELD_REF)
2420 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2421 punsignedp, pvolatilep);
2422 if (inner == exp || *pbitsize < 0 || offset != 0)
2425 /* Compute the mask to access the bitfield. */
2426 unsigned_type = type_for_size (*pbitsize, 1);
2427 precision = TYPE_PRECISION (unsigned_type);
2429 mask = build_int_2 (~0, ~0);
2430 TREE_TYPE (mask) = unsigned_type;
2431 force_fit_type (mask, 0);
2432 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2433 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
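  /* Illustrative example: for a 5-bit field, type_for_size (5, 1) would
     typically yield an 8-bit unsigned type, so PRECISION == 8 and the two
     shifts by (8 - 5) leave MASK == 0x1f, the low-order five bits.  */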
2435 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
2437 mask = fold (build (BIT_AND_EXPR, unsigned_type,
2438 convert (unsigned_type, and_mask), mask));
2444 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order bits of its type.  */
2448 all_ones_mask_p (mask, size)
2452 tree type = TREE_TYPE (mask);
2453 int precision = TYPE_PRECISION (type);
2456 tmask = build_int_2 (~0, ~0);
2457 TREE_TYPE (tmask) = signed_type (type);
2458 force_fit_type (tmask, 0);
2460 operand_equal_p (mask,
2461 const_binop (RSHIFT_EXPR,
2462 const_binop (LSHIFT_EXPR, tmask,
2463 size_int (precision - size), 0),
2464 size_int (precision - size), 0),
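/* Illustrative example: for an 8-bit MASK type and SIZE == 8, a qualifying
   MASK is 0xff -- SIZE one-bits in the low-order positions and nothing
   else.  */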
2468 /* Subroutine for fold_truthop: determine if an operand is simple enough
2469 to be evaluated unconditionally. */
2472 simple_operand_p (exp)
2475 /* Strip any conversions that don't change the machine mode. */
2476 while ((TREE_CODE (exp) == NOP_EXPR
2477 || TREE_CODE (exp) == CONVERT_EXPR)
2478 && (TYPE_MODE (TREE_TYPE (exp))
2479 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2480 exp = TREE_OPERAND (exp, 0);
2482 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2483 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2484 && ! TREE_ADDRESSABLE (exp)
2485 && ! TREE_THIS_VOLATILE (exp)
2486 && ! DECL_NONLOCAL (exp)
2487 /* Don't regard global variables as simple. They may be
2488 allocated in ways unknown to the compiler (shared memory,
2489 #pragma weak, etc). */
2490 && ! TREE_PUBLIC (exp)
2491 && ! DECL_EXTERNAL (exp)
2492 /* Loading a static variable is unduly expensive, but global
2493 registers aren't expensive. */
2494 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
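/* Examples of "simple" operands: a constant, or a local variable that is
   neither volatile, addressable, nor global.  A shared or `#pragma weak'
   global fails the tests above and is rejected.  */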
2497 /* Subroutine for fold_truthop: try to optimize a range test.
2499    For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2501 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2502 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2503    (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR).  TYPE is the type of the result.
2506 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2507 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2508 larger than HI_CST (they may be equal).
2510 We return the simplified tree or 0 if no optimization is possible. */
2513 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2514 enum tree_code jcode, lo_code, hi_code;
2515 tree type, var, lo_cst, hi_cst;
2518 enum tree_code rcode;
2520 /* See if this is a range test and normalize the constant terms. */
2522 if (jcode == TRUTH_AND_EXPR)
2527 /* See if we have VAR != CST && VAR != CST+1. */
2528 if (! (hi_code == NE_EXPR
2529 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2530 && tree_int_cst_equal (integer_one_node,
2531 const_binop (MINUS_EXPR,
2532 hi_cst, lo_cst, 0))))
2540 if (hi_code == LT_EXPR)
2541 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2542 else if (hi_code != LE_EXPR)
2545 if (lo_code == GT_EXPR)
2546 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2548 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
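      /* Illustrative example: `i > 1 && i < 10' is normalized above to
	 `i >= 2 && i <= 9', which the code below turns into
	 `(unsigned) (i - 2) <= 7'.  */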
2561 /* See if we have VAR == CST || VAR == CST+1. */
2562 if (! (hi_code == EQ_EXPR
2563 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2564 && tree_int_cst_equal (integer_one_node,
2565 const_binop (MINUS_EXPR,
2566 hi_cst, lo_cst, 0))))
2574 if (hi_code == GE_EXPR)
2575 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2576 else if (hi_code != GT_EXPR)
2579 if (lo_code == LE_EXPR)
2580 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2582 /* We now have VAR < LO_CST || VAR > HI_CST. */
2591 /* When normalizing, it is possible to both increment the smaller constant
2592 and decrement the larger constant. See if they are still ordered. */
2593 if (tree_int_cst_lt (hi_cst, lo_cst))
2596 /* Fail if VAR isn't an integer. */
2597 utype = TREE_TYPE (var);
2598 if (! INTEGRAL_TYPE_P (utype))
2601 /* The range test is invalid if subtracting the two constants results
2602 in overflow. This can happen in traditional mode. */
2603 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2604 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2607 if (! TREE_UNSIGNED (utype))
2609 utype = unsigned_type (utype);
2610 var = convert (utype, var);
2611 lo_cst = convert (utype, lo_cst);
2612 hi_cst = convert (utype, hi_cst);
2615 return fold (convert (type,
2616 build (rcode, utype,
2617 build (MINUS_EXPR, utype, var, lo_cst),
2618 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
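  /* Illustrative example of the tree built above (assuming RCODE is LE_EXPR
     in the `&&' case): for `i >= 2 && i <= 9' with int I, the result is
     `(unsigned int) i - 2 <= 7'.  */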
2621 /* Find ways of folding logical expressions of LHS and RHS:
2622 Try to merge two comparisons to the same innermost item.
2623 Look for range tests like "ch >= '0' && ch <= '9'".
2624 Look for combinations of simple terms on machines with expensive branches
2625 and evaluate the RHS unconditionally.
2627 For example, if we have p->a == 2 && p->b == 4 and we can make an
2628 object large enough to span both A and B, we can do this with a comparison
2629    against the object ANDed with a mask.
2631 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2632 operations to do this with one comparison.
2634    We check for both normal comparisons and the BIT_AND_EXPRs made by this
2635    function and the one above.
2637 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2638 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2640    TRUTH_TYPE is the type of the logical operand and LHS and RHS are its two operands.
2643 We return the simplified tree or 0 if no optimization is possible. */
2646 fold_truthop (code, truth_type, lhs, rhs)
2647 enum tree_code code;
2648 tree truth_type, lhs, rhs;
2650   /* If this is the "or" of two comparisons, we can do something if
2651 the comparisons are NE_EXPR. If this is the "and", we can do something
2652 if the comparisons are EQ_EXPR. I.e.,
2653 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2655 WANTED_CODE is this operation code. For single bit fields, we can
2656 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2657 comparison for one-bit fields. */
2659 enum tree_code wanted_code;
2660 enum tree_code lcode, rcode;
2661 tree ll_arg, lr_arg, rl_arg, rr_arg;
2662 tree ll_inner, lr_inner, rl_inner, rr_inner;
2663 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2664 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2665 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2666 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2667 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2668 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2669 enum machine_mode lnmode, rnmode;
2670 tree ll_mask, lr_mask, rl_mask, rr_mask;
2671 tree l_const, r_const;
2673 int first_bit, end_bit;
2676 /* Start by getting the comparison codes and seeing if this looks like
2677 a range test. Fail if anything is volatile. If one operand is a
2678 BIT_AND_EXPR with the constant one, treat it as if it were surrounded
2681 if (TREE_SIDE_EFFECTS (lhs)
2682 || TREE_SIDE_EFFECTS (rhs))
2685 lcode = TREE_CODE (lhs);
2686 rcode = TREE_CODE (rhs);
2688 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2689 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2691 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2692 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
2694 if (TREE_CODE_CLASS (lcode) != '<'
2695 || TREE_CODE_CLASS (rcode) != '<')
2698 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2699 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2701 ll_arg = TREE_OPERAND (lhs, 0);
2702 lr_arg = TREE_OPERAND (lhs, 1);
2703 rl_arg = TREE_OPERAND (rhs, 0);
2704 rr_arg = TREE_OPERAND (rhs, 1);
2706 if (TREE_CODE (lr_arg) == INTEGER_CST
2707 && TREE_CODE (rr_arg) == INTEGER_CST
2708 && operand_equal_p (ll_arg, rl_arg, 0))
2710 if (tree_int_cst_lt (lr_arg, rr_arg))
2711 result = range_test (code, truth_type, lcode, rcode,
2712 ll_arg, lr_arg, rr_arg);
2714 result = range_test (code, truth_type, rcode, lcode,
2715 ll_arg, rr_arg, lr_arg);
2717 /* If this isn't a range test, it also isn't a comparison that
2718 can be merged. However, it wins to evaluate the RHS unconditionally
2719 on machines with expensive branches. */
2721 if (result == 0 && BRANCH_COST >= 2)
2723 if (TREE_CODE (ll_arg) != VAR_DECL
2724 && TREE_CODE (ll_arg) != PARM_DECL)
2726 /* Avoid evaluating the variable part twice. */
2727 ll_arg = save_expr (ll_arg);
2728 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2729 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2731 return build (code, truth_type, lhs, rhs);
2736 /* If the RHS can be evaluated unconditionally and its operands are
2737 simple, it wins to evaluate the RHS unconditionally on machines
2738 with expensive branches. In this case, this isn't a comparison
2739 that can be merged. */
2741 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2742 are with zero (tmw). */
2744 if (BRANCH_COST >= 2
2745 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2746 && simple_operand_p (rl_arg)
2747 && simple_operand_p (rr_arg))
2748 return build (code, truth_type, lhs, rhs);
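  /* Illustrative example: with BRANCH_COST >= 2, `a == 1 && b == 2' (A and B
     simple) is built as a TRUTH_AND_EXPR, so both comparisons are evaluated
     without a conditional branch instead of being short-circuited.  */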
2750   /* See if the comparisons can be merged.  Then get all the parameters for each side.  */
2753 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2754 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2758 ll_inner = decode_field_reference (ll_arg,
2759 &ll_bitsize, &ll_bitpos, &ll_mode,
2760 &ll_unsignedp, &volatilep, &ll_mask);
2761 lr_inner = decode_field_reference (lr_arg,
2762 &lr_bitsize, &lr_bitpos, &lr_mode,
2763 &lr_unsignedp, &volatilep, &lr_mask);
2764 rl_inner = decode_field_reference (rl_arg,
2765 &rl_bitsize, &rl_bitpos, &rl_mode,
2766 &rl_unsignedp, &volatilep, &rl_mask);
2767 rr_inner = decode_field_reference (rr_arg,
2768 &rr_bitsize, &rr_bitpos, &rr_mode,
2769 &rr_unsignedp, &volatilep, &rr_mask);
2771   /* It must be true that the inner operation on the lhs of each
2772      comparison is the same if we are to be able to do anything.
2773      Then see if we have constants.  If not, the same must be true for the rhs's.  */
2775 if (volatilep || ll_inner == 0 || rl_inner == 0
2776 || ! operand_equal_p (ll_inner, rl_inner, 0))
2779 if (TREE_CODE (lr_arg) == INTEGER_CST
2780 && TREE_CODE (rr_arg) == INTEGER_CST)
2781 l_const = lr_arg, r_const = rr_arg;
2782 else if (lr_inner == 0 || rr_inner == 0
2783 || ! operand_equal_p (lr_inner, rr_inner, 0))
2786 l_const = r_const = 0;
2788 /* If either comparison code is not correct for our logical operation,
2789 fail. However, we can convert a one-bit comparison against zero into
2790 the opposite comparison against that bit being set in the field. */
2792 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2793 if (lcode != wanted_code)
2795 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2801 if (rcode != wanted_code)
2803 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2809 /* See if we can find a mode that contains both fields being compared on
2810 the left. If we can't, fail. Otherwise, update all constants and masks
2811 to be relative to a field of that size. */
2812 first_bit = MIN (ll_bitpos, rl_bitpos);
2813 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2814 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2815 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2817 if (lnmode == VOIDmode)
2820 lnbitsize = GET_MODE_BITSIZE (lnmode);
2821 lnbitpos = first_bit & ~ (lnbitsize - 1);
2822 type = type_for_size (lnbitsize, 1);
2823 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2825 if (BYTES_BIG_ENDIAN)
2827 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2828 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2831 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2832 size_int (xll_bitpos), 0);
2833 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2834 size_int (xrl_bitpos), 0);
2836 /* Make sure the constants are interpreted as unsigned, so we
2837 don't have sign bits outside the range of their type. */
2841 l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
2842 l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
2843 size_int (xll_bitpos), 0);
2847 r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
2848 r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
2849 size_int (xrl_bitpos), 0);
2852   /* If the right sides are not constant, do the same for them.  Also,
2853 disallow this optimization if a size or signedness mismatch occurs
2854 between the left and right sides. */
2857 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2858 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2859 /* Make sure the two fields on the right
2860 correspond to the left without being swapped. */
2861 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2864 first_bit = MIN (lr_bitpos, rr_bitpos);
2865 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2866 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2867 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2869 if (rnmode == VOIDmode)
2872 rnbitsize = GET_MODE_BITSIZE (rnmode);
2873 rnbitpos = first_bit & ~ (rnbitsize - 1);
2874 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2876 if (BYTES_BIG_ENDIAN)
2878 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2879 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2882 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2883 size_int (xlr_bitpos), 0);
2884 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2885 size_int (xrr_bitpos), 0);
2887 /* Make a mask that corresponds to both fields being compared.
2888 Do this for both items being compared. If the masks agree,
2889      we can do this by masking both and comparing the masked results.  */
2891 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2892 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2893 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2895 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2896 ll_unsignedp || rl_unsignedp);
2897 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2898 lr_unsignedp || rr_unsignedp);
2899 if (! all_ones_mask_p (ll_mask, lnbitsize))
2901 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2902 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2904 return build (wanted_code, truth_type, lhs, rhs);
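      /* Illustrative example: for `p->a == q->a && p->b == q->b' with A and B
	 in one word on each side, the two bit-field references above each
	 load the containing word once, and a single masked comparison
	 replaces both tests.  */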
2907 /* There is still another way we can do something: If both pairs of
2908 fields being compared are adjacent, we may be able to make a wider
2909 field containing them both. */
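  /* Illustrative example: two adjacent 8-bit fields at bit positions 0 and 8
     on each side merge into a single 16-bit reference, so the two
     comparisons become one.  */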
2910 if ((ll_bitsize + ll_bitpos == rl_bitpos
2911 && lr_bitsize + lr_bitpos == rr_bitpos)
2912 || (ll_bitpos == rl_bitpos + rl_bitsize
2913 && lr_bitpos == rr_bitpos + rr_bitsize))
2914 return build (wanted_code, truth_type,
2915 make_bit_field_ref (ll_inner, type,
2916 ll_bitsize + rl_bitsize,
2917 MIN (ll_bitpos, rl_bitpos),
2919 make_bit_field_ref (lr_inner, type,
2920 lr_bitsize + rr_bitsize,
2921 MIN (lr_bitpos, rr_bitpos),
2927 /* Handle the case of comparisons with constants. If there is something in
2928 common between the masks, those bits of the constants must be the same.
2929 If not, the condition is always false. Test for this to avoid generating
2930 incorrect code below. */
2931 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2932 if (! integer_zerop (result)
2933 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2934 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2936 if (wanted_code == NE_EXPR)
2938 warning ("`or' of unmatched not-equal tests is always 1");
2939 return convert (truth_type, integer_one_node);
2943 warning ("`and' of mutually exclusive equal-tests is always zero");
2944 return convert (truth_type, integer_zero_node);
2948 /* Construct the expression we will return. First get the component
2949 reference we will make. Unless the mask is all ones the width of
2950      that field, perform the mask operation.  Then compare with the merged constant.  */
2952 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2953 ll_unsignedp || rl_unsignedp);
2955 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2956 if (! all_ones_mask_p (ll_mask, lnbitsize))
2957 result = build (BIT_AND_EXPR, type, result, ll_mask);
2959 return build (wanted_code, truth_type, result,
2960 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
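  /* Illustrative example: with 8-bit fields B at bit 0 and C at bit 8 of one
     word, `a->b == 2 && a->c == 4' becomes a single load of that word,
     masked if necessary, compared against (2 | (4 << 8)).  */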
2963 /* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
2964 S, a SAVE_EXPR, return the expression actually being evaluated. Note
2965 that we may sometimes modify the tree. */
2968 strip_compound_expr (t, s)
2972 tree type = TREE_TYPE (t);
2973 enum tree_code code = TREE_CODE (t);
2975 /* See if this is the COMPOUND_EXPR we want to eliminate. */
2976 if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
2977 && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
2978 return TREE_OPERAND (t, 1);
2980 /* See if this is a COND_EXPR or a simple arithmetic operator. We
2981 don't bother handling any other types. */
2982 else if (code == COND_EXPR)
2984 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
2985 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
2986 TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
2988 else if (TREE_CODE_CLASS (code) == '1')
2989 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
2990 else if (TREE_CODE_CLASS (code) == '<'
2991 || TREE_CODE_CLASS (code) == '2')
2993 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
2994 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3000 /* Perform constant folding and related simplification of EXPR.
3001 The related simplifications include x*1 => x, x*0 => 0, etc.,
3002 and application of the associative law.
3003 NOP_EXPR conversions may be removed freely (as long as we
3004 are careful not to change the C type of the overall expression)
3005 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3006 but we can constant-fold them if they have constant operands. */
3012 register tree t = expr;
3013 tree t1 = NULL_TREE;
3015 tree type = TREE_TYPE (expr);
3016 register tree arg0, arg1;
3017 register enum tree_code code = TREE_CODE (t);
3021 /* WINS will be nonzero when the switch is done
3022 if all operands are constant. */
3026 /* Don't try to process an RTL_EXPR since its operands aren't trees. */
3027 if (code == RTL_EXPR)
3030 /* Return right away if already constant. */
3031 if (TREE_CONSTANT (t))
3033 if (code == CONST_DECL)
3034 return DECL_INITIAL (t);
3038 kind = TREE_CODE_CLASS (code);
3039 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3043 /* Special case for conversion ops that can have fixed point args. */
3044 arg0 = TREE_OPERAND (t, 0);
3046 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3048 STRIP_TYPE_NOPS (arg0);
3050 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3051 subop = TREE_REALPART (arg0);
3055 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3056 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3057 && TREE_CODE (subop) != REAL_CST
3058 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3060 /* Note that TREE_CONSTANT isn't enough:
3061 static var addresses are constant but we can't
3062 do arithmetic on them. */
3065 else if (kind == 'e' || kind == '<'
3066 || kind == '1' || kind == '2' || kind == 'r')
3068 register int len = tree_code_length[(int) code];
3070 for (i = 0; i < len; i++)
3072 tree op = TREE_OPERAND (t, i);
3076 continue; /* Valid for CALL_EXPR, at least. */
3078 if (kind == '<' || code == RSHIFT_EXPR)
3080 /* Signedness matters here. Perhaps we can refine this
3082 STRIP_TYPE_NOPS (op);
3086 /* Strip any conversions that don't change the mode. */
3090 if (TREE_CODE (op) == COMPLEX_CST)
3091 subop = TREE_REALPART (op);
3095 if (TREE_CODE (subop) != INTEGER_CST
3096 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3097 && TREE_CODE (subop) != REAL_CST
3098 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3100 /* Note that TREE_CONSTANT isn't enough:
3101 static var addresses are constant but we can't
3102 do arithmetic on them. */
3112 /* If this is a commutative operation, and ARG0 is a constant, move it
3113 to ARG1 to reduce the number of tests below. */
3114 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3115 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3116 || code == BIT_AND_EXPR)
3117 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3119 tem = arg0; arg0 = arg1; arg1 = tem;
3121 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3122 TREE_OPERAND (t, 1) = tem;
3125 /* Now WINS is set as described above,
3126 ARG0 is the first operand of EXPR,
3127 and ARG1 is the second operand (if it has more than one operand).
3129 First check for cases where an arithmetic operation is applied to a
3130 compound, conditional, or comparison operation. Push the arithmetic
3131 operation inside the compound or conditional to see if any folding
3132 can then be done. Convert comparison to conditional for this purpose.
3133      This also optimizes non-constant cases that used to be done in expand_expr.
3136      Before we do that, see if this is a BIT_AND_EXPR or a BIT_IOR_EXPR,
3137 one of the operands is a comparison and the other is a comparison, a
3138 BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
3139 code below would make the expression more complex. Change it to a
3140 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
3141 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
3143 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
3144 || code == EQ_EXPR || code == NE_EXPR)
3145 && ((truth_value_p (TREE_CODE (arg0))
3146 && (truth_value_p (TREE_CODE (arg1))
3147 || (TREE_CODE (arg1) == BIT_AND_EXPR
3148 && integer_onep (TREE_OPERAND (arg1, 1)))))
3149 || (truth_value_p (TREE_CODE (arg1))
3150 && (truth_value_p (TREE_CODE (arg0))
3151 || (TREE_CODE (arg0) == BIT_AND_EXPR
3152 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3154 t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
3155 : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
3159 if (code == EQ_EXPR)
3160 t = invert_truthvalue (t);
3165 if (TREE_CODE_CLASS (code) == '1')
3167 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3168 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3169 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3170 else if (TREE_CODE (arg0) == COND_EXPR)
3172 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3173 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3174 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3176       /* If this was a conversion, and all we did was to move it
3177 inside the COND_EXPR, bring it back out. But leave it if
3178 it is a conversion from integer to integer and the
3179 result precision is no wider than a word since such a
3180 conversion is cheap and may be optimized away by combine,
3181 while it couldn't if it were outside the COND_EXPR. Then return
3182 so we don't get into an infinite recursion loop taking the
3183 conversion out and then back in. */
3185 if ((code == NOP_EXPR || code == CONVERT_EXPR
3186 || code == NON_LVALUE_EXPR)
3187 && TREE_CODE (t) == COND_EXPR
3188 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3189 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3190 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3191 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0)))
3192 && ! (INTEGRAL_TYPE_P (TREE_TYPE (t))
3193 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)))
3194 && TYPE_PRECISION (TREE_TYPE (t)) <= BITS_PER_WORD))
3195 t = build1 (code, type,
3197 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3198 TREE_OPERAND (t, 0),
3199 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3200 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3203 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3204 return fold (build (COND_EXPR, type, arg0,
3205 fold (build1 (code, type, integer_one_node)),
3206 fold (build1 (code, type, integer_zero_node))));
3208 else if (TREE_CODE_CLASS (code) == '2'
3209 || TREE_CODE_CLASS (code) == '<')
3211 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3212 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3213 fold (build (code, type,
3214 arg0, TREE_OPERAND (arg1, 1))));
3215 else if (TREE_CODE (arg1) == COND_EXPR
3216 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3218 tree test, true_value, false_value;
3220 if (TREE_CODE (arg1) == COND_EXPR)
3222 test = TREE_OPERAND (arg1, 0);
3223 true_value = TREE_OPERAND (arg1, 1);
3224 false_value = TREE_OPERAND (arg1, 2);
3229 true_value = integer_one_node;
3230 false_value = integer_zero_node;
3233 /* If ARG0 is complex we want to make sure we only evaluate
3234 it once. Though this is only required if it is volatile, it
3235 might be more efficient even if it is not. However, if we
3236 succeed in folding one part to a constant, we do not need
3237 to make this SAVE_EXPR. Since we do this optimization
3238 	     primarily to see if we do end up with a constant and this
3239 	     SAVE_EXPR interferes with later optimizations, suppressing
3240 it when we can is important. */
3242 if (TREE_CODE (arg0) != SAVE_EXPR
3243 && ((TREE_CODE (arg0) != VAR_DECL
3244 && TREE_CODE (arg0) != PARM_DECL)
3245 || TREE_SIDE_EFFECTS (arg0)))
3247 tree lhs = fold (build (code, type, arg0, true_value));
3248 tree rhs = fold (build (code, type, arg0, false_value));
3250 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3251 return fold (build (COND_EXPR, type, test, lhs, rhs));
3253 arg0 = save_expr (arg0);
3256 test = fold (build (COND_EXPR, type, test,
3257 fold (build (code, type, arg0, true_value)),
3258 fold (build (code, type, arg0, false_value))));
3259 if (TREE_CODE (arg0) == SAVE_EXPR)
3260 return build (COMPOUND_EXPR, type,
3261 convert (void_type_node, arg0),
3262 strip_compound_expr (test, arg0));
3264 return convert (type, test);
3267 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3268 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3269 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3270 else if (TREE_CODE (arg0) == COND_EXPR
3271 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3273 tree test, true_value, false_value;
3275 if (TREE_CODE (arg0) == COND_EXPR)
3277 test = TREE_OPERAND (arg0, 0);
3278 true_value = TREE_OPERAND (arg0, 1);
3279 false_value = TREE_OPERAND (arg0, 2);
3284 true_value = integer_one_node;
3285 false_value = integer_zero_node;
3288 if (TREE_CODE (arg1) != SAVE_EXPR
3289 && ((TREE_CODE (arg1) != VAR_DECL
3290 && TREE_CODE (arg1) != PARM_DECL)
3291 || TREE_SIDE_EFFECTS (arg1)))
3293 tree lhs = fold (build (code, type, true_value, arg1));
3294 tree rhs = fold (build (code, type, false_value, arg1));
3296 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs)
3297 || TREE_CONSTANT (arg1))
3298 return fold (build (COND_EXPR, type, test, lhs, rhs));
3300 arg1 = save_expr (arg1);
3303 test = fold (build (COND_EXPR, type, test,
3304 fold (build (code, type, true_value, arg1)),
3305 fold (build (code, type, false_value, arg1))));
3306 if (TREE_CODE (arg1) == SAVE_EXPR)
3307 return build (COMPOUND_EXPR, type,
3308 convert (void_type_node, arg1),
3309 strip_compound_expr (test, arg1));
3311 return convert (type, test);
3314 else if (TREE_CODE_CLASS (code) == '<'
3315 && TREE_CODE (arg0) == COMPOUND_EXPR)
3316 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3317 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3318 else if (TREE_CODE_CLASS (code) == '<'
3319 && TREE_CODE (arg1) == COMPOUND_EXPR)
3320 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3321 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3333 return fold (DECL_INITIAL (t));
3338 case FIX_TRUNC_EXPR:
3339 /* Other kinds of FIX are not handled properly by fold_convert. */
3341 if (TREE_TYPE (TREE_OPERAND (t, 0)) == TREE_TYPE (t))
3342 return TREE_OPERAND (t, 0);
3344 /* In addition to the cases of two conversions in a row
3345 handled below, if we are converting something to its own
3346 type via an object of identical or wider precision, neither
3347 conversion is needed. */
3348 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3349 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3350 && TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == TREE_TYPE (t)
3351 && ((INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
3352 && INTEGRAL_TYPE_P (TREE_TYPE (t)))
3353 || (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
3354 && FLOAT_TYPE_P (TREE_TYPE (t))))
3355 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3356 >= TYPE_PRECISION (TREE_TYPE (t))))
3357 return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
3359 /* Two conversions in a row are not needed unless:
3360 - the intermediate type is narrower than both initial and final, or
3361 - the intermediate type and innermost type differ in signedness,
3362 and the outermost type is wider than the intermediate, or
3363 - the initial type is a pointer type and the precisions of the
3364 intermediate and final types differ, or
3365 - the final type is a pointer type and the precisions of the
3366 initial and intermediate types differ. */
3367 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3368 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3369 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3370 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3372 TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3373 > TYPE_PRECISION (TREE_TYPE (t)))
3374 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3376 && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
3378 && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3379 != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3380 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3381 < TYPE_PRECISION (TREE_TYPE (t))))
3382 && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3383 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3384 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
3386 (TREE_UNSIGNED (TREE_TYPE (t))
3387 && (TYPE_PRECISION (TREE_TYPE (t))
3388 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3389 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3391 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3392 != TYPE_PRECISION (TREE_TYPE (t))))
3393 && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
3394 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3395 != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3396 return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3398 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3399 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3400 /* Detect assigning a bitfield. */
3401 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3402 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3404 /* Don't leave an assignment inside a conversion
3405 unless assigning a bitfield. */
3406 tree prev = TREE_OPERAND (t, 0);
3407 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3408 /* First do the assignment, then return converted constant. */
3409 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3415 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3418 return fold_convert (t, arg0);
3420 #if 0 /* This loses on &"foo"[0]. */
3425 /* Fold an expression like: "foo"[2] */
3426 if (TREE_CODE (arg0) == STRING_CST
3427 && TREE_CODE (arg1) == INTEGER_CST
3428 && !TREE_INT_CST_HIGH (arg1)
3429 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3431 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3432 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3433 force_fit_type (t, 0);
3440 if (TREE_CODE (arg0) == CONSTRUCTOR)
3442 tree m = purpose_member (arg1, CONSTRUCTOR_ELTS (arg0));
3449 TREE_CONSTANT (t) = wins;
3455 if (TREE_CODE (arg0) == INTEGER_CST)
3457 HOST_WIDE_INT low, high;
3458 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3459 TREE_INT_CST_HIGH (arg0),
3461 t = build_int_2 (low, high);
3462 TREE_TYPE (t) = type;
3464 = (TREE_OVERFLOW (arg0)
3465 | force_fit_type (t, overflow));
3466 TREE_CONSTANT_OVERFLOW (t)
3467 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3469 else if (TREE_CODE (arg0) == REAL_CST)
3470 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3471 TREE_TYPE (t) = type;
3473 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3474 return TREE_OPERAND (arg0, 0);
3476 /* Convert - (a - b) to (b - a) for non-floating-point. */
3477 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3478 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3479 TREE_OPERAND (arg0, 0));
3486 if (TREE_CODE (arg0) == INTEGER_CST)
3488 if (! TREE_UNSIGNED (type)
3489 && TREE_INT_CST_HIGH (arg0) < 0)
3491 HOST_WIDE_INT low, high;
3492 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3493 TREE_INT_CST_HIGH (arg0),
3495 t = build_int_2 (low, high);
3496 TREE_TYPE (t) = type;
3498 = (TREE_OVERFLOW (arg0)
3499 | force_fit_type (t, overflow));
3500 TREE_CONSTANT_OVERFLOW (t)
3501 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3504 else if (TREE_CODE (arg0) == REAL_CST)
3506 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3507 t = build_real (type,
3508 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3510 TREE_TYPE (t) = type;
3512 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3513 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3517 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
3519 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
3520 return build (COMPLEX_EXPR, TREE_TYPE (arg0),
3521 TREE_OPERAND (arg0, 0),
3522 fold (build1 (NEGATE_EXPR,
3523 TREE_TYPE (TREE_TYPE (arg0)),
3524 TREE_OPERAND (arg0, 1))));
3525 else if (TREE_CODE (arg0) == COMPLEX_CST)
3526 return build_complex (TREE_OPERAND (arg0, 0),
3527 fold (build1 (NEGATE_EXPR,
3528 TREE_TYPE (TREE_TYPE (arg0)),
3529 TREE_OPERAND (arg0, 1))));
3530 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
3531 return fold (build (TREE_CODE (arg0), type,
3532 fold (build1 (CONJ_EXPR, type,
3533 TREE_OPERAND (arg0, 0))),
3534 fold (build1 (CONJ_EXPR,
3535 type, TREE_OPERAND (arg0, 1)))));
3536 else if (TREE_CODE (arg0) == CONJ_EXPR)
3537 return TREE_OPERAND (arg0, 0);
3543 if (TREE_CODE (arg0) == INTEGER_CST)
3544 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3545 ~ TREE_INT_CST_HIGH (arg0));
3546 TREE_TYPE (t) = type;
3547 force_fit_type (t, 0);
3548 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
3549 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3551 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3552 return TREE_OPERAND (arg0, 0);
3556 /* A + (-B) -> A - B */
3557 if (TREE_CODE (arg1) == NEGATE_EXPR)
3558 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3559 else if (! FLOAT_TYPE_P (type))
3561 if (integer_zerop (arg1))
3562 return non_lvalue (convert (type, arg0));
3564 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3565 with a constant, and the two constants have no bits in common,
3566 	     we should treat this as a BIT_IOR_EXPR since this may produce more simplifications.  */
3568 if (TREE_CODE (arg0) == BIT_AND_EXPR
3569 && TREE_CODE (arg1) == BIT_AND_EXPR
3570 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3571 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3572 && integer_zerop (const_binop (BIT_AND_EXPR,
3573 TREE_OPERAND (arg0, 1),
3574 TREE_OPERAND (arg1, 1), 0)))
3576 code = BIT_IOR_EXPR;
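	      /* Illustrative example: `(a & 0xf0) + (b & 0x0f)' can generate
		 no carries, so it is treated as `(a & 0xf0) | (b & 0x0f)'
		 here.  */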
3580 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3581 about the case where C is a constant, just try one of the
3582 four possibilities. */
3584 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3585 && operand_equal_p (TREE_OPERAND (arg0, 1),
3586 TREE_OPERAND (arg1, 1), 0))
3587 return fold (build (MULT_EXPR, type,
3588 fold (build (PLUS_EXPR, type,
3589 TREE_OPERAND (arg0, 0),
3590 TREE_OPERAND (arg1, 0))),
3591 TREE_OPERAND (arg0, 1)));
3593 /* In IEEE floating point, x+0 may not equal x. */
3594 else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3596 && real_zerop (arg1))
3597 return non_lvalue (convert (type, arg0));
3599 /* In most languages, can't associate operations on floats
3600 through parentheses. Rather than remember where the parentheses
3601 were, we don't associate floats at all. It shouldn't matter much.
3602 However, associating multiplications is only very slightly
3603 inaccurate, so do that if -ffast-math is specified. */
3604 if (FLOAT_TYPE_P (type)
3605 && ! (flag_fast_math && code == MULT_EXPR))
3608 /* The varsign == -1 cases happen only for addition and subtraction.
3609 It says that the arg that was split was really CON minus VAR.
3610 The rest of the code applies to all associative operations. */
3616 if (split_tree (arg0, code, &var, &con, &varsign))
3620 /* EXPR is (CON-VAR) +- ARG1. */
3621 /* If it is + and VAR==ARG1, return just CONST. */
3622 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3623 return convert (TREE_TYPE (t), con);
3625 /* If ARG0 is a constant, don't change things around;
3626 instead keep all the constant computations together. */
3628 if (TREE_CONSTANT (arg0))
3631 /* Otherwise return (CON +- ARG1) - VAR. */
3632 TREE_SET_CODE (t, MINUS_EXPR);
3633 TREE_OPERAND (t, 1) = var;
3635 = fold (build (code, TREE_TYPE (t), con, arg1));
3639 /* EXPR is (VAR+CON) +- ARG1. */
3640 /* If it is - and VAR==ARG1, return just CONST. */
3641 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3642 return convert (TREE_TYPE (t), con);
3644 /* If ARG0 is a constant, don't change things around;
3645 instead keep all the constant computations together. */
3647 if (TREE_CONSTANT (arg0))
3650 /* Otherwise return VAR +- (ARG1 +- CON). */
3651 TREE_OPERAND (t, 1) = tem
3652 = fold (build (code, TREE_TYPE (t), arg1, con));
3653 TREE_OPERAND (t, 0) = var;
3654 if (integer_zerop (tem)
3655 && (code == PLUS_EXPR || code == MINUS_EXPR))
3656 return convert (type, var);
3657 /* If we have x +/- (c - d) [c an explicit integer]
3658 change it to x -/+ (d - c) since if d is relocatable
3659 then the latter can be a single immediate insn
3660 and the former cannot. */
3661 if (TREE_CODE (tem) == MINUS_EXPR
3662 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3664 tree tem1 = TREE_OPERAND (tem, 1);
3665 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3666 TREE_OPERAND (tem, 0) = tem1;
3668 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3674 if (split_tree (arg1, code, &var, &con, &varsign))
3676 if (TREE_CONSTANT (arg1))
3681 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3683 /* EXPR is ARG0 +- (CON +- VAR). */
3684 if (TREE_CODE (t) == MINUS_EXPR
3685 && operand_equal_p (var, arg0, 0))
3687 /* If VAR and ARG0 cancel, return just CON or -CON. */
3688 if (code == PLUS_EXPR)
3689 return convert (TREE_TYPE (t), con);
3690 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3691 convert (TREE_TYPE (t), con)));
3695 = fold (build (code, TREE_TYPE (t), arg0, con));
3696 TREE_OPERAND (t, 1) = var;
3697 if (integer_zerop (TREE_OPERAND (t, 0))
3698 && TREE_CODE (t) == PLUS_EXPR)
3699 return convert (TREE_TYPE (t), var);
3704 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3705 if (TREE_CODE (arg1) == REAL_CST)
3707 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3709 t1 = const_binop (code, arg0, arg1, 0);
3710 if (t1 != NULL_TREE)
3712 /* The return value should always have
3713 the same type as the original expression. */
3714 TREE_TYPE (t1) = TREE_TYPE (t);
3720 if (! FLOAT_TYPE_P (type))
3722 if (! wins && integer_zerop (arg0))
3723 return build1 (NEGATE_EXPR, type, arg1);
3724 if (integer_zerop (arg1))
3725 return non_lvalue (convert (type, arg0));
3727 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3728 about the case where C is a constant, just try one of the
3729 four possibilities. */
3731 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3732 && operand_equal_p (TREE_OPERAND (arg0, 1),
3733 TREE_OPERAND (arg1, 1), 0))
3734 return fold (build (MULT_EXPR, type,
3735 fold (build (MINUS_EXPR, type,
3736 TREE_OPERAND (arg0, 0),
3737 TREE_OPERAND (arg1, 0))),
3738 TREE_OPERAND (arg0, 1)));
3740 /* Convert A - (-B) to A + B. */
3741 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3742 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3744 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3747 /* Except with IEEE floating point, 0-x equals -x. */
3748 if (! wins && real_zerop (arg0))
3749 return build1 (NEGATE_EXPR, type, arg1);
3750 /* Except with IEEE floating point, x-0 equals x. */
3751 if (real_zerop (arg1))
3752 return non_lvalue (convert (type, arg0));
3755 /* Fold &x - &x. This can happen from &x.foo - &x.
3756 This is unsafe for certain floats even in non-IEEE formats.
3757 In IEEE, it is unsafe because it does wrong for NaNs.
3758 	 Also note that operand_equal_p is always false if an operand is volatile.  */
3761 if ((! FLOAT_TYPE_P (type) || flag_fast_math)
3762 && operand_equal_p (arg0, arg1, 0))
3763 return convert (type, integer_zero_node);
3768 if (! FLOAT_TYPE_P (type))
3770 if (integer_zerop (arg1))
3771 return omit_one_operand (type, arg1, arg0);
3772 if (integer_onep (arg1))
3773 return non_lvalue (convert (type, arg0));
3775 /* ((A / C) * C) is A if the division is an
3776 EXACT_DIV_EXPR. Since C is normally a constant,
3777 just check for one of the four possibilities. */
3779 if (TREE_CODE (arg0) == EXACT_DIV_EXPR
3780 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3781 return TREE_OPERAND (arg0, 0);
3783 /* (a * (1 << b)) is (a << b) */
3784 if (TREE_CODE (arg1) == LSHIFT_EXPR
3785 && integer_onep (TREE_OPERAND (arg1, 0)))
3786 return fold (build (LSHIFT_EXPR, type, arg0,
3787 TREE_OPERAND (arg1, 1)));
3788 if (TREE_CODE (arg0) == LSHIFT_EXPR
3789 && integer_onep (TREE_OPERAND (arg0, 0)))
3790 return fold (build (LSHIFT_EXPR, type, arg1,
3791 TREE_OPERAND (arg0, 1)));
3795 /* x*0 is 0, except for IEEE floating point. */
3796 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3798 && real_zerop (arg1))
3799 return omit_one_operand (type, arg1, arg0);
3800 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3801 However, ANSI says we can drop signals,
3802 so we can do this anyway. */
3803 if (real_onep (arg1))
3804 return non_lvalue (convert (type, arg0));
3806 if (! wins && real_twop (arg1))
3808 tree arg = save_expr (arg0);
3809 return build (PLUS_EXPR, type, arg, arg);
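      /* Note: `x * 2.0' becomes `x + x' above; the SAVE_EXPR guarantees X is
	 evaluated only once even though it appears twice.  */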
3816 if (integer_all_onesp (arg1))
3817 return omit_one_operand (type, arg1, arg0);
3818 if (integer_zerop (arg1))
3819 return non_lvalue (convert (type, arg0));
3820 t1 = distribute_bit_expr (code, type, arg0, arg1);
3821 if (t1 != NULL_TREE)
3824 /* (a << C1) | (a >> C2) if A is unsigned and C1+C2 is the size of A
3825 is a rotate of A by C1 bits. */
3827 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3828 || TREE_CODE (arg0) == LSHIFT_EXPR)
3829 && (TREE_CODE (arg1) == RSHIFT_EXPR
3830 || TREE_CODE (arg1) == LSHIFT_EXPR)
3831 && TREE_CODE (arg0) != TREE_CODE (arg1)
3832 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3833 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3834 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3835 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3836 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3837 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3838 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3839 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3840 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3841 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3842 TREE_CODE (arg0) == LSHIFT_EXPR
3843 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
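      /* Illustrative example: for unsigned 32-bit X, `(x << 3) | (x >> 29)'
	 passes the tests above (3 + 29 == 32) and becomes a left rotate of X
	 by 3 bits.  */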
3848 if (integer_zerop (arg1))
3849 return non_lvalue (convert (type, arg0));
3850 if (integer_all_onesp (arg1))
3851 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3856 if (integer_all_onesp (arg1))
3857 return non_lvalue (convert (type, arg0));
3858 if (integer_zerop (arg1))
3859 return omit_one_operand (type, arg1, arg0);
3860 t1 = distribute_bit_expr (code, type, arg0, arg1);
3861 if (t1 != NULL_TREE)
3863       /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char.  */
3864 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3865 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3867 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3868 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3869 && (~TREE_INT_CST_LOW (arg0)
3870 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3871 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3873 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3874 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3876 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3877 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3878 && (~TREE_INT_CST_LOW (arg1)
3879 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3880 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
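      /* Illustrative example: if C has unsigned char type (precision 8) and
	 the constant's low eight bits are all ones, `(int) c & 0xff' is
	 simplified to `(int) c'.  */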
3884 case BIT_ANDTC_EXPR:
3885 if (integer_all_onesp (arg0))
3886 return non_lvalue (convert (type, arg1));
3887 if (integer_zerop (arg0))
3888 return omit_one_operand (type, arg0, arg1);
3889 if (TREE_CODE (arg1) == INTEGER_CST)
3891 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3892 code = BIT_AND_EXPR;
3898 /* In most cases, do nothing with a divide by zero. */
3899 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3900 #ifndef REAL_INFINITY
3901 if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
3904 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3906 /* In IEEE floating point, x/1 is not equivalent to x for snans.
3907 However, ANSI says we can drop signals, so we can do this anyway. */
3908 if (real_onep (arg1))
3909 return non_lvalue (convert (type, arg0));
3911 /* If ARG1 is a constant, we can convert this to a multiply by the
3912 reciprocal. This does not have the same rounding properties,
3913 so only do this if -ffast-math. We can actually always safely
3914 do it if ARG1 is a power of two, but it's hard to tell if it is
3915 or not in a portable manner. */
3916 if (TREE_CODE (arg1) == REAL_CST && flag_fast_math
3917 && 0 != (tem = const_binop (code, build_real (type, dconst1),
3919 return fold (build (MULT_EXPR, type, arg0, tem));
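      /* Illustrative example: with -ffast-math, `x / 5.0' becomes `x * 0.2';
	 for `x / 2.0' the reciprocal 0.5 is exact, so `x * 0.5' is always
	 equivalent.  */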
3923 case TRUNC_DIV_EXPR:
3924 case ROUND_DIV_EXPR:
3925 case FLOOR_DIV_EXPR:
3927 case EXACT_DIV_EXPR:
3928 if (integer_onep (arg1))
3929 return non_lvalue (convert (type, arg0));
3930 if (integer_zerop (arg1))
3933       /* If we have ((a / C1) / C2) where both divisions are the same type, try
3934 to simplify. First see if C1 * C2 overflows or not. */
3935 if (TREE_CODE (arg0) == code && TREE_CODE (arg1) == INTEGER_CST
3936 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
3940 new_divisor = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 1), arg1, 0);
3941 tem = const_binop (FLOOR_DIV_EXPR, new_divisor, arg1, 0);
3943 if (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_LOW (tem)
3944 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_HIGH (tem))
3946 /* If no overflow, divide by C1*C2. */
3947 return fold (build (code, type, TREE_OPERAND (arg0, 0), new_divisor));
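	      /* Illustrative example: `(a / 4) / 5' becomes `a / 20', since
		 (4 * 5) / 5 == 4 shows that computing C1 * C2 did not
		 overflow.  */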
3951 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
3952 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
3953 expressions, which often appear in the offsets or sizes of
3954 objects with a varying size. Only deal with positive divisors
3955 and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
3957 Look for NOPs and SAVE_EXPRs inside. */
3959 if (TREE_CODE (arg1) == INTEGER_CST
3960 && tree_int_cst_sgn (arg1) >= 0)
3962 int have_save_expr = 0;
3963 tree c2 = integer_zero_node;
3966 if (TREE_CODE (xarg0) == SAVE_EXPR)
3967 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3971 if (TREE_CODE (xarg0) == PLUS_EXPR
3972 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3973 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
3974 else if (TREE_CODE (xarg0) == MINUS_EXPR
3975 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
3976 		   /* If we are doing this computation unsigned, the negate is incorrect.  */
3978 && ! TREE_UNSIGNED (type))
3980 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
3981 xarg0 = TREE_OPERAND (xarg0, 0);
3984 if (TREE_CODE (xarg0) == SAVE_EXPR)
3985 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3989 if (TREE_CODE (xarg0) == MULT_EXPR
3990 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
3991 && tree_int_cst_sgn (TREE_OPERAND (xarg0, 1)) >= 0
3992 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
3993 TREE_OPERAND (xarg0, 1), arg1, 1))
3994 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
3995 TREE_OPERAND (xarg0, 1), 1)))
3996 && (tree_int_cst_sgn (c2) >= 0
3997 || integer_zerop (const_binop (TRUNC_MOD_EXPR, c2,
4000 tree outer_div = integer_one_node;
4001 tree c1 = TREE_OPERAND (xarg0, 1);
4004 /* If C3 > C1, set them equal and do a divide by
4005 C3/C1 at the end of the operation. */
4006 if (tree_int_cst_lt (c1, c3))
4007 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
4009 /* The result is A * (C1/C3) + (C2/C3). */
4010 t = fold (build (PLUS_EXPR, type,
4011 fold (build (MULT_EXPR, type,
4012 TREE_OPERAND (xarg0, 0),
4013 const_binop (code, c1, c3, 1))),
4014 const_binop (code, c2, c3, 1)));
4016 if (! integer_onep (outer_div))
4017 t = fold (build (code, type, t, convert (type, outer_div)));
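	      /* Illustrative example: `((a * 8) + 4) / 2' has C1 == 8,
		 C2 == 4 and C3 == 2; 8 % 2 == 0, so the tree built above is
		 `a * (8/2) + (4/2)', i.e. `a * 4 + 2', with OUTER_DIV == 1.  */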
4029 case FLOOR_MOD_EXPR:
4030 case ROUND_MOD_EXPR:
4031 case TRUNC_MOD_EXPR:
4032 if (integer_onep (arg1))
4033 return omit_one_operand (type, integer_zero_node, arg0);
4034 if (integer_zerop (arg1))
4037 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
4038 where C1 % C3 == 0. Handle similarly to the division case,
4039 but don't bother with SAVE_EXPRs. */
4041 if (TREE_CODE (arg1) == INTEGER_CST
4042 && ! integer_zerop (arg1))
4044 tree c2 = integer_zero_node;
4047 if (TREE_CODE (xarg0) == PLUS_EXPR
4048 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4049 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4050 else if (TREE_CODE (xarg0) == MINUS_EXPR
4051 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4052 && ! TREE_UNSIGNED (type))
4054 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4055 xarg0 = TREE_OPERAND (xarg0, 0);
4060 if (TREE_CODE (xarg0) == MULT_EXPR
4061 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4062 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
4063 TREE_OPERAND (xarg0, 1),
4065 && tree_int_cst_sgn (c2) >= 0)
4066 /* The result is (C2%C3). */
4067 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
4068 TREE_OPERAND (xarg0, 0));
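	    /* Illustrative example: `((a * 12) + 5) % 4' has C1 == 12 and
	       C3 == 4; since 12 % 4 == 0 the multiply contributes nothing and
	       the result is 5 % 4 == 1, with A still evaluated if it has side
	       effects.  */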
4077 if (integer_zerop (arg1))
4078 return non_lvalue (convert (type, arg0));
4079 /* Since negative shift count is not well-defined,
4080 don't try to compute it in the compiler. */
4081 if (tree_int_cst_sgn (arg1) < 0)
4086 if (operand_equal_p (arg0, arg1, 0))
4088 if (INTEGRAL_TYPE_P (type)
4089 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
4090 return omit_one_operand (type, arg1, arg0);
4094 if (operand_equal_p (arg0, arg1, 0))
4096 if (INTEGRAL_TYPE_P (type)
4097 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
4098 return omit_one_operand (type, arg1, arg0);
4101 case TRUTH_NOT_EXPR:
4102 /* Note that the operand of this must be an int
4103 and its values must be 0 or 1.
4104 ("true" is a fixed value perhaps depending on the language,
4105 but we don't handle values other than 1 correctly yet.) */
4106 return invert_truthvalue (arg0);
4108 case TRUTH_ANDIF_EXPR:
4109 /* Note that the operands of this must be ints
4110 and their values must be 0 or 1.
4111 ("true" is a fixed value perhaps depending on the language.) */
4112 /* If first arg is constant zero, return it. */
4113 if (integer_zerop (arg0))
4115 case TRUTH_AND_EXPR:
4116 /* If either arg is constant true, drop it. */
4117 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4118 return non_lvalue (arg1);
4119 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4120 return non_lvalue (arg0);
4121 /* If second arg is constant zero, result is zero, but first arg
4122 must be evaluated. */
4123 if (integer_zerop (arg1))
4124 return omit_one_operand (type, arg1, arg0);
4127 /* We only do these simplifications if we are optimizing. */
4131 /* Check for things like (A || B) && (A || C). We can convert this
4132 to A || (B && C). Note that either operator can be any of the four
4133 truth and/or operations and the transformation will still be
4134 valid. Also note that we only care about order for the
4135 ANDIF and ORIF operators. */
4136 if (TREE_CODE (arg0) == TREE_CODE (arg1)
4137 && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
4138 || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
4139 || TREE_CODE (arg0) == TRUTH_AND_EXPR
4140 || TREE_CODE (arg0) == TRUTH_OR_EXPR))
4142 tree a00 = TREE_OPERAND (arg0, 0);
4143 tree a01 = TREE_OPERAND (arg0, 1);
4144 tree a10 = TREE_OPERAND (arg1, 0);
4145 tree a11 = TREE_OPERAND (arg1, 1);
4146 int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
4147 || TREE_CODE (arg0) == TRUTH_AND_EXPR)
4148 && (code == TRUTH_AND_EXPR
4149 || code == TRUTH_OR_EXPR));
4151 if (operand_equal_p (a00, a10, 0))
4152 return fold (build (TREE_CODE (arg0), type, a00,
4153 fold (build (code, type, a01, a11))));
4154 else if (commutative && operand_equal_p (a00, a11, 0))
4155 return fold (build (TREE_CODE (arg0), type, a00,
4156 fold (build (code, type, a01, a10))));
4157 else if (commutative && operand_equal_p (a01, a10, 0))
4158 return fold (build (TREE_CODE (arg0), type, a01,
4159 fold (build (code, type, a00, a11))));
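	  /* Illustrative example: `(a || b) && (a || c)' matches the first
	     case above (A00 == A10) and is rewritten as `a || (b && c)'.  */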
4161 	  /* This case is tricky because we must either have commutative
4162 operators or else A10 must not have side-effects. */
4164 else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
4165 && operand_equal_p (a01, a11, 0))
4166 return fold (build (TREE_CODE (arg0), type,
4167 fold (build (code, type, a00, a10)),
          /* Check for the possibility of merging component references.  If our
             lhs is another similar operation, try to merge its rhs with our
             rhs.  Then try to merge our lhs and rhs.  */
          if (TREE_CODE (arg0) == code
              && 0 != (tem = fold_truthop (code, type,
                                           TREE_OPERAND (arg0, 1), arg1)))
            return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));

          if ((tem = fold_truthop (code, type, arg0, arg1)) != 0)
            return tem;

          return t;
        case TRUTH_ORIF_EXPR:
          /* Note that the operands of this must be ints
             and their values must be 0 or true.
             ("true" is a fixed value perhaps depending on the language.)  */
          /* If first arg is constant true, return it.  */
          if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
            return arg0;

        case TRUTH_OR_EXPR:
          /* If either arg is constant zero, drop it.  */
          if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
            return non_lvalue (arg1);
          if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
            return non_lvalue (arg0);
          /* If second arg is constant true, result is true, but we must
             evaluate first arg.  */
          if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
            return omit_one_operand (type, arg1, arg0);
          goto truth_andor;
        case TRUTH_XOR_EXPR:
          /* If either arg is constant zero, drop it.  */
          if (integer_zerop (arg0))
            return non_lvalue (arg1);
          if (integer_zerop (arg1))
            return non_lvalue (arg0);
          /* If either arg is constant true, this is a logical inversion.  */
          if (integer_onep (arg0))
            return non_lvalue (invert_truthvalue (arg1));
          if (integer_onep (arg1))
            return non_lvalue (invert_truthvalue (arg0));
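          /* Illustrative sketch (not from the original source): for 0/1
             truth values, a ^ 1 has the same table as !a (1^1 == 0 and
             0^1 == 1), which is why a constant-true operand turns the XOR
             into an inversion of the other operand.  */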
          break;

        case EQ_EXPR:
        case NE_EXPR:
        case LT_EXPR:
        case GT_EXPR:
        case LE_EXPR:
        case GE_EXPR:
          /* If one arg is a constant integer, put it last.  */
          if (TREE_CODE (arg0) == INTEGER_CST
              && TREE_CODE (arg1) != INTEGER_CST)
            {
              TREE_OPERAND (t, 0) = arg1;
              TREE_OPERAND (t, 1) = arg0;
              arg0 = TREE_OPERAND (t, 0);
              arg1 = TREE_OPERAND (t, 1);
              code = swap_tree_comparison (code);
              TREE_SET_CODE (t, code);
            }
          /* Convert foo++ == CONST into ++foo == CONST + INCR.
             First, see if one arg is constant; find the constant arg
             and the other one.  */
          {
            tree constop = 0, varop;
            tree *constoploc;

            if (TREE_CONSTANT (arg1))
              constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
            if (TREE_CONSTANT (arg0))
              constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
            if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
              {
                /* This optimization is invalid for ordered comparisons
                   if CONST+INCR overflows or if foo+incr might overflow.
                   This optimization is invalid for floating point due to rounding.
                   For pointer types we assume overflow doesn't happen.  */
                if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
                    || (! FLOAT_TYPE_P (TREE_TYPE (varop))
                        && (code == EQ_EXPR || code == NE_EXPR)))
                  {
                    tree newconst
                      = fold (build (PLUS_EXPR, TREE_TYPE (varop),
                                     constop, TREE_OPERAND (varop, 1)));
                    TREE_SET_CODE (varop, PREINCREMENT_EXPR);
                    *constoploc = newconst;
                    return t;
                  }
              }
            else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
              {
                if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
                    || (! FLOAT_TYPE_P (TREE_TYPE (varop))
                        && (code == EQ_EXPR || code == NE_EXPR)))
                  {
                    tree newconst
                      = fold (build (MINUS_EXPR, TREE_TYPE (varop),
                                     constop, TREE_OPERAND (varop, 1)));
                    TREE_SET_CODE (varop, PREDECREMENT_EXPR);
                    *constoploc = newconst;
                    return t;
                  }
              }
          }
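          /* Illustrative sketch (not from the original source): for
             i++ == 5, the postincrement is rewritten as ++i == 6; the
             comparison now sees the incremented value, so the constant
             must be bumped by the same increment.  */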
          /* Change X >= CST to X > (CST - 1) if CST is positive.  */
          if (TREE_CODE (arg1) == INTEGER_CST
              && TREE_CODE (arg0) != INTEGER_CST
              && tree_int_cst_sgn (arg1) > 0)
            {
              switch (TREE_CODE (t))
                {
                case GE_EXPR:
                  code = GT_EXPR;
                  TREE_SET_CODE (t, code);
                  arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
                  TREE_OPERAND (t, 1) = arg1;
                  break;

                case LT_EXPR:
                  code = LE_EXPR;
                  TREE_SET_CODE (t, code);
                  arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
                  TREE_OPERAND (t, 1) = arg1;
                  break;
                }
            }
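          /* Illustrative sketch (not from the original source): for
             integer x, x >= 5 becomes x > 4 and x < 5 becomes x <= 4;
             the strict form against the smaller constant is often the
             cheaper one to branch on.  */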
          /* If this is an EQ or NE comparison with zero and ARG0 is
             (1 << foo) & bar, convert it to (bar >> foo) & 1.  Both require
             two operations, but the latter can be done in one less insn
             on machines that have only two-operand insns or on which a
             constant cannot be the first operand.  */
          if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (arg0) == BIT_AND_EXPR)
            {
              if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
                  && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
                return
                  fold (build (code, type,
                               build (BIT_AND_EXPR, TREE_TYPE (arg0),
                                      build (RSHIFT_EXPR,
                                             TREE_TYPE (TREE_OPERAND (arg0, 0)),
                                             TREE_OPERAND (arg0, 1),
                                             TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
                                      convert (TREE_TYPE (arg0),
                                               integer_one_node)),
                               arg1));
              else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
                       && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
                return
                  fold (build (code, type,
                               build (BIT_AND_EXPR, TREE_TYPE (arg0),
                                      build (RSHIFT_EXPR,
                                             TREE_TYPE (TREE_OPERAND (arg0, 1)),
                                             TREE_OPERAND (arg0, 0),
                                             TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
                                      convert (TREE_TYPE (arg0),
                                               integer_one_node)),
                               arg1));
            }
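          /* Illustrative sketch (not from the original source): testing
             ((1 << n) & flags) == 0 becomes ((flags >> n) & 1) == 0;
             both isolate bit n, but the second form never needs the
             constant 1 as a first operand of the shift.  */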
          /* If this is an NE or EQ comparison of zero against the result of a
             signed MOD operation whose second operand is a power of 2, make
             the MOD operation unsigned since it is simpler and equivalent.  */
          if ((code == NE_EXPR || code == EQ_EXPR)
              && integer_zerop (arg1)
              && ! TREE_UNSIGNED (TREE_TYPE (arg0))
              && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
                  || TREE_CODE (arg0) == CEIL_MOD_EXPR
                  || TREE_CODE (arg0) == FLOOR_MOD_EXPR
                  || TREE_CODE (arg0) == ROUND_MOD_EXPR)
              && integer_pow2p (TREE_OPERAND (arg0, 1)))
            {
              tree newtype = unsigned_type (TREE_TYPE (arg0));
              tree newmod = build (TREE_CODE (arg0), newtype,
                                   convert (newtype, TREE_OPERAND (arg0, 0)),
                                   convert (newtype, TREE_OPERAND (arg0, 1)));

              return build (code, type, newmod, convert (newtype, arg1));
            }
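          /* Illustrative sketch (not from the original source): for signed
             x, (x % 4 == 0) holds exactly when ((unsigned) x % 4u == 0),
             since a power-of-2 modulus only tests the low-order bits in
             two's complement; the unsigned form avoids sign handling.  */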
          /* If this is an NE comparison of zero with an AND of one, remove the
             comparison since the AND will give the correct value.  */
          if (code == NE_EXPR && integer_zerop (arg1)
              && TREE_CODE (arg0) == BIT_AND_EXPR
              && integer_onep (TREE_OPERAND (arg0, 1)))
            return convert (type, arg0);
          /* If we have (A & C) == C where C is a power of 2, convert this into
             (A & C) != 0.  Similarly for NE_EXPR.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (arg0) == BIT_AND_EXPR
              && integer_pow2p (TREE_OPERAND (arg0, 1))
              && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
            return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
                          arg0, integer_zero_node);
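          /* Illustrative sketch (not from the original source): since 8 is
             a power of 2, (x & 8) can only be 0 or 8, so (x & 8) == 8 is
             the same test as (x & 8) != 0, which many targets can branch
             on directly.  */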
          /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
             and similarly for >= into !=.  */
          if ((code == LT_EXPR || code == GE_EXPR)
              && TREE_UNSIGNED (TREE_TYPE (arg0))
              && TREE_CODE (arg1) == LSHIFT_EXPR
              && integer_onep (TREE_OPERAND (arg1, 0)))
            return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
                          build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
                                 TREE_OPERAND (arg1, 1)),
                          convert (TREE_TYPE (arg0), integer_zero_node));
          else if ((code == LT_EXPR || code == GE_EXPR)
                   && TREE_UNSIGNED (TREE_TYPE (arg0))
                   && (TREE_CODE (arg1) == NOP_EXPR
                       || TREE_CODE (arg1) == CONVERT_EXPR)
                   && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
                   && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
            return
              build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
                     convert (TREE_TYPE (arg0),
                              build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
                                     TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
                     convert (TREE_TYPE (arg0), integer_zero_node));
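          /* Illustrative sketch (not from the original source): for
             unsigned x, x < (1 << y) says every bit of x at position y or
             above is zero, which is exactly (x >> y) == 0; likewise
             x >= (1 << y) becomes (x >> y) != 0.  */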
          /* Simplify comparison of something with itself.  (For IEEE
             floating-point, we can only do some of these simplifications.)  */
          if (operand_equal_p (arg0, arg1, 0))
            {
              switch (code)
                {
                case EQ_EXPR:
                case GE_EXPR:
                case LE_EXPR:
                  if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
                    {
                      t = build_int_2 (1, 0);
                      TREE_TYPE (t) = type;
                      return t;
                    }
                  code = EQ_EXPR;
                  TREE_SET_CODE (t, code);
                  break;

                case NE_EXPR:
                  /* For NE, we can only do this simplification if integer.  */
                  if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
                    break;
                  /* ... fall through ...  */
                case GT_EXPR:
                case LT_EXPR:
                  t = build_int_2 (0, 0);
                  TREE_TYPE (t) = type;
                  return t;
                }
            }
          /* An unsigned comparison against 0 can be simplified.  */
          if (integer_zerop (arg1)
              && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
                  || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
              && TREE_UNSIGNED (TREE_TYPE (arg1)))
            {
              switch (TREE_CODE (t))
                {
                case GT_EXPR:
                  code = NE_EXPR;
                  TREE_SET_CODE (t, NE_EXPR);
                  break;
                case LE_EXPR:
                  code = EQ_EXPR;
                  TREE_SET_CODE (t, EQ_EXPR);
                  break;
                case GE_EXPR:
                  return omit_one_operand (type,
                                           convert (type, integer_one_node),
                                           arg0);
                case LT_EXPR:
                  return omit_one_operand (type,
                                           convert (type, integer_zero_node),
                                           arg0);
                }
            }
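          /* Illustrative sketch (not from the original source): for
             unsigned u, u > 0 is the same as u != 0 and u <= 0 the same
             as u == 0, while u >= 0 is always true and u < 0 always
             false; the last two fold to constants but still keep u for
             its side effects.  */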
          /* If we are comparing an expression that just has comparisons
             of two integer values, arithmetic expressions of those comparisons,
             and constants, we can simplify it.  There are only three cases
             to check: the two values can either be equal, the first can be
             greater, or the second can be greater.  Fold the expression for
             those three values.  Since each value must be 0 or 1, we have
             eight possibilities, each of which corresponds to the constant 0
             or 1 or one of the six possible comparisons.

             This handles common cases like (a > b) == 0 but also handles
             expressions like ((x > y) - (y > x)) > 0, which supposedly
             occur in macroized code.  */
          if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
            {
              tree cval1 = 0, cval2 = 0;
              int save_p = 0;

              if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
                  /* Don't handle degenerate cases here; they should already
                     have been handled anyway.  */
                  && cval1 != 0 && cval2 != 0
                  && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
                  && TREE_TYPE (cval1) == TREE_TYPE (cval2)
                  && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
                  && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
                                        TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
                {
                  tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
                  tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));

                  /* We can't just pass T to eval_subst in case cval1 or cval2
                     was the same as ARG1.  */

                  tree high_result
                    = fold (build (code, type,
                                   eval_subst (arg0, cval1, maxval, cval2, minval),
                                   arg1));
                  tree equal_result
                    = fold (build (code, type,
                                   eval_subst (arg0, cval1, maxval, cval2, maxval),
                                   arg1));
                  tree low_result
                    = fold (build (code, type,
                                   eval_subst (arg0, cval1, minval, cval2, maxval),
                                   arg1));
                  /* All three of these results should be 0 or 1.  Confirm they
                     are.  Then use those values to select the proper code
                     to use.  */

                  if ((integer_zerop (high_result)
                       || integer_onep (high_result))
                      && (integer_zerop (equal_result)
                          || integer_onep (equal_result))
                      && (integer_zerop (low_result)
                          || integer_onep (low_result)))
                    {
                      /* Make a 3-bit mask with the high-order bit being the
                         value for `>', the next for `=', and the low for `<'.  */
                      switch ((integer_onep (high_result) * 4)
                              + (integer_onep (equal_result) * 2)
                              + integer_onep (low_result))
                        {
                        case 0:
                          /* Always false.  */
                          return omit_one_operand (type, integer_zero_node, arg0);
                        case 1:
                          code = LT_EXPR;
                          break;
                        case 2:
                          code = EQ_EXPR;
                          break;
                        case 3:
                          code = LE_EXPR;
                          break;
                        case 4:
                          code = GT_EXPR;
                          break;
                        case 5:
                          code = NE_EXPR;
                          break;
                        case 6:
                          code = GE_EXPR;
                          break;
                        case 7:
                          /* Always true.  */
                          return omit_one_operand (type, integer_one_node, arg0);
                        }
                    }
                  t = build (code, type, cval1, cval2);
                  if (save_p)
                    return save_expr (t);
                  else
                    return fold (t);
                }
            }
          /* If this is a comparison of a field, we may be able to simplify it.  */
          if ((TREE_CODE (arg0) == COMPONENT_REF
               || TREE_CODE (arg0) == BIT_FIELD_REF)
              && (code == EQ_EXPR || code == NE_EXPR)
              /* Handle the constant case even without -O
                 to make sure the warnings are given.  */
              && (optimize || TREE_CODE (arg1) == INTEGER_CST))
            {
              t1 = optimize_bit_field_compare (code, type, arg0, arg1);
              if (t1)
                return t1;
            }
          /* If this is a comparison of complex values and either or both
             sides are a COMPLEX_EXPR, it is best to split up the comparisons
             and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR.  This
             may prevent needless evaluations.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
              && (TREE_CODE (arg0) == COMPLEX_EXPR
                  || TREE_CODE (arg1) == COMPLEX_EXPR))
            {
              tree subtype = TREE_TYPE (TREE_TYPE (arg0));
              tree real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
              tree imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
              tree real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
              tree imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));

              return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
                                   : TRUTH_ORIF_EXPR),
                                  type,
                                  fold (build (code, type, real0, real1)),
                                  fold (build (code, type, imag0, imag1))));
            }
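          /* Illustrative sketch (not from the original source): two complex
             values are equal iff both parts match, so z1 == z2 splits into
             (re1 == re2) && (im1 == im2), while z1 != z2 joins the part
             tests with || instead.  */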
          /* From here on, the only cases we handle are when the result is
             known to be a constant.

             To compute GT, swap the arguments and do LT.
             To compute GE, do LT and invert the result.
             To compute LE, swap the arguments, do LT and invert the result.
             To compute NE, do EQ and invert the result.

             Therefore, the code below must handle only EQ and LT.  */

          if (code == LE_EXPR || code == GT_EXPR)
            {
              tem = arg0, arg0 = arg1, arg1 = tem;
              code = swap_tree_comparison (code);
            }

          /* Note that it is safe to invert for real values here because we
             will check below in the one case that it matters.  */

          invert = 0;
          if (code == NE_EXPR || code == GE_EXPR)
            {
              invert = 1;
              code = invert_tree_comparison (code);
            }
          /* Compute a result for LT or EQ if args permit;
             otherwise return T.  */
          if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
            {
              if (code == EQ_EXPR)
                t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
                                   == TREE_INT_CST_LOW (arg1))
                                  && (TREE_INT_CST_HIGH (arg0)
                                      == TREE_INT_CST_HIGH (arg1)),
                                  0);
              else
                t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
                                   ? INT_CST_LT_UNSIGNED (arg0, arg1)
                                   : INT_CST_LT (arg0, arg1)),
                                  0);
            }
          /* Assume a nonexplicit constant cannot equal an explicit one,
             since such code would be undefined anyway.
             Exception: on sysvr4, using #pragma weak,
             a label can come out as 0.  */
          else if (TREE_CODE (arg1) == INTEGER_CST
                   && !integer_zerop (arg1)
                   && TREE_CONSTANT (arg0)
                   && TREE_CODE (arg0) == ADDR_EXPR
                   && code == EQ_EXPR)
            t1 = build_int_2 (0, 0);
          /* Two real constants can be compared explicitly.  */
          else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
            {
              /* If either operand is a NaN, the result is false with two
                 exceptions: First, an NE_EXPR is true on NaNs, but that case
                 is already handled correctly since we will be inverting the
                 result for NE_EXPR.  Second, if we had inverted a LE_EXPR
                 or a GE_EXPR into a LT_EXPR, we must return true so that it
                 will be inverted into false.  */

              if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
                  || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
                t1 = build_int_2 (invert && code == LT_EXPR, 0);

              else if (code == EQ_EXPR)
                t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
                                                     TREE_REAL_CST (arg1)),
                                  0);
              else
                t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
                                                    TREE_REAL_CST (arg1)),
                                  0);
            }
          if (t1 == NULL_TREE)
            return t;

          if (invert)
            TREE_INT_CST_LOW (t1) ^= 1;

          TREE_TYPE (t1) = type;
          return t1;

        case COND_EXPR:
          /* Pedantic ANSI C says that a conditional expression is never an lvalue,
             so all simple results must be passed through pedantic_non_lvalue.  */
          if (TREE_CODE (arg0) == INTEGER_CST)
            return pedantic_non_lvalue
              (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
          else if (operand_equal_p (arg1, TREE_OPERAND (t, 2), 0))
            return pedantic_non_lvalue (omit_one_operand (type, arg1, arg0));
          /* If the second operand is zero, invert the comparison and swap
             the second and third operands.  Likewise if the second operand
             is constant and the third is not or if the third operand is
             equivalent to the first operand of the comparison.  */

          if (integer_zerop (arg1)
              || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
              || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
                  && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
                                                     TREE_OPERAND (t, 2),
                                                     TREE_OPERAND (arg0, 1))))
            {
              /* See if this can be inverted.  If it can't, possibly because
                 it was a floating-point inequality comparison, don't do
                 anything.  */
              tem = invert_truthvalue (arg0);

              if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
                {
                  arg0 = TREE_OPERAND (t, 0) = tem;
                  TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
                  TREE_OPERAND (t, 2) = arg1;
                  arg1 = TREE_OPERAND (t, 1);
                }
            }
          /* If we have A op B ? A : C, we may be able to convert this to a
             simpler expression, depending on the operation and the values
             of B and C.  IEEE floating point prevents this though,
             because A or B might be -0.0 or a NaN.  */

          if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
              && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
                  || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
                  || flag_fast_math)
              && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
                                                 arg1, TREE_OPERAND (arg0, 1)))
            {
              tree arg2 = TREE_OPERAND (t, 2);
              enum tree_code comp_code = TREE_CODE (arg0);
              /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
                 depending on the comparison operation.  */
              if (integer_zerop (TREE_OPERAND (arg0, 1))
                  && TREE_CODE (arg2) == NEGATE_EXPR
                  && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
                switch (comp_code)
                  {
                  case EQ_EXPR:
                    return pedantic_non_lvalue
                      (fold (build1 (NEGATE_EXPR, type, arg1)));
                  case NE_EXPR:
                    return pedantic_non_lvalue (convert (type, arg1));
                  case GE_EXPR:
                  case GT_EXPR:
                    return pedantic_non_lvalue
                      (fold (build1 (ABS_EXPR, type, arg1)));
                  case LE_EXPR:
                  case LT_EXPR:
                    return pedantic_non_lvalue
                      (fold (build1 (NEGATE_EXPR, type,
                                     fold (build1 (ABS_EXPR, type, arg1)))));
                  }
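              /* Illustrative sketch (not from the original source):
                 x > 0 ? x : -x is abs (x), while x < 0 ? x : -x is
                 -abs (x); when the comparison is ==, the two arms agree
                 at the only selected point, x == 0, so -x suffices.  */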
              /* If this is A != 0 ? A : 0, this is simply A.  For ==, it is
                 zero.  */

              if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
                {
                  if (comp_code == NE_EXPR)
                    return pedantic_non_lvalue (convert (type, arg1));
                  else if (comp_code == EQ_EXPR)
                    return pedantic_non_lvalue (convert (type, integer_zero_node));
                }
              /* If this is A op B ? A : B, this is either A, B, min (A, B),
                 or max (A, B), depending on the operation.  */

              if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
                                                  arg2, TREE_OPERAND (arg0, 0)))
                switch (comp_code)
                  {
                  case EQ_EXPR:
                    return pedantic_non_lvalue (convert (type, arg2));
                  case NE_EXPR:
                    return pedantic_non_lvalue (convert (type, arg1));
                  case LE_EXPR:
                  case LT_EXPR:
                    return pedantic_non_lvalue
                      (fold (build (MIN_EXPR, type, arg1, arg2)));
                  case GE_EXPR:
                  case GT_EXPR:
                    return pedantic_non_lvalue
                      (fold (build (MAX_EXPR, type, arg1, arg2)));
                  }
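              /* Illustrative sketch (not from the original source):
                 a < b ? a : b selects the smaller value, so it folds to
                 MIN_EXPR (a, b), and a > b ? a : b folds to MAX_EXPR.  */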
              /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
                 we might still be able to simplify this.  For example,
                 if C1 is one less or one more than C2, this might have started
                 out as a MIN or MAX and been transformed by this function.
                 Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE.  */

              if (INTEGRAL_TYPE_P (type)
                  && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
                  && TREE_CODE (arg2) == INTEGER_CST)
                switch (comp_code)
                  {
                  case EQ_EXPR:
                    /* We can replace A with C1 in this case.  */
                    arg1 = TREE_OPERAND (t, 1)
                      = convert (type, TREE_OPERAND (arg0, 1));
                    break;
                  case LT_EXPR:
                    /* If C1 is C2 + 1, this is min(A, C2).  */
                    if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
                        && operand_equal_p (TREE_OPERAND (arg0, 1),
                                            const_binop (PLUS_EXPR, arg2,
                                                         integer_one_node, 0), 1))
                      return pedantic_non_lvalue
                        (fold (build (MIN_EXPR, type, arg1, arg2)));
                    break;
                  case LE_EXPR:
                    /* If C1 is C2 - 1, this is min(A, C2).  */
                    if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
                        && operand_equal_p (TREE_OPERAND (arg0, 1),
                                            const_binop (MINUS_EXPR, arg2,
                                                         integer_one_node, 0), 1))
                      return pedantic_non_lvalue
                        (fold (build (MIN_EXPR, type, arg1, arg2)));
                    break;
                  case GT_EXPR:
                    /* If C1 is C2 - 1, this is max(A, C2).  */
                    if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
                        && operand_equal_p (TREE_OPERAND (arg0, 1),
                                            const_binop (MINUS_EXPR, arg2,
                                                         integer_one_node, 0), 1))
                      return pedantic_non_lvalue
                        (fold (build (MAX_EXPR, type, arg1, arg2)));
                    break;
                  case GE_EXPR:
                    /* If C1 is C2 + 1, this is max(A, C2).  */
                    if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
                        && operand_equal_p (TREE_OPERAND (arg0, 1),
                                            const_binop (PLUS_EXPR, arg2,
                                                         integer_one_node, 0), 1))
                      return pedantic_non_lvalue
                        (fold (build (MAX_EXPR, type, arg1, arg2)));
                    break;
                  }
            }
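          /* Illustrative sketch (not from the original source):
             x < 5 ? x : 4 matches the C1 == C2 + 1 test with C1 = 5 and
             C2 = 4; since x < 5 is x <= 4 for integers, the conditional
             is min (x, 4).  */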
          /* If the second operand is simpler than the third, swap them
             since that produces better jump optimization results.  */
          if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
               || TREE_CODE (arg1) == SAVE_EXPR)
              && ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
                    || TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
                    || TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
            {
              /* See if this can be inverted.  If it can't, possibly because
                 it was a floating-point inequality comparison, don't do
                 anything.  */
              tem = invert_truthvalue (arg0);

              if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
                {
                  arg0 = TREE_OPERAND (t, 0) = tem;
                  TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
                  TREE_OPERAND (t, 2) = arg1;
                  arg1 = TREE_OPERAND (t, 1);
                }
            }
          /* Convert A ? 1 : 0 to simply A.  */
          if (integer_onep (TREE_OPERAND (t, 1))
              && integer_zerop (TREE_OPERAND (t, 2))
              /* If we try to convert TREE_OPERAND (t, 0) to our type, the
                 call to fold will try to move the conversion inside
                 a COND, which will recurse.  In that case, the COND_EXPR
                 is probably the best choice, so leave it alone.  */
              && type == TREE_TYPE (arg0))
            return pedantic_non_lvalue (arg0);
          /* Look for expressions of the form A & 2 ? 2 : 0.  The result of this
             operation is simply A & 2.  */

          if (integer_zerop (TREE_OPERAND (t, 2))
              && TREE_CODE (arg0) == NE_EXPR
              && integer_zerop (TREE_OPERAND (arg0, 1))
              && integer_pow2p (arg1)
              && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
              && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
                                  arg1, 1))
            return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
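          /* Illustrative sketch (not from the original source): in
             (x & 2) != 0 ? 2 : 0, the selected constant equals the only
             nonzero value x & 2 can take, so the whole conditional is
             just x & 2.  */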
          break;

        case COMPOUND_EXPR:
          /* When pedantic, a compound expression can be neither an lvalue
             nor an integer constant expression.  */
          if (TREE_SIDE_EFFECTS (arg0) || pedantic)
            return t;
          /* Don't let (0, 0) be null pointer constant.  */
          if (integer_zerop (arg1))
            return non_lvalue (arg1);
          return arg1;

        case COMPLEX_EXPR:
          return build_complex (arg0, arg1);
        case REALPART_EXPR:
          if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
            return t;
          else if (TREE_CODE (arg0) == COMPLEX_EXPR)
            return omit_one_operand (type, TREE_OPERAND (arg0, 0),
                                     TREE_OPERAND (arg0, 1));
          else if (TREE_CODE (arg0) == COMPLEX_CST)
            return TREE_REALPART (arg0);
          else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
            return fold (build (TREE_CODE (arg0), type,
                                fold (build1 (REALPART_EXPR, type,
                                              TREE_OPERAND (arg0, 0))),
                                fold (build1 (REALPART_EXPR,
                                              type, TREE_OPERAND (arg0, 1)))));
          return t;
        case IMAGPART_EXPR:
          if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
            return convert (type, integer_zero_node);
          else if (TREE_CODE (arg0) == COMPLEX_EXPR)
            return omit_one_operand (type, TREE_OPERAND (arg0, 1),
                                     TREE_OPERAND (arg0, 0));
          else if (TREE_CODE (arg0) == COMPLEX_CST)
            return TREE_IMAGPART (arg0);
          else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
            return fold (build (TREE_CODE (arg0), type,
                                fold (build1 (IMAGPART_EXPR, type,
                                              TREE_OPERAND (arg0, 0))),
                                fold (build1 (IMAGPART_EXPR, type,
                                              TREE_OPERAND (arg0, 1)))));
          return t;
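          /* Illustrative sketch (not from the original source): taking the
             imaginary part distributes over complex addition, so
             __imag__ (a + b) folds to __imag__ a + __imag__ b, and a
             non-complex operand has imaginary part 0.  */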
        default:
          return t;
        } /* switch (code) */