1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 88, 92, 93, 94, 1995 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
20 /*@@ This file should be rewritten to use an arbitrary precision
21 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
22 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
23 @@ The routines that translate from the ap rep should
24 @@ warn if precision et al. is lost.
25 @@ This would also make life easier when this technology is used
26 @@ for cross-compilers. */
29 /* The entry points in this file are fold, size_int and size_binop.
31 fold takes a tree as argument and returns a simplified tree.
33 size_binop takes a tree code for an arithmetic operation
34 and two operands that are trees, and produces a tree for the
35 result, assuming the type comes from `sizetype'.
37 size_int takes an integer value, and creates a tree constant
38 with type from `sizetype'. */
/* File-scope state and forward declarations for the local folding helpers
   defined later in this file.
   NOTE(review): several PROTO argument lists below are visibly truncated
   (unbalanced parentheses) -- lines appear to have been lost from this copy
   of the file; restore them from the pristine source before compiling.  */
46 /* Handle floating overflow for `const_binop'. */
47 static jmp_buf float_error;
49 static void encode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT, HOST_WIDE_INT));
50 static void decode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *, HOST_WIDE_INT *));
/* NOTE(review): this PROTO list is missing its closing line.  */
51 int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
52 HOST_WIDE_INT, HOST_WIDE_INT,
53 HOST_WIDE_INT, HOST_WIDE_INT *,
54 HOST_WIDE_INT *, HOST_WIDE_INT *,
56 static int split_tree PROTO((tree, enum tree_code, tree *, tree *, int *));
57 static tree const_binop PROTO((enum tree_code, tree, tree, int));
58 static tree fold_convert PROTO((tree, tree));
59 static enum tree_code invert_tree_comparison PROTO((enum tree_code));
60 static enum tree_code swap_tree_comparison PROTO((enum tree_code));
61 static int truth_value_p PROTO((enum tree_code));
62 static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
63 static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
64 static tree eval_subst PROTO((tree, tree, tree, tree, tree));
65 static tree omit_one_operand PROTO((tree, tree, tree));
66 static tree pedantic_omit_one_operand PROTO((tree, tree, tree));
67 static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
68 static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
/* NOTE(review): truncated PROTO list.  */
69 static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
/* NOTE(review): truncated PROTO list.  */
71 static tree decode_field_reference PROTO((tree, int *, int *,
72 enum machine_mode *, int *,
74 static int all_ones_mask_p PROTO((tree, int));
75 static int simple_operand_p PROTO((tree));
76 static tree range_test PROTO((enum tree_code, tree, enum tree_code,
77 enum tree_code, tree, tree, tree));
78 static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
79 static tree strip_compound_expr PROTO((tree, tree));
85 /* Yield nonzero if a signed left shift of A by B bits overflows. */
86 #define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))
88 /* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
89 Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
90 Then this yields nonzero if overflow occurred during the addition.
91 Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
92 Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
93 #define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
95 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
96 We do that by representing the two-word integer in 4 words, with only
97 HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number. */
/* NOTE(review): the line below is the continuation of `#define LOWPART(x) \'
   whose defining line has been lost from this dump -- restore it.  */
100 ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
101 #define HIGHPART(x) \
102 ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
103 #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
105 /* Unpack a two-word integer into 4 words.
106 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
107 WORDS points to the array of HOST_WIDE_INTs. */
110 encode (words, low, hi)
111 HOST_WIDE_INT *words;
112 HOST_WIDE_INT low, hi;
114 words[0] = LOWPART (low);
115 words[1] = HIGHPART (low);
116 words[2] = LOWPART (hi);
117 words[3] = HIGHPART (hi);
120 /* Pack an array of 4 words into a two-word integer.
121 WORDS points to the array of words.
122 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
125 decode (words, low, hi)
126 HOST_WIDE_INT *words;
127 HOST_WIDE_INT *low, *hi;
129 *low = words[0] | words[1] * BASE;
130 *hi = words[2] | words[3] * BASE;
/* NOTE(review): this definition is incomplete in this copy of the file --
   the `int' return-type line, the `tree t;' parameter declarations, braces,
   and several statements are missing.  The surviving lines are kept verbatim
   below; restore the rest from the pristine source.  */
133 /* Make the integer constant T valid for its type
134 by setting to 0 or 1 all the bits in the constant
135 that don't belong in the type.
136 Yield 1 if a signed overflow occurs, 0 otherwise.
137 If OVERFLOW is nonzero, a signed overflow has already occurred
138 in calculating T, so propagate it.
140 Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
144 force_fit_type (t, overflow)
148 HOST_WIDE_INT low, high;
/* Real constants are handled by the target's CHECK_FLOAT_VALUE hook,
   if any; there is nothing to mask for them.  */
151 if (TREE_CODE (t) == REAL_CST)
153 #ifdef CHECK_FLOAT_VALUE
154 CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
160 else if (TREE_CODE (t) != INTEGER_CST)
/* Remember the original value so overflow can be detected at the end.  */
163 low = TREE_INT_CST_LOW (t);
164 high = TREE_INT_CST_HIGH (t);
166 if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
169 prec = TYPE_PRECISION (TREE_TYPE (t));
171 /* First clear all bits that are beyond the type's precision. */
173 if (prec == 2 * HOST_BITS_PER_WIDE_INT)
175 else if (prec > HOST_BITS_PER_WIDE_INT)
177 TREE_INT_CST_HIGH (t)
178 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
182 TREE_INT_CST_HIGH (t) = 0;
183 if (prec < HOST_BITS_PER_WIDE_INT)
184 TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
187 /* Unsigned types do not suffer sign extension or overflow. */
188 if (TREE_UNSIGNED (TREE_TYPE (t)))
191 /* If the value's sign bit is set, extend the sign. */
192 if (prec != 2 * HOST_BITS_PER_WIDE_INT
193 && (prec > HOST_BITS_PER_WIDE_INT
194 ? (TREE_INT_CST_HIGH (t)
195 & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
196 : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
198 /* Value is negative:
199 set to 1 all the bits that are outside this type's precision. */
200 if (prec > HOST_BITS_PER_WIDE_INT)
202 TREE_INT_CST_HIGH (t)
203 |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
207 TREE_INT_CST_HIGH (t) = -1;
208 if (prec < HOST_BITS_PER_WIDE_INT)
209 TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
213 /* Yield nonzero if signed overflow occurred. */
/* Overflow iff the propagated flag is set or any masked-off bit changed.  */
215 ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
219 /* Add two doubleword integers with doubleword result.
220 Each argument is given as two `HOST_WIDE_INT' pieces.
221 One argument is L1 and H1; the other, L2 and H2.
222 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
225 add_double (l1, h1, l2, h2, lv, hv)
226 HOST_WIDE_INT l1, h1, l2, h2;
227 HOST_WIDE_INT *lv, *hv;
232 h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);
236 return overflow_sum_sign (h1, h2, h);
239 /* Negate a doubleword integer with doubleword result.
240 Return nonzero if the operation overflows, assuming it's signed.
241 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
242 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
245 neg_double (l1, h1, lv, hv)
246 HOST_WIDE_INT l1, h1;
247 HOST_WIDE_INT *lv, *hv;
253 return (*hv & h1) < 0;
263 /* Multiply two doubleword integers with doubleword result.
264 Return nonzero if the operation overflows, assuming it's signed.
265 Each argument is given as two `HOST_WIDE_INT' pieces.
266 One argument is L1 and H1; the other, L2 and H2.
267 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
270 mul_double (l1, h1, l2, h2, lv, hv)
271 HOST_WIDE_INT l1, h1, l2, h2;
272 HOST_WIDE_INT *lv, *hv;
274 HOST_WIDE_INT arg1[4];
275 HOST_WIDE_INT arg2[4];
276 HOST_WIDE_INT prod[4 * 2];
277 register unsigned HOST_WIDE_INT carry;
278 register int i, j, k;
279 HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
281 encode (arg1, l1, h1);
282 encode (arg2, l2, h2);
284 bzero ((char *) prod, sizeof prod);
286 for (i = 0; i < 4; i++)
289 for (j = 0; j < 4; j++)
292 /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
293 carry += arg1[i] * arg2[j];
294 /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
296 prod[k] = LOWPART (carry);
297 carry = HIGHPART (carry);
302 decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */
304 /* Check for overflow by calculating the top half of the answer in full;
305 it should agree with the low half's sign bit. */
306 decode (prod+4, &toplow, &tophigh);
309 neg_double (l2, h2, &neglow, &neghigh);
310 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
314 neg_double (l1, h1, &neglow, &neghigh);
315 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
317 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
320 /* Shift the doubleword integer in L1, H1 left by COUNT places
321 keeping only PREC bits of result.
322 Shift right if COUNT is negative.
323 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
324 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
327 lshift_double (l1, h1, count, prec, lv, hv, arith)
328 HOST_WIDE_INT l1, h1, count;
330 HOST_WIDE_INT *lv, *hv;
335 rshift_double (l1, h1, - count, prec, lv, hv, arith);
340 count = (unsigned HOST_WIDE_INT) count & prec;
342 if (count >= HOST_BITS_PER_WIDE_INT)
344 *hv = (unsigned HOST_WIDE_INT) l1 << count - HOST_BITS_PER_WIDE_INT;
349 *hv = (((unsigned HOST_WIDE_INT) h1 << count)
350 | ((unsigned HOST_WIDE_INT) l1 >> HOST_BITS_PER_WIDE_INT - count - 1 >> 1));
351 *lv = (unsigned HOST_WIDE_INT) l1 << count;
355 /* Shift the doubleword integer in L1, H1 right by COUNT places
356 keeping only PREC bits of result. COUNT must be positive.
357 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
358 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
361 rshift_double (l1, h1, count, prec, lv, hv, arith)
362 HOST_WIDE_INT l1, h1, count;
364 HOST_WIDE_INT *lv, *hv;
367 unsigned HOST_WIDE_INT signmask;
369 ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
373 count = (unsigned HOST_WIDE_INT) count % prec;
375 if (count >= HOST_BITS_PER_WIDE_INT)
378 *lv = ((signmask << 2 * HOST_BITS_PER_WIDE_INT - count - 1 << 1)
379 | ((unsigned HOST_WIDE_INT) h1 >> count - HOST_BITS_PER_WIDE_INT));
383 *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
384 | ((unsigned HOST_WIDE_INT) h1 << HOST_BITS_PER_WIDE_INT - count - 1 << 1));
385 *hv = ((signmask << HOST_BITS_PER_WIDE_INT - count)
386 | ((unsigned HOST_WIDE_INT) h1 >> count));
/* NOTE(review): this definition is incomplete in this copy of the file --
   the return-type line, remaining parameter declarations, braces, the
   negative-count test around the rrotate_double call, and the rotate loop's
   control structure are missing.  Surviving lines kept verbatim.  */
390 /* Rotate the doubleword integer in L1, H1 left by COUNT places
391 keeping only PREC bits of result.
392 Rotate right if COUNT is negative.
393 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
396 lrotate_double (l1, h1, count, prec, lv, hv)
397 HOST_WIDE_INT l1, h1, count;
399 HOST_WIDE_INT *lv, *hv;
401 HOST_WIDE_INT arg1[4];
/* Negative count: delegate to the right-rotate routine.  */
407 rrotate_double (l1, h1, - count, prec, lv, hv);
411 encode (arg1, l1, h1);
/* Pick up the bit rotated out of the top digit.  */
416 carry = arg1[4 - 1] >> 16 - 1;
/* Rotate the half-word digits left one bit, propagating the carry.  */
419 for (i = 0; i < 4; i++)
421 carry += arg1[i] << 1;
422 arg1[i] = LOWPART (carry);
423 carry = HIGHPART (carry);
428 decode (arg1, lv, hv);
/* NOTE(review): comment below says "left" but this is the right-rotate
   routine (it is the target of lrotate_double's negative-count case) --
   presumably a copy-paste slip; confirm against the pristine source.
   The definition is also incomplete: return type, braces, and the loop
   body that feeds `carry' are missing.  */
431 /* Rotate the doubleword integer in L1, H1 left by COUNT places
432 keeping only PREC bits of result. COUNT must be positive.
433 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
436 rrotate_double (l1, h1, count, prec, lv, hv)
437 HOST_WIDE_INT l1, h1, count;
439 HOST_WIDE_INT *lv, *hv;
441 HOST_WIDE_INT arg1[4];
445 encode (arg1, l1, h1);
/* Rotate the half-word digits right one bit, from most to least
   significant.  */
453 for (i = 4 - 1; i >= 0; i--)
457 arg1[i] = LOWPART (carry >> 1);
462 decode (arg1, lv, hv);
/* NOTE(review): this definition is incomplete in this copy of the file --
   braces, several declarations (e.g. `int quo_neg', `int overflow', the
   switch statement head on `code'), and many structural lines are missing.
   Surviving lines kept verbatim; two character-level garbles are flagged
   inline below.  */
465 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
466 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
467 CODE is a tree code for a kind of division, one of
468 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
470 It controls how the quotient is rounded to a integer.
471 Return nonzero if the operation overflows.
472 UNS nonzero says do unsigned division. */
475 div_and_round_double (code, uns,
476 lnum_orig, hnum_orig, lden_orig, hden_orig,
477 lquo, hquo, lrem, hrem)
480 HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
481 HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
482 HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
485 HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
486 HOST_WIDE_INT den[4], quo[4];
488 unsigned HOST_WIDE_INT work;
489 register int carry = 0;
490 HOST_WIDE_INT lnum = lnum_orig;
491 HOST_WIDE_INT hnum = hnum_orig;
492 HOST_WIDE_INT lden = lden_orig;
493 HOST_WIDE_INT hden = hden_orig;
/* Division by zero: the (missing) handler presumably aborts.  */
496 if ((hden == 0) && (lden == 0))
499 /* calculate quotient sign and convert operands to unsigned. */
505 /* (minimum integer) / (-1) is the only overflow case. */
506 if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
512 neg_double (lden, hden, &lden, &hden);
516 if (hnum == 0 && hden == 0)
517 { /* single precision */
519 /* This unsigned division rounds toward zero. */
520 *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
525 { /* trivial case: dividend < divisor */
526 /* hden != 0 already checked. */
533 bzero ((char *) quo, sizeof quo);
535 bzero ((char *) num, sizeof num); /* to zero 9th element */
536 bzero ((char *) den, sizeof den);
538 encode (num, lnum, hnum);
539 encode (den, lden, hden);
541 /* Special code for when the divisor < BASE. */
542 if (hden == 0 && lden < BASE)
544 /* hnum != 0 already checked. */
545 for (i = 4 - 1; i >= 0; i--)
547 work = num[i] + carry * BASE;
548 quo[i] = work / (unsigned HOST_WIDE_INT) lden;
549 carry = work % (unsigned HOST_WIDE_INT) lden;
554 /* Full double precision division,
555 with thanks to Don Knuth's "Seminumerical Algorithms". */
556 int quo_est, scale, num_hi_sig, den_hi_sig;
558 /* Find the highest non-zero divisor digit. */
559 for (i = 4 - 1; ; i--)
565 /* Insure that the first digit of the divisor is at least BASE/2.
566 This is required by the quotient digit estimation algorithm. */
568 scale = BASE / (den[den_hi_sig] + 1);
569 if (scale > 1) { /* scale divisor and dividend */
571 for (i = 0; i <= 4 - 1; i++) {
572 work = (num[i] * scale) + carry;
573 num[i] = LOWPART (work);
574 carry = HIGHPART (work);
577 for (i = 0; i <= 4 - 1; i++) {
578 work = (den[i] * scale) + carry;
579 den[i] = LOWPART (work);
580 carry = HIGHPART (work);
581 if (den[i] != 0) den_hi_sig = i;
/* Main quotient loop, one half-word digit per iteration.  */
588 for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
589 /* guess the next quotient digit, quo_est, by dividing the first
590 two remaining dividend digits by the high order quotient digit.
591 quo_est is never low and is at most 2 high. */
592 unsigned HOST_WIDE_INT tmp;
594 num_hi_sig = i + den_hi_sig + 1;
595 work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
596 if (num[num_hi_sig] != den[den_hi_sig])
597 quo_est = work / den[den_hi_sig];
601 /* refine quo_est so it's usually correct, and at most one high. */
602 tmp = work - quo_est * den[den_hi_sig];
604 && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
607 /* Try QUO_EST as the quotient digit, by multiplying the
608 divisor by QUO_EST and subtracting from the remaining dividend.
609 Keep in mind that QUO_EST is the I - 1st digit. */
612 for (j = 0; j <= den_hi_sig; j++)
614 work = quo_est * den[j] + carry;
615 carry = HIGHPART (work);
616 work = num[i + j] - LOWPART (work);
617 num[i + j] = LOWPART (work);
618 carry += HIGHPART (work) != 0;
621 /* if quo_est was high by one, then num[i] went negative and
622 we need to correct things. */
624 if (num[num_hi_sig] < carry)
627 carry = 0; /* add divisor back in */
628 for (j = 0; j <= den_hi_sig; j++)
630 work = num[i + j] + den[j] + carry;
631 carry = HIGHPART (work);
632 num[i + j] = LOWPART (work);
634 num [num_hi_sig] += carry;
637 /* store the quotient digit. */
642 decode (quo, lquo, hquo);
645 /* if result is negative, make it so. */
647 neg_double (*lquo, *hquo, lquo, hquo);
649 /* compute trial remainder: rem = num - (quo * den) */
650 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
651 neg_double (*lrem, *hrem, lrem, hrem);
652 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
/* Rounding adjustment, keyed off CODE (switch head lost in this dump).  */
657 case TRUNC_MOD_EXPR: /* round toward zero */
658 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
662 case FLOOR_MOD_EXPR: /* round toward negative infinity */
663 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
666 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
669 else return overflow;
673 case CEIL_MOD_EXPR: /* round toward positive infinity */
674 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
676 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
679 else return overflow;
683 case ROUND_MOD_EXPR: /* round to closest integer */
685 HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
686 HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
688 /* get absolute values */
689 if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
690 if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
692 /* if (2 * abs (lrem) >= abs (lden)) */
693 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
/* NOTE(review): `<wice' below is a mis-encoding of `&ltwice'
   (an `&lt' entity was decoded as `<') -- restore `&ltwice'.  */
694 labs_rem, habs_rem, <wice, &htwice);
695 if (((unsigned HOST_WIDE_INT) habs_den
696 < (unsigned HOST_WIDE_INT) htwice)
697 || (((unsigned HOST_WIDE_INT) habs_den
698 == (unsigned HOST_WIDE_INT) htwice)
/* NOTE(review): `(HOST_WIDE_INT unsigned)' below is garbled;
   every sibling cast here reads `(unsigned HOST_WIDE_INT)'.  */
699 && ((HOST_WIDE_INT unsigned) labs_den
700 < (unsigned HOST_WIDE_INT) ltwice)))
704 add_double (*lquo, *hquo,
705 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
708 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
711 else return overflow;
719 /* compute true remainder: rem = num - (quo * den) */
720 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
721 neg_double (*lrem, *hrem, lrem, hrem);
722 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
726 #ifndef REAL_ARITHMETIC
727 /* Effectively truncate a real value to represent the nearest possible value
728 in a narrower mode. The result is actually represented in the same data
729 type as the argument, but its value is usually different.
731 A trap may occur during the FP operations and it is the responsibility
732 of the calling function to have a handler established. */
/* NOTE(review): the return-type line, the declaration of ARG, and the
   function braces are missing from this copy of the file.  */
735 real_value_truncate (mode, arg)
736 enum machine_mode mode;
/* Delegates entirely to the target macro REAL_VALUE_TRUNCATE.  */
739 return REAL_VALUE_TRUNCATE (mode, arg);
742 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
/* NOTE(review): the three IEEE-double predicates in this section have lost
   their function-header lines (names/parameters), the union declarations
   wrapping the bit-field structs, and the big/little-endian #ifdef lines.
   Only the bit-field members and return expressions survive below.  */
744 /* Check for infinity in an IEEE double precision number. */
750 /* The IEEE 64-bit double format. */
755 unsigned exponent : 11;
756 unsigned mantissa1 : 20;
761 unsigned mantissa1 : 20;
762 unsigned exponent : 11;
768 if (u.big_endian.sign == 1)
/* Infinity: all-ones exponent, zero mantissa.  */
771 return (u.big_endian.exponent == 2047
772 && u.big_endian.mantissa1 == 0
773 && u.big_endian.mantissa2 == 0);
778 return (u.little_endian.exponent == 2047
779 && u.little_endian.mantissa1 == 0
780 && u.little_endian.mantissa2 == 0);
784 /* Check whether an IEEE double precision number is a NaN. */
790 /* The IEEE 64-bit double format. */
795 unsigned exponent : 11;
796 unsigned mantissa1 : 20;
801 unsigned mantissa1 : 20;
802 unsigned exponent : 11;
808 if (u.big_endian.sign == 1)
/* NaN: all-ones exponent, nonzero mantissa.  */
811 return (u.big_endian.exponent == 2047
812 && (u.big_endian.mantissa1 != 0
813 || u.big_endian.mantissa2 != 0));
818 return (u.little_endian.exponent == 2047
819 && (u.little_endian.mantissa1 != 0
820 || u.little_endian.mantissa2 != 0));
824 /* Check for a negative IEEE double precision number. */
830 /* The IEEE 64-bit double format. */
835 unsigned exponent : 11;
836 unsigned mantissa1 : 20;
841 unsigned mantissa1 : 20;
842 unsigned exponent : 11;
848 if (u.big_endian.sign == 1)
851 return u.big_endian.sign;
856 return u.little_endian.sign;
859 #else /* Target not IEEE */
861 /* Let's assume other float formats don't have infinity.
862 (This can be overridden by redefining REAL_VALUE_ISINF.) */
870 /* Let's assume other float formats don't have NaNs.
871 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
879 /* Let's assume other float formats don't have minus zero.
880 (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
887 #endif /* Target not IEEE */
888 #endif /* no REAL_ARITHMETIC */
/* NOTE(review): this definition is incomplete in this copy of the file --
   the `static int' header, parameter declarations, braces, `return 1;' /
   `return 0;' statements, and the `else return 0;' arms are missing.
   Surviving lines kept verbatim.  */
890 /* Split a tree IN into a constant and a variable part
891 that could be combined with CODE to make IN.
892 CODE must be a commutative arithmetic operation.
893 Store the constant part into *CONP and the variable in &VARP.
894 Return 1 if this was done; zero means the tree IN did not decompose
897 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
898 Therefore, we must tell the caller whether the variable part
899 was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
900 The value stored is the coefficient for the variable term.
901 The constant term we return should always be added;
902 we negate it if necessary. */
905 split_tree (in, code, varp, conp, varsignp)
911 register tree outtype = TREE_TYPE (in);
915 /* Strip any conversions that don't change the machine mode. */
916 while ((TREE_CODE (in) == NOP_EXPR
917 || TREE_CODE (in) == CONVERT_EXPR)
918 && (TYPE_MODE (TREE_TYPE (in))
919 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
920 in = TREE_OPERAND (in, 0);
922 if (TREE_CODE (in) == code
923 || (! FLOAT_TYPE_P (TREE_TYPE (in))
924 /* We can associate addition and subtraction together
925 (even though the C standard doesn't say so)
926 for integers because the value is not affected.
927 For reals, the value might be affected, so we can't. */
928 && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
929 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
/* Note this local `code' shadows the parameter of the same name.  */
931 enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
/* Case 1: first operand is the integer constant.  */
932 if (code == INTEGER_CST)
934 *conp = TREE_OPERAND (in, 0);
935 *varp = TREE_OPERAND (in, 1);
936 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
937 && TREE_TYPE (*varp) != outtype)
938 *varp = convert (outtype, *varp);
939 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
/* Case 2: second operand is constant.  */
942 if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
944 *conp = TREE_OPERAND (in, 1);
945 *varp = TREE_OPERAND (in, 0);
947 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
948 && TREE_TYPE (*varp) != outtype)
949 *varp = convert (outtype, *varp);
950 if (TREE_CODE (in) == MINUS_EXPR)
952 /* If operation is subtraction and constant is second,
953 must negate it to get an additive constant.
954 And this cannot be done unless it is a manifest constant.
955 It could also be the address of a static variable.
956 We cannot negate that, so give up. */
957 if (TREE_CODE (*conp) == INTEGER_CST)
958 /* Subtracting from integer_zero_node loses for long long. */
959 *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
/* Case 3: first operand is a non-integer constant.  */
965 if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
967 *conp = TREE_OPERAND (in, 0);
968 *varp = TREE_OPERAND (in, 1);
969 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
970 && TREE_TYPE (*varp) != outtype)
971 *varp = convert (outtype, *varp);
972 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
/* NOTE(review): this definition is incomplete in this copy of the file --
   the `static tree' header, the `register tree t;' / `int overflow = 0;'
   declarations, the `switch (code)' heads, most `case' labels, `break's,
   braces, and `return t;' statements are missing.  Also, "NUM" in the
   first comment line should presumably read "ARG1".  Surviving lines kept
   verbatim.  */
979 /* Combine two constants NUM and ARG2 under operation CODE
980 to produce a new constant.
981 We assume ARG1 and ARG2 have the same data type,
982 or at least are the same kind of constant and the same machine mode.
984 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
987 const_binop (code, arg1, arg2, notrunc)
989 register tree arg1, arg2;
/* --- Integer constants: two-word arithmetic via the *_double helpers.  */
992 if (TREE_CODE (arg1) == INTEGER_CST)
994 register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
995 register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
996 HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
997 HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
998 HOST_WIDE_INT low, hi;
999 HOST_WIDE_INT garbagel, garbageh;
1001 int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
/* Bitwise operations work independently on each word.  */
1007 t = build_int_2 (int1l | int2l, int1h | int2h);
1011 t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
1015 t = build_int_2 (int1l & int2l, int1h & int2h);
1018 case BIT_ANDTC_EXPR:
1019 t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
1025 /* It's unclear from the C standard whether shifts can overflow.
1026 The following code ignores overflow; perhaps a C standard
1027 interpretation ruling is needed. */
1028 lshift_double (int1l, int1h, int2l,
1029 TYPE_PRECISION (TREE_TYPE (arg1)),
1032 t = build_int_2 (low, hi);
1033 TREE_TYPE (t) = TREE_TYPE (arg1);
1035 force_fit_type (t, 0);
1036 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
1037 TREE_CONSTANT_OVERFLOW (t)
1038 = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
1044 lrotate_double (int1l, int1h, int2l,
1045 TYPE_PRECISION (TREE_TYPE (arg1)),
1047 t = build_int_2 (low, hi);
/* MIN/MAX-style fast paths appear below; the governing case labels
   are among the lost lines.  */
1054 if ((unsigned HOST_WIDE_INT) int2l < int1l)
1057 overflow = int2h < hi;
1059 t = build_int_2 (int2l, int2h);
1065 if ((unsigned HOST_WIDE_INT) int1l < int2l)
1068 overflow = int1h < hi;
1070 t = build_int_2 (int1l, int1h);
1073 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
1074 t = build_int_2 (low, hi);
/* Subtraction: x - 0 shortcut, else add the negation.  */
1078 if (int2h == 0 && int2l == 0)
1080 t = build_int_2 (int1l, int1h);
1083 neg_double (int2l, int2h, &low, &hi);
1084 add_double (int1l, int1h, low, hi, &low, &hi);
1085 overflow = overflow_sum_sign (hi, int2h, int1h);
1086 t = build_int_2 (low, hi);
1090 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1091 t = build_int_2 (low, hi);
1094 case TRUNC_DIV_EXPR:
1095 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1096 case EXACT_DIV_EXPR:
1097 /* This is a shortcut for a common special case.
1098 It reduces the number of tree nodes generated
1100 if (int2h == 0 && int2l > 0
1101 && TREE_TYPE (arg1) == sizetype
1102 && int1h == 0 && int1l >= 0)
1104 if (code == CEIL_DIV_EXPR)
1106 return size_int (int1l / int2l);
1108 case ROUND_DIV_EXPR:
/* Division fast paths: x / 1, x / x, 0 / x.  */
1109 if (int2h == 0 && int2l == 1)
1111 t = build_int_2 (int1l, int1h);
1114 if (int1l == int2l && int1h == int2h)
1116 if ((int1l | int1h) == 0)
1118 t = build_int_2 (1, 0);
1121 overflow = div_and_round_double (code, uns,
1122 int1l, int1h, int2l, int2h,
1123 &low, &hi, &garbagel, &garbageh);
1124 t = build_int_2 (low, hi);
1127 case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
1128 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
/* For MOD, keep the remainder and discard the quotient.  */
1129 overflow = div_and_round_double (code, uns,
1130 int1l, int1h, int2l, int2h,
1131 &garbagel, &garbageh, &low, &hi);
1132 t = build_int_2 (low, hi);
/* MIN/MAX: compare the two-word values, unsigned vs. signed.  */
1139 low = (((unsigned HOST_WIDE_INT) int1h
1140 < (unsigned HOST_WIDE_INT) int2h)
1141 || (((unsigned HOST_WIDE_INT) int1h
1142 == (unsigned HOST_WIDE_INT) int2h)
1143 && ((unsigned HOST_WIDE_INT) int1l
1144 < (unsigned HOST_WIDE_INT) int2l)));
1148 low = ((int1h < int2h)
1149 || ((int1h == int2h)
1150 && ((unsigned HOST_WIDE_INT) int1l
1151 < (unsigned HOST_WIDE_INT) int2l)));
1153 if (low == (code == MIN_EXPR))
1154 t = build_int_2 (int1l, int1h);
1156 t = build_int_2 (int2l, int2h);
1163 TREE_TYPE (t) = TREE_TYPE (arg1);
1165 = ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
1166 | TREE_OVERFLOW (arg1)
1167 | TREE_OVERFLOW (arg2));
1168 TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
1169 | TREE_CONSTANT_OVERFLOW (arg1)
1170 | TREE_CONSTANT_OVERFLOW (arg2));
/* --- Real constants: arithmetic under a float-trap handler.  */
1173 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1174 if (TREE_CODE (arg1) == REAL_CST)
1179 REAL_VALUE_TYPE value;
1182 d1 = TREE_REAL_CST (arg1);
1183 d2 = TREE_REAL_CST (arg2);
1185 /* If either operand is a NaN, just return it. Otherwise, set up
1186 for floating-point trap; we return an overflow. */
1187 if (REAL_VALUE_ISNAN (d1))
1189 else if (REAL_VALUE_ISNAN (d2))
1191 else if (setjmp (float_error))
1193 t = copy_node (arg1);
1198 set_float_handler (float_error);
1200 #ifdef REAL_ARITHMETIC
1201 REAL_ARITHMETIC (value, code, d1, d2);
1218 #ifndef REAL_INFINITY
1227 value = MIN (d1, d2);
1231 value = MAX (d1, d2);
1237 #endif /* no REAL_ARITHMETIC */
1238 t = build_real (TREE_TYPE (arg1),
1239 real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
1241 set_float_handler (NULL_PTR);
1244 = (force_fit_type (t, overflow)
1245 | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
1246 TREE_CONSTANT_OVERFLOW (t)
1248 | TREE_CONSTANT_OVERFLOW (arg1)
1249 | TREE_CONSTANT_OVERFLOW (arg2);
1252 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
/* --- Complex constants: recurse componentwise on const_binop.  */
1253 if (TREE_CODE (arg1) == COMPLEX_CST)
1255 register tree r1 = TREE_REALPART (arg1);
1256 register tree i1 = TREE_IMAGPART (arg1);
1257 register tree r2 = TREE_REALPART (arg2);
1258 register tree i2 = TREE_IMAGPART (arg2);
1264 t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
1265 const_binop (PLUS_EXPR, i1, i2, notrunc));
1269 t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
1270 const_binop (MINUS_EXPR, i1, i2, notrunc));
/* (r1+i1 i)*(r2+i2 i) = (r1 r2 - i1 i2) + (r1 i2 + i1 r2) i.  */
1274 t = build_complex (const_binop (MINUS_EXPR,
1275 const_binop (MULT_EXPR,
1277 const_binop (MULT_EXPR,
1280 const_binop (PLUS_EXPR,
1281 const_binop (MULT_EXPR,
1283 const_binop (MULT_EXPR,
/* Division by the squared magnitude of the divisor.  */
1290 register tree magsquared
1291 = const_binop (PLUS_EXPR,
1292 const_binop (MULT_EXPR, r2, r2, notrunc),
1293 const_binop (MULT_EXPR, i2, i2, notrunc),
1297 (const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1298 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1299 const_binop (PLUS_EXPR,
1300 const_binop (MULT_EXPR, r1, r2,
1302 const_binop (MULT_EXPR, i1, i2,
1305 magsquared, notrunc),
1306 const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1307 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1308 const_binop (MINUS_EXPR,
1309 const_binop (MULT_EXPR, i1, r2,
1311 const_binop (MULT_EXPR, r1, i2,
1314 magsquared, notrunc));
1321 TREE_TYPE (t) = TREE_TYPE (arg1);
1327 /* Return an INTEGER_CST with value V and type from `sizetype'. */
1331 unsigned int number;
1334 /* Type-size nodes already made for small sizes. */
1335 static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];
1337 if (number < 2*HOST_BITS_PER_WIDE_INT + 1
1338 && size_table[number] != 0)
1339 return size_table[number];
1340 if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
1342 push_obstacks_nochange ();
1343 /* Make this a permanent node. */
1344 end_temporary_allocation ();
1345 t = build_int_2 (number, 0);
1346 TREE_TYPE (t) = sizetype;
1347 size_table[number] = t;
1352 t = build_int_2 (number, 0);
1353 TREE_TYPE (t) = sizetype;
1358 /* Combine operands OP1 and OP2 with arithmetic operation CODE.
1359 CODE is a tree code. Data type is taken from `sizetype',
1360 If the operands are constant, so is the result. */
1363 size_binop (code, arg0, arg1)
1364 enum tree_code code;
1367 /* Handle the special case of two integer constants faster. */
1368 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1370 /* And some specific cases even faster than that. */
1371 if (code == PLUS_EXPR
1372 && TREE_INT_CST_LOW (arg0) == 0
1373 && TREE_INT_CST_HIGH (arg0) == 0)
1375 if (code == MINUS_EXPR
1376 && TREE_INT_CST_LOW (arg1) == 0
1377 && TREE_INT_CST_HIGH (arg1) == 0)
1379 if (code == MULT_EXPR
1380 && TREE_INT_CST_LOW (arg0) == 1
1381 && TREE_INT_CST_HIGH (arg0) == 0)
1383 /* Handle general case of two integer constants. */
1384 return const_binop (code, arg0, arg1, 1);
1387 if (arg0 == error_mark_node || arg1 == error_mark_node)
1388 return error_mark_node;
1390 return fold (build (code, sizetype, arg0, arg1));
1393 /* Given T, a tree representing type conversion of ARG1, a constant,
1394 return a constant tree representing the result of conversion. */
1397 fold_convert (t, arg1)
1401 register tree type = TREE_TYPE (t);
/* Case 1: converting to a pointer or integral type. */
1404 if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
1406 if (TREE_CODE (arg1) == INTEGER_CST)
1408 /* If we would build a constant wider than GCC supports,
1409 leave the conversion unfolded. */
1410 if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
1413 /* Given an integer constant, make new constant with new type,
1414 appropriately sign-extended or truncated. */
1415 t = build_int_2 (TREE_INT_CST_LOW (arg1),
1416 TREE_INT_CST_HIGH (arg1));
1417 TREE_TYPE (t) = type;
1418 /* Indicate an overflow if (1) ARG1 already overflowed,
1419 or (2) force_fit_type indicates an overflow.
1420 Tell force_fit_type that an overflow has already occurred
1421 if ARG1 is a too-large unsigned value and T is signed. */
1423 = (TREE_OVERFLOW (arg1)
1424 | force_fit_type (t,
1425 (TREE_INT_CST_HIGH (arg1) < 0
1426 & (TREE_UNSIGNED (type)
1427 < TREE_UNSIGNED (TREE_TYPE (arg1))))));
1428 TREE_CONSTANT_OVERFLOW (t)
1429 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
/* Real constant -> integer type: range-check against the target
   type's min/max, then extract the integer value. */
1431 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1432 else if (TREE_CODE (arg1) == REAL_CST)
1434 /* Don't initialize these, use assignments.
1435 Initialized local aggregates don't work on old compilers. */
1440 x = TREE_REAL_CST (arg1);
1441 l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
1442 u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
1443 /* See if X will be in range after truncation towards 0.
1444 To compensate for truncation, move the bounds away from 0,
1445 but reject if X exactly equals the adjusted bounds. */
1446 #ifdef REAL_ARITHMETIC
1447 REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
1448 REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
1453 /* If X is a NaN, use zero instead and show we have an overflow.
1454 Otherwise, range check. */
1455 if (REAL_VALUE_ISNAN (x))
1456 overflow = 1, x = dconst0;
1457 else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
/* Without REAL_ARITHMETIC, split the host double into high and low
   HOST_WIDE_INT halves by repeated division by half_word. */
1460 #ifndef REAL_ARITHMETIC
1462 HOST_WIDE_INT low, high;
1463 HOST_WIDE_INT half_word
1464 = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
1469 high = (HOST_WIDE_INT) (x / half_word / half_word);
1470 x -= (REAL_VALUE_TYPE) high * half_word * half_word;
1471 if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
/* Value has the host sign bit set: bias it down, then restore the
   sign bit with an OR so the cast below cannot overflow. */
1473 low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
1474 low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
1477 low = (HOST_WIDE_INT) x;
1478 if (TREE_REAL_CST (arg1) < 0)
1479 neg_double (low, high, &low, &high);
1480 t = build_int_2 (low, high);
1484 HOST_WIDE_INT low, high;
1485 REAL_VALUE_TO_INT (&low, &high, x);
1486 t = build_int_2 (low, high);
1489 TREE_TYPE (t) = type;
1491 = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
1492 TREE_CONSTANT_OVERFLOW (t)
1493 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1495 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1496 TREE_TYPE (t) = type;
/* Case 2: converting to a floating-point type. */
1498 else if (TREE_CODE (type) == REAL_TYPE)
1500 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1501 if (TREE_CODE (arg1) == INTEGER_CST)
1502 return build_real_from_int_cst (type, arg1);
1503 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1504 if (TREE_CODE (arg1) == REAL_CST)
/* Real -> real: truncate to the target mode under the float_error
   setjmp handler so a host floating trap is caught, not fatal. */
1506 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
1508 else if (setjmp (float_error))
1511 t = copy_node (arg1);
1514 set_float_handler (float_error);
1516 t = build_real (type, real_value_truncate (TYPE_MODE (type),
1517 TREE_REAL_CST (arg1)));
1518 set_float_handler (NULL_PTR);
1522 = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
1523 TREE_CONSTANT_OVERFLOW (t)
1524 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1528 TREE_CONSTANT (t) = 1;
1532 /* Return an expr equal to X but certainly not valid as an lvalue.
1533 Also make sure it is not valid as an null pointer constant. */
1541 /* These things are certainly not lvalues. */
1542 if (TREE_CODE (x) == NON_LVALUE_EXPR
1543 || TREE_CODE (x) == INTEGER_CST
1544 || TREE_CODE (x) == REAL_CST
1545 || TREE_CODE (x) == STRING_CST
1546 || TREE_CODE (x) == ADDR_EXPR)
/* A literal 0 must not become a null pointer constant, so it gets a
   NOP_EXPR wrapper rather than being returned unchanged. */
1548 if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
1550 /* Use NOP_EXPR instead of NON_LVALUE_EXPR
1551 so convert_for_assignment won't strip it.
1552 This is so this 0 won't be treated as a null pointer constant. */
1553 result = build1 (NOP_EXPR, TREE_TYPE (x), x);
1554 TREE_CONSTANT (result) = TREE_CONSTANT (x);
/* General case: wrap in NON_LVALUE_EXPR, preserving constancy. */
1560 result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
1561 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1565 /* When pedantic, return an expr equal to X but certainly not valid as a
1566 pedantic lvalue. Otherwise, return X. */
1569 pedantic_non_lvalue (x)
/* Delegates the wrapping to non_lvalue above. */
1573 return non_lvalue (x);
1578 /* Given a tree comparison code, return the code that is the logical inverse
1579 of the given code. It is not safe to do this for floating-point
1580 comparisons, except for NE_EXPR and EQ_EXPR. */
/* NOTE(review): presumably maps EQ<->NE, GT<->LE, LT<->GE — confirm
   against the switch body.  Unsafe for FP because of unordered (NaN)
   operands, per the header comment above. */
1582 static enum tree_code
1583 invert_tree_comparison (code)
1584 enum tree_code code;
1605 /* Similar, but return the comparison that results if the operands are
1606 swapped. This is safe for floating-point. */
/* NOTE(review): presumably maps GT<->LT and GE<->LE while EQ/NE are
   unchanged — confirm against the switch body. */
1608 static enum tree_code
1609 swap_tree_comparison (code)
1610 enum tree_code code;
1630 /* Return nonzero if CODE is a tree code that represents a truth value. */
1633 truth_value_p (code)
1634 enum tree_code code;
/* Comparisons (class '<') and the logical TRUTH_* operators are the
   tree codes whose result is a 0/1 truth value. */
1636 return (TREE_CODE_CLASS (code) == '<'
1637 || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
1638 || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
1639 || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
1642 /* Return nonzero if two operands are necessarily equal.
1643 If ONLY_CONST is non-zero, only return non-zero for constants.
1644 This function tests whether the operands are indistinguishable;
1645 it does not test whether they are equal using C's == operation.
1646 The distinction is important for IEEE floating point, because
1647 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
1648 (2) two NaNs may be indistinguishable, but NaN!=NaN. */
1651 operand_equal_p (arg0, arg1, only_const)
1655 /* If both types don't have the same signedness, then we can't consider
1656 them equal. We must check this before the STRIP_NOPS calls
1657 because they may change the signedness of the arguments. */
1658 if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
1664 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
1665 We don't care about side effects in that case because the SAVE_EXPR
1666 takes care of that for us. */
1667 if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
1668 return ! only_const;
/* Side effects could make two evaluations differ, so give up. */
1670 if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
1673 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1674 && TREE_CODE (arg0) == ADDR_EXPR
1675 && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
/* Integer constants are equal when both double-word halves match. */
1678 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1679 && TREE_CODE (arg0) == INTEGER_CST
1680 && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
1681 && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
1684 /* Detect when real constants are equal. */
/* Bitwise comparison of the REAL_VALUE_TYPEs, so -0.0 != 0.0 and
   identical NaN bit patterns compare equal — the IEEE distinction
   documented in the header comment. */
1685 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1686 && TREE_CODE (arg0) == REAL_CST)
1687 return !bcmp ((char *) &TREE_REAL_CST (arg0),
1688 (char *) &TREE_REAL_CST (arg1),
1689 sizeof (REAL_VALUE_TYPE));
1697 if (TREE_CODE (arg0) != TREE_CODE (arg1))
1699 /* This is needed for conversions and for COMPONENT_REF.
1700 Might as well play it safe and always test this. */
1701 if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
/* Structural recursion by tree-code class: unary, binary, and a few
   reference forms compare operand-by-operand. */
1704 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
1707 /* Two conversions are equal only if signedness and modes match. */
1708 if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
1709 && (TREE_UNSIGNED (TREE_TYPE (arg0))
1710 != TREE_UNSIGNED (TREE_TYPE (arg1))))
1713 return operand_equal_p (TREE_OPERAND (arg0, 0),
1714 TREE_OPERAND (arg1, 0), 0);
1718 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1719 TREE_OPERAND (arg1, 0), 0)
1720 && operand_equal_p (TREE_OPERAND (arg0, 1),
1721 TREE_OPERAND (arg1, 1), 0));
1724 switch (TREE_CODE (arg0))
1727 return operand_equal_p (TREE_OPERAND (arg0, 0),
1728 TREE_OPERAND (arg1, 0), 0);
1732 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1733 TREE_OPERAND (arg1, 0), 0)
1734 && operand_equal_p (TREE_OPERAND (arg0, 1),
1735 TREE_OPERAND (arg1, 1), 0));
1738 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1739 TREE_OPERAND (arg1, 0), 0)
1740 && operand_equal_p (TREE_OPERAND (arg0, 1),
1741 TREE_OPERAND (arg1, 1), 0)
1742 && operand_equal_p (TREE_OPERAND (arg0, 2),
1743 TREE_OPERAND (arg1, 2), 0));
1751 /* Similar to operand_equal_p, but see if ARG0 might have been made by
1752 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
1754 When in doubt, return 0. */
1757 operand_equal_for_comparison_p (arg0, arg1, other)
1761 int unsignedp1, unsignedpo;
1762 tree primarg1, primother;
1763 unsigned correct_width;
/* Exact structural equality is sufficient. */
1765 if (operand_equal_p (arg0, arg1, 0))
/* Shortening only applies to integral operands. */
1768 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
1771 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
1772 actual comparison operand, ARG0.
1774 First throw away any conversions to wider types
1775 already present in the operands. */
1777 primarg1 = get_narrower (arg1, &unsignedp1);
1778 primother = get_narrower (other, &unsignedpo);
1780 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
1781 if (unsignedp1 == unsignedpo
1782 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
1783 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
1785 tree type = TREE_TYPE (arg0);
1787 /* Make sure shorter operand is extended the right way
1788 to match the longer operand. */
1789 primarg1 = convert (signed_or_unsigned_type (unsignedp1,
1790 TREE_TYPE (primarg1)),
/* Re-widen the narrowed operand to ARG0's type and compare: equality
   here means ARG0 is the shortened form of ARG1. */
1793 if (operand_equal_p (arg0, convert (type, primarg1), 0))
1800 /* See if ARG is an expression that is either a comparison or is performing
1801 arithmetic on comparisons. The comparisons must only be comparing
1802 two different values, which will be stored in *CVAL1 and *CVAL2; if
1803 they are non-zero it means that some operands have already been found.
1804 No variables may be used anywhere else in the expression except in the
1805 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
1806 the expression and save_expr needs to be called with CVAL1 and CVAL2.
1808 If this is true, return 1. Otherwise, return zero. */
1811 twoval_comparison_p (arg, cval1, cval2, save_p)
1813 tree *cval1, *cval2;
1816 enum tree_code code = TREE_CODE (arg);
1817 char class = TREE_CODE_CLASS (code);
1819 /* We can handle some of the 'e' cases here. */
1820 if (class == 'e' && code == TRUTH_NOT_EXPR)
1822 else if (class == 'e'
1823 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
1824 || code == COMPOUND_EXPR))
1827 /* ??? Disable this since the SAVE_EXPR might already be in use outside
1828 the expression. There may be no way to make this work, but it needs
1829 to be looked at again for 2.6. */
1831 else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
1833 /* If we've already found a CVAL1 or CVAL2, this expression is
1834 too complex to handle. */
1835 if (*cval1 || *cval2)
/* Recurse through unary and binary arithmetic on comparisons. */
1846 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
1849 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
1850 && twoval_comparison_p (TREE_OPERAND (arg, 1),
1851 cval1, cval2, save_p));
/* A COND_EXPR requires all three operands to qualify. */
1857 if (code == COND_EXPR)
1858 return (twoval_comparison_p (TREE_OPERAND (arg, 0),
1859 cval1, cval2, save_p)
1860 && twoval_comparison_p (TREE_OPERAND (arg, 1),
1861 cval1, cval2, save_p)
1862 && twoval_comparison_p (TREE_OPERAND (arg, 2),
1863 cval1, cval2, save_p));
1867 /* First see if we can handle the first operand, then the second. For
1868 the second operand, we know *CVAL1 can't be zero. It must be that
1869 one side of the comparison is each of the values; test for the
1870 case where this isn't true by failing if the two operands
1873 if (operand_equal_p (TREE_OPERAND (arg, 0),
1874 TREE_OPERAND (arg, 1), 0))
/* Record or match the first comparison operand against the two
   values seen so far. */
1878 *cval1 = TREE_OPERAND (arg, 0);
1879 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
1881 else if (*cval2 == 0)
1882 *cval2 = TREE_OPERAND (arg, 0);
1883 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
/* Likewise for the second comparison operand. */
1888 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
1890 else if (*cval2 == 0)
1891 *cval2 = TREE_OPERAND (arg, 1);
1892 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
1903 /* ARG is a tree that is known to contain just arithmetic operations and
1904 comparisons. Evaluate the operations in the tree substituting NEW0 for
1905 any occurrence of OLD0 as an operand of a comparison and likewise for
1909 eval_subst (arg, old0, new0, old1, new1)
1911 tree old0, new0, old1, new1;
1913 tree type = TREE_TYPE (arg);
1914 enum tree_code code = TREE_CODE (arg);
1915 char class = TREE_CODE_CLASS (code);
1917 /* We can handle some of the 'e' cases here. */
1918 if (class == 'e' && code == TRUTH_NOT_EXPR)
1920 else if (class == 'e'
1921 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
/* Unary and binary arithmetic: rebuild with substituted operands and
   let fold simplify the result. */
1927 return fold (build1 (code, type,
1928 eval_subst (TREE_OPERAND (arg, 0),
1929 old0, new0, old1, new1)));
1932 return fold (build (code, type,
1933 eval_subst (TREE_OPERAND (arg, 0),
1934 old0, new0, old1, new1),
1935 eval_subst (TREE_OPERAND (arg, 1),
1936 old0, new0, old1, new1)));
/* COMPOUND_EXPR keeps only its value operand; SAVE_EXPR recurses
   into its saved expression. */
1942 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
1945 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
1948 return fold (build (code, type,
1949 eval_subst (TREE_OPERAND (arg, 0),
1950 old0, new0, old1, new1),
1951 eval_subst (TREE_OPERAND (arg, 1),
1952 old0, new0, old1, new1),
1953 eval_subst (TREE_OPERAND (arg, 2),
1954 old0, new0, old1, new1)));
/* Comparison case: substitute NEW0/NEW1 for OLD0/OLD1 directly in
   the comparison's operands. */
1959 tree arg0 = TREE_OPERAND (arg, 0);
1960 tree arg1 = TREE_OPERAND (arg, 1);
1962 /* We need to check both for exact equality and tree equality. The
1963 former will be true if the operand has a side-effect. In that
1964 case, we know the operand occurred exactly once. */
1966 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
1968 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
1971 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
1973 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
1976 return fold (build (code, type, arg0, arg1));
1983 /* Return a tree for the case when the result of an expression is RESULT
1984 converted to TYPE and OMITTED was previously an operand of the expression
1985 but is now not needed (e.g., we folded OMITTED * 0).
1987 If OMITTED has side effects, we must evaluate it. Otherwise, just do
1988 the conversion of RESULT to TYPE. */
1991 omit_one_operand (type, result, omitted)
1992 tree type, result, omitted;
1994 tree t = convert (type, result);
/* Keep OMITTED alive via COMPOUND_EXPR only when dropping it would
   lose a side effect. */
1996 if (TREE_SIDE_EFFECTS (omitted))
1997 return build (COMPOUND_EXPR, type, omitted, t);
1999 return non_lvalue (t);
2002 /* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
2005 pedantic_omit_one_operand (type, result, omitted)
2006 tree type, result, omitted;
2008 tree t = convert (type, result);
/* Same side-effect preservation as omit_one_operand above. */
2010 if (TREE_SIDE_EFFECTS (omitted))
2011 return build (COMPOUND_EXPR, type, omitted, t);
2013 return pedantic_non_lvalue (t);
2018 /* Return a simplified tree node for the truth-negation of ARG. This
2019 never alters ARG itself. We assume that ARG is an operation that
2020 returns a truth value (0 or 1). */
2023 invert_truthvalue (arg)
2026 tree type = TREE_TYPE (arg);
2027 enum tree_code code = TREE_CODE (arg);
2029 if (code == ERROR_MARK)
2032 /* If this is a comparison, we can simply invert it, except for
2033 floating-point non-equality comparisons, in which case we just
2034 enclose a TRUTH_NOT_EXPR around what we have. */
2036 if (TREE_CODE_CLASS (code) == '<')
2038 if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
2039 && code != NE_EXPR && code != EQ_EXPR)
2040 return build1 (TRUTH_NOT_EXPR, type, arg);
2042 return build (invert_tree_comparison (code), type,
2043 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
/* A constant inverts to 1 exactly when both halves are zero. */
2049 return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
2050 && TREE_INT_CST_HIGH (arg) == 0, 0));
/* De Morgan: !(a && b) == !a || !b, and vice versa. */
2052 case TRUTH_AND_EXPR:
2053 return build (TRUTH_OR_EXPR, type,
2054 invert_truthvalue (TREE_OPERAND (arg, 0)),
2055 invert_truthvalue (TREE_OPERAND (arg, 1)));
2058 return build (TRUTH_AND_EXPR, type,
2059 invert_truthvalue (TREE_OPERAND (arg, 0)),
2060 invert_truthvalue (TREE_OPERAND (arg, 1)));
2062 case TRUTH_XOR_EXPR:
2063 /* Here we can invert either operand. We invert the first operand
2064 unless the second operand is a TRUTH_NOT_EXPR in which case our
2065 result is the XOR of the first operand with the inside of the
2066 negation of the second operand. */
2068 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
2069 return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
2070 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
2072 return build (TRUTH_XOR_EXPR, type,
2073 invert_truthvalue (TREE_OPERAND (arg, 0)),
2074 TREE_OPERAND (arg, 1));
/* De Morgan for the short-circuit forms as well. */
2076 case TRUTH_ANDIF_EXPR:
2077 return build (TRUTH_ORIF_EXPR, type,
2078 invert_truthvalue (TREE_OPERAND (arg, 0)),
2079 invert_truthvalue (TREE_OPERAND (arg, 1)));
2081 case TRUTH_ORIF_EXPR:
2082 return build (TRUTH_ANDIF_EXPR, type,
2083 invert_truthvalue (TREE_OPERAND (arg, 0)),
2084 invert_truthvalue (TREE_OPERAND (arg, 1)));
/* Double negation cancels. */
2086 case TRUTH_NOT_EXPR:
2087 return TREE_OPERAND (arg, 0);
/* For COND_EXPR, invert both arms; the condition is untouched. */
2090 return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
2091 invert_truthvalue (TREE_OPERAND (arg, 1)),
2092 invert_truthvalue (TREE_OPERAND (arg, 2)));
2095 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
2096 invert_truthvalue (TREE_OPERAND (arg, 1)));
2098 case NON_LVALUE_EXPR:
2099 return invert_truthvalue (TREE_OPERAND (arg, 0));
2104 return build1 (TREE_CODE (arg), type,
2105 invert_truthvalue (TREE_OPERAND (arg, 0)));
2108 if (!integer_onep (TREE_OPERAND (arg, 1)))
2110 return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
2113 return build1 (TRUTH_NOT_EXPR, type, arg);
/* Fallback: wrap in TRUTH_NOT_EXPR when no rewrite applies. */
2115 if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
2117 return build1 (TRUTH_NOT_EXPR, type, arg);
2120 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2121 operands are another bit-wise operation with a common input. If so,
2122 distribute the bit operations to save an operation and possibly two if
2123 constants are involved. For example, convert
2124 (A | B) & (A | C) into A | (B & C)
2125 Further simplification will occur if B and C are constants.
2127 If this optimization cannot be done, 0 will be returned. */
2130 distribute_bit_expr (code, type, arg0, arg1)
2131 enum tree_code code;
/* Both operands must be the same kind of AND/IOR, and different from
   the outer CODE, or distribution does not apply. */
2138 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2139 || TREE_CODE (arg0) == code
2140 || (TREE_CODE (arg0) != BIT_AND_EXPR
2141 && TREE_CODE (arg0) != BIT_IOR_EXPR))
/* Find the shared operand; the four cases cover every position the
   common term can occupy in the two inner expressions. */
2144 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
2146 common = TREE_OPERAND (arg0, 0);
2147 left = TREE_OPERAND (arg0, 1);
2148 right = TREE_OPERAND (arg1, 1);
2150 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
2152 common = TREE_OPERAND (arg0, 0);
2153 left = TREE_OPERAND (arg0, 1);
2154 right = TREE_OPERAND (arg1, 0);
2156 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
2158 common = TREE_OPERAND (arg0, 1);
2159 left = TREE_OPERAND (arg0, 0);
2160 right = TREE_OPERAND (arg1, 1);
2162 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
2164 common = TREE_OPERAND (arg0, 1);
2165 left = TREE_OPERAND (arg0, 0);
2166 right = TREE_OPERAND (arg1, 0);
/* Rebuild as INNER-OP (common, CODE (left, right)) and fold. */
2171 return fold (build (TREE_CODE (arg0), type, common,
2172 fold (build (code, type, left, right))));
2175 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
2176 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
2179 make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
2182 int bitsize, bitpos;
/* Size and position become sizetype INTEGER_CST operands of the ref. */
2185 tree result = build (BIT_FIELD_REF, type, inner,
2186 size_int (bitsize), size_int (bitpos));
2188 TREE_UNSIGNED (result) = unsignedp;
2193 /* Optimize a bit-field compare.
2195 There are two cases: First is a compare against a constant and the
2196 second is a comparison of two items where the fields are at the same
2197 bit position relative to the start of a chunk (byte, halfword, word)
2198 large enough to contain it. In these cases we can avoid the shift
2199 implicit in bitfield extractions.
2201 For constants, we emit a compare of the shifted constant with the
2202 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2203 compared. For two fields at the same position, we do the ANDs with the
2204 similar mask and compare the result of the ANDs.
2206 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2207 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2208 are the left and right operands of the comparison, respectively.
2210 If the optimization described above can be done, we return the resulting
2211 tree. Otherwise we return zero. */
2214 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2215 enum tree_code code;
2219 int lbitpos, lbitsize, rbitpos, rbitsize;
2220 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
2221 tree type = TREE_TYPE (lhs);
2222 tree signed_type, unsigned_type;
2223 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2224 enum machine_mode lmode, rmode, lnmode, rnmode;
2225 int lunsignedp, runsignedp;
2226 int lvolatilep = 0, rvolatilep = 0;
2227 tree linner, rinner;
2231 /* Get all the information about the extractions being done. If the bit size
2232 if the same as the size of the underlying object, we aren't doing an
2233 extraction at all and so can do nothing. */
2234 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2235 &lunsignedp, &lvolatilep);
2236 if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2242 /* If this is not a constant, we can only do something if bit positions,
2243 sizes, and signedness are the same. */
2244 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
2245 &rmode, &runsignedp, &rvolatilep);
2247 if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
2248 || lunsignedp != runsignedp || offset != 0)
2252 /* See if we can find a mode to refer to this field. We should be able to,
2253 but fail if we can't. */
2254 lnmode = get_best_mode (lbitsize, lbitpos,
2255 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
2257 if (lnmode == VOIDmode)
2260 /* Set signed and unsigned types of the precision of this mode for the
2262 signed_type = type_for_mode (lnmode, 0);
2263 unsigned_type = type_for_mode (lnmode, 1);
/* For the two-field case, the right side needs a mode too. */
2267 rnmode = get_best_mode (rbitsize, rbitpos,
2268 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
2270 if (rnmode == VOIDmode)
2274 /* Compute the bit position and size for the new reference and our offset
2275 within it. If the new reference is the same size as the original, we
2276 won't optimize anything, so return zero. */
2277 lnbitsize = GET_MODE_BITSIZE (lnmode);
2278 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2279 lbitpos -= lnbitpos;
2280 if (lnbitsize == lbitsize)
2285 rnbitsize = GET_MODE_BITSIZE (rnmode);
2286 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2287 rbitpos -= rnbitpos;
2288 if (rnbitsize == rbitsize)
/* Adjust bit position for big-endian bit numbering. */
2292 if (BYTES_BIG_ENDIAN)
2293 lbitpos = lnbitsize - lbitsize - lbitpos;
2295 /* Make the mask to be used against the extracted field. */
/* All-ones constant, truncated to the field width by a shift pair. */
2296 mask = build_int_2 (~0, ~0);
2297 TREE_TYPE (mask) = unsigned_type;
2298 force_fit_type (mask, 0);
2299 mask = convert (unsigned_type, mask);
2300 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2301 mask = const_binop (RSHIFT_EXPR, mask,
2302 size_int (lnbitsize - lbitsize - lbitpos), 0);
2305 /* If not comparing with constant, just rework the comparison
2307 return build (code, compare_type,
2308 build (BIT_AND_EXPR, unsigned_type,
2309 make_bit_field_ref (linner, unsigned_type,
2310 lnbitsize, lnbitpos, 1),
2312 build (BIT_AND_EXPR, unsigned_type,
2313 make_bit_field_ref (rinner, unsigned_type,
2314 rnbitsize, rnbitpos, 1),
2317 /* Otherwise, we are handling the constant case. See if the constant is too
2318 big for the field. Warn and return a tree of for 0 (false) if so. We do
2319 this not only for its own sake, but to avoid having to test for this
2320 error case below. If we didn't, we might generate wrong code.
2322 For unsigned fields, the constant shifted right by the field length should
2323 be all zero. For signed fields, the high-order bits should agree with
2328 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2329 convert (unsigned_type, rhs),
2330 size_int (lbitsize), 0)))
2332 warning ("comparison is always %s due to width of bitfield",
2333 code == NE_EXPR ? "one" : "zero");
2334 return convert (compare_type,
2336 ? integer_one_node : integer_zero_node));
/* Signed field: the bits above the field must all match the sign bit. */
2341 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2342 size_int (lbitsize - 1), 0);
2343 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2345 warning ("comparison is always %s due to width of bitfield",
2346 code == NE_EXPR ? "one" : "zero");
2347 return convert (compare_type,
2349 ? integer_one_node : integer_zero_node));
2353 /* Single-bit compares should always be against zero. */
2354 if (lbitsize == 1 && ! integer_zerop (rhs))
2356 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2357 rhs = convert (type, integer_zero_node);
2360 /* Make a new bitfield reference, shift the constant over the
2361 appropriate number of bits and mask it with the computed mask
2362 (in case this was a signed field). If we changed it, make a new one. */
2363 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
/* Preserve volatility of the original reference on the new one. */
2366 TREE_SIDE_EFFECTS (lhs) = 1;
2367 TREE_THIS_VOLATILE (lhs) = 1;
2370 rhs = fold (const_binop (BIT_AND_EXPR,
2371 const_binop (LSHIFT_EXPR,
2372 convert (unsigned_type, rhs),
2373 size_int (lbitpos), 0),
2376 return build (code, compare_type,
2377 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2381 /* Subroutine for fold_truthop: decode a field reference.
2383 If EXP is a comparison reference, we return the innermost reference.
2385 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2386 set to the starting bit number.
2388 If the innermost field can be completely contained in a mode-sized
2389 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2391 *PVOLATILEP is set to 1 if the any expression encountered is volatile;
2392 otherwise it is not changed.
2394 *PUNSIGNEDP is set to the signedness of the field.
2396 *PMASK is set to the mask used. This is either contained in a
2397 BIT_AND_EXPR or derived from the width of the field.
2399 Return 0 if this is not a component reference or is one that we can't
2400 do anything with. */
2403 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2406 int *pbitsize, *pbitpos;
2407 enum machine_mode *pmode;
2408 int *punsignedp, *pvolatilep;
2412 tree mask, inner, offset;
2416 /* All the optimizations using this function assume integer fields.
2417 There are problems with FP fields since the type_for_size call
2418 below can fail for, e.g., XFmode. */
2419 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
/* Peel off an explicit BIT_AND_EXPR mask, remembering it so it can be
   merged into the computed field mask below. */
2424 if (TREE_CODE (exp) == BIT_AND_EXPR)
2426 and_mask = TREE_OPERAND (exp, 1);
2427 exp = TREE_OPERAND (exp, 0);
2428 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
2429 if (TREE_CODE (and_mask) != INTEGER_CST)
2433 if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
2434 && TREE_CODE (exp) != BIT_FIELD_REF)
2437 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2438 punsignedp, pvolatilep);
2439 if (inner == exp || *pbitsize < 0 || offset != 0)
2442 /* Compute the mask to access the bitfield. */
2443 unsigned_type = type_for_size (*pbitsize, 1);
2444 precision = TYPE_PRECISION (unsigned_type);
/* All-ones constant narrowed to the field width by a shift pair. */
2446 mask = build_int_2 (~0, ~0);
2447 TREE_TYPE (mask) = unsigned_type;
2448 force_fit_type (mask, 0);
2449 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2450 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2452 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
2454 mask = fold (build (BIT_AND_EXPR, unsigned_type,
2455 convert (unsigned_type, and_mask), mask));
2461 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
2465 all_ones_mask_p (mask, size)
2469 tree type = TREE_TYPE (mask);
2470 int precision = TYPE_PRECISION (type);
/* Build the reference mask: all ones, then shift left and right by
   (precision - size) to keep exactly SIZE low-order one bits. */
2473 tmask = build_int_2 (~0, ~0);
2474 TREE_TYPE (tmask) = signed_type (type);
2475 force_fit_type (tmask, 0);
2477 operand_equal_p (mask,
2478 const_binop (RSHIFT_EXPR,
2479 const_binop (LSHIFT_EXPR, tmask,
2480 size_int (precision - size), 0),
2481 size_int (precision - size), 0),
2485 /* Subroutine for fold_truthop: determine if an operand is simple enough
2486 to be evaluated unconditionally. */
2489 simple_operand_p (exp)
2492 /* Strip any conversions that don't change the machine mode. */
2493 while ((TREE_CODE (exp) == NOP_EXPR
2494 || TREE_CODE (exp) == CONVERT_EXPR)
2495 && (TYPE_MODE (TREE_TYPE (exp))
2496 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2497 exp = TREE_OPERAND (exp, 0);
/* Simple means: a constant ('c' class), or a declaration ('d' class)
   that is cheap and safe to load speculatively. */
2499 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2500 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2501 && ! TREE_ADDRESSABLE (exp)
2502 && ! TREE_THIS_VOLATILE (exp)
2503 && ! DECL_NONLOCAL (exp)
2504 /* Don't regard global variables as simple. They may be
2505 allocated in ways unknown to the compiler (shared memory,
2506 #pragma weak, etc). */
2507 && ! TREE_PUBLIC (exp)
2508 && ! DECL_EXTERNAL (exp)
2509 /* Loading a static variable is unduly expensive, but global
2510 registers aren't expensive. */
2511 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2514 /* Subroutine for fold_truthop: try to optimize a range test.
2516 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2518 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2519 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2520 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of
2523 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2524 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2525 larger than HI_CST (they may be equal).
2527 We return the simplified tree or 0 if no optimization is possible. */
2530 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2531 enum tree_code jcode, lo_code, hi_code;
2532 tree type, var, lo_cst, hi_cst;
2535 enum tree_code rcode;
2537 /* See if this is a range test and normalize the constant terms. */
2539 if (jcode == TRUTH_AND_EXPR)
/* AND of two NE tests against adjacent constants. */
2544 /* See if we have VAR != CST && VAR != CST+1. */
2545 if (! (hi_code == NE_EXPR
2546 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2547 && tree_int_cst_equal (integer_one_node,
2548 const_binop (MINUS_EXPR,
2549 hi_cst, lo_cst, 0))))
/* Normalize strict bounds to inclusive ones: VAR < C -> VAR <= C-1,
   VAR > C -> VAR >= C+1. */
2557 if (hi_code == LT_EXPR)
2558 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2559 else if (hi_code != LE_EXPR)
2562 if (lo_code == GT_EXPR)
2563 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2565 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
/* OR case: the dual normalization. */
2578 /* See if we have VAR == CST || VAR == CST+1. */
2579 if (! (hi_code == EQ_EXPR
2580 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2581 && tree_int_cst_equal (integer_one_node,
2582 const_binop (MINUS_EXPR,
2583 hi_cst, lo_cst, 0))))
2591 if (hi_code == GE_EXPR)
2592 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2593 else if (hi_code != GT_EXPR)
2596 if (lo_code == LE_EXPR)
2597 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2599 /* We now have VAR < LO_CST || VAR > HI_CST. */
2608 /* When normalizing, it is possible to both increment the smaller constant
2609 and decrement the larger constant. See if they are still ordered. */
2610 if (tree_int_cst_lt (hi_cst, lo_cst))
2613 /* Fail if VAR isn't an integer. */
2614 utype = TREE_TYPE (var);
2615 if (! INTEGRAL_TYPE_P (utype))
2618 /* The range test is invalid if subtracting the two constants results
2619 in overflow. This can happen in traditional mode. */
2620 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2621 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
/* Do the subtraction and comparison in the unsigned variant of the
   type so wraparound makes the single compare equivalent. */
2624 if (! TREE_UNSIGNED (utype))
2626 utype = unsigned_type (utype);
2627 var = convert (utype, var);
2628 lo_cst = convert (utype, lo_cst);
2629 hi_cst = convert (utype, hi_cst);
/* Emit RCODE ((unsigned) (VAR - LO_CST), HI_CST - LO_CST). */
2632 return fold (convert (type,
2633 build (rcode, utype,
2634 build (MINUS_EXPR, utype, var, lo_cst),
2635 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2638 /* Find ways of folding logical expressions of LHS and RHS:
2639 Try to merge two comparisons to the same innermost item.
2640 Look for range tests like "ch >= '0' && ch <= '9'".
2641 Look for combinations of simple terms on machines with expensive branches
2642 and evaluate the RHS unconditionally.
2644 For example, if we have p->a == 2 && p->b == 4 and we can make an
2645 object large enough to span both A and B, we can do this with a comparison
2646 against the object ANDed with the a mask.
2648 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2649 operations to do this with one comparison.
2651 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2652 function and the one above.
2654 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2655 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2657 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
2660 We return the simplified tree or 0 if no optimization is possible. */
/* Try to merge the comparisons LHS and RHS, combined by CODE, into a single
   bit-field comparison or range test; see the block comment above for the
   full contract.  Returns the simplified tree or 0.
   NOTE(review): this extract elides some original lines (returns, braces,
   and a few statements); comments describe only the visible code.  */
2663 fold_truthop (code, truth_type, lhs, rhs)
2664 enum tree_code code;
2665 tree truth_type, lhs, rhs;
2667 /* If this is the "or" of two comparisons, we can do something if
2668 the comparisons are NE_EXPR. If this is the "and", we can do something
2669 if the comparisons are EQ_EXPR. I.e.,
2670 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2672 WANTED_CODE is this operation code. For single bit fields, we can
2673 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2674 comparison for one-bit fields. */
2676 enum tree_code wanted_code;
2677 enum tree_code lcode, rcode;
/* Naming scheme: the first letter selects the left/right comparison, the
   second its operand 0/1; e.g. lr_* describes operand 1 of LHS.  */
2678 tree ll_arg, lr_arg, rl_arg, rr_arg;
2679 tree ll_inner, lr_inner, rl_inner, rr_inner;
2680 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2681 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2682 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2683 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2684 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2685 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2686 enum machine_mode lnmode, rnmode;
2687 tree ll_mask, lr_mask, rl_mask, rr_mask;
2688 tree l_const, r_const;
2690 int first_bit, end_bit;
2693 /* Start by getting the comparison codes and seeing if this looks like
2694 a range test. Fail if anything is volatile. If one operand is a
2695 BIT_AND_EXPR with the constant one, treat it as if it were surrounded
   by a comparison against zero.  */
2698 if (TREE_SIDE_EFFECTS (lhs)
2699 || TREE_SIDE_EFFECTS (rhs))
2702 lcode = TREE_CODE (lhs);
2703 rcode = TREE_CODE (rhs);
/* Rewrite (x & 1) as (x & 1) != 0 so both operands look like comparisons.  */
2705 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2706 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2708 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2709 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
2711 if (TREE_CODE_CLASS (lcode) != '<'
2712 || TREE_CODE_CLASS (rcode) != '<')
/* Reduce the short-circuit codes to their unconditional equivalents.  */
2715 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2716 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2718 ll_arg = TREE_OPERAND (lhs, 0);
2719 lr_arg = TREE_OPERAND (lhs, 1);
2720 rl_arg = TREE_OPERAND (rhs, 0);
2721 rr_arg = TREE_OPERAND (rhs, 1);
/* If both comparisons test the same expression against integer constants,
   try a range test, passing the constants in ascending order.  */
2723 if (TREE_CODE (lr_arg) == INTEGER_CST
2724 && TREE_CODE (rr_arg) == INTEGER_CST
2725 && operand_equal_p (ll_arg, rl_arg, 0))
2727 if (tree_int_cst_lt (lr_arg, rr_arg))
2728 result = range_test (code, truth_type, lcode, rcode,
2729 ll_arg, lr_arg, rr_arg);
2731 result = range_test (code, truth_type, rcode, lcode,
2732 ll_arg, rr_arg, lr_arg);
2734 /* If this isn't a range test, it also isn't a comparison that
2735 can be merged. However, it wins to evaluate the RHS unconditionally
2736 on machines with expensive branches. */
2738 if (result == 0 && BRANCH_COST >= 2)
2740 if (TREE_CODE (ll_arg) != VAR_DECL
2741 && TREE_CODE (ll_arg) != PARM_DECL)
2743 /* Avoid evaluating the variable part twice. */
2744 ll_arg = save_expr (ll_arg);
2745 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2746 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2748 return build (code, truth_type, lhs, rhs);
2753 /* If the RHS can be evaluated unconditionally and its operands are
2754 simple, it wins to evaluate the RHS unconditionally on machines
2755 with expensive branches. In this case, this isn't a comparison
2756 that can be merged. */
2758 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2759 are with zero (tmw). */
2761 if (BRANCH_COST >= 2
2762 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2763 && simple_operand_p (rl_arg)
2764 && simple_operand_p (rr_arg))
2765 return build (code, truth_type, lhs, rhs);
2767 /* See if the comparisons can be merged. Then get all the parameters for
   each side.  */
2770 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2771 || (rcode != EQ_EXPR && rcode != NE_EXPR))
/* Decompose each comparison operand into a bit-field reference:
   innermost object, bit size/position, mode, signedness and mask.  */
2775 ll_inner = decode_field_reference (ll_arg,
2776 &ll_bitsize, &ll_bitpos, &ll_mode,
2777 &ll_unsignedp, &volatilep, &ll_mask);
2778 lr_inner = decode_field_reference (lr_arg,
2779 &lr_bitsize, &lr_bitpos, &lr_mode,
2780 &lr_unsignedp, &volatilep, &lr_mask);
2781 rl_inner = decode_field_reference (rl_arg,
2782 &rl_bitsize, &rl_bitpos, &rl_mode,
2783 &rl_unsignedp, &volatilep, &rl_mask);
2784 rr_inner = decode_field_reference (rr_arg,
2785 &rr_bitsize, &rr_bitpos, &rr_mode,
2786 &rr_unsignedp, &volatilep, &rr_mask);
2788 /* It must be true that the inner operation on the lhs of each
2789 comparison must be the same if we are to be able to do anything.
2790 Then see if we have constants. If not, the same must be true for
   the operands of the rhs.  */
2792 if (volatilep || ll_inner == 0 || rl_inner == 0
2793 || ! operand_equal_p (ll_inner, rl_inner, 0))
2796 if (TREE_CODE (lr_arg) == INTEGER_CST
2797 && TREE_CODE (rr_arg) == INTEGER_CST)
2798 l_const = lr_arg, r_const = rr_arg;
2799 else if (lr_inner == 0 || rr_inner == 0
2800 || ! operand_equal_p (lr_inner, rr_inner, 0))
2803 l_const = r_const = 0;
2805 /* If either comparison code is not correct for our logical operation,
2806 fail. However, we can convert a one-bit comparison against zero into
2807 the opposite comparison against that bit being set in the field. */
2809 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2810 if (lcode != wanted_code)
2812 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2818 if (rcode != wanted_code)
2820 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2826 /* See if we can find a mode that contains both fields being compared on
2827 the left. If we can't, fail. Otherwise, update all constants and masks
2828 to be relative to a field of that size. */
2829 first_bit = MIN (ll_bitpos, rl_bitpos);
2830 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2831 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2832 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2834 if (lnmode == VOIDmode)
2837 lnbitsize = GET_MODE_BITSIZE (lnmode);
2838 lnbitpos = first_bit & ~ (lnbitsize - 1);
2839 type = type_for_size (lnbitsize, 1);
2840 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
/* On big-endian targets, bit positions count from the opposite end of the
   containing unit, so mirror the offsets.  */
2842 if (BYTES_BIG_ENDIAN)
2844 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2845 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
/* Shift each mask into its position within the wider field.  */
2848 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2849 size_int (xll_bitpos), 0);
2850 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2851 size_int (xrl_bitpos), 0);
2853 /* Make sure the constants are interpreted as unsigned, so we
2854 don't have sign bits outside the range of their type. */
2858 l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
2859 l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
2860 size_int (xll_bitpos), 0);
2864 r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
2865 r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
2866 size_int (xrl_bitpos), 0);
2869 /* If the right sides are not constant, do the same for it. Also,
2870 disallow this optimization if a size or signedness mismatch occurs
2871 between the left and right sides. */
2874 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2875 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2876 /* Make sure the two fields on the right
2877 correspond to the left without being swapped. */
2878 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2881 first_bit = MIN (lr_bitpos, rr_bitpos);
2882 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2883 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2884 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2886 if (rnmode == VOIDmode)
2889 rnbitsize = GET_MODE_BITSIZE (rnmode);
2890 rnbitpos = first_bit & ~ (rnbitsize - 1);
2891 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2893 if (BYTES_BIG_ENDIAN)
2895 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2896 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2899 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2900 size_int (xlr_bitpos), 0);
2901 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2902 size_int (xrr_bitpos), 0);
2904 /* Make a mask that corresponds to both fields being compared.
2905 Do this for both items being compared. If the masks agree,
2906 we can do this by masking both and comparing the masked
   results.  */
2908 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2909 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2910 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2912 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2913 ll_unsignedp || rl_unsignedp);
2914 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2915 lr_unsignedp || rr_unsignedp);
/* The AND can be skipped when the mask covers the whole field.  */
2916 if (! all_ones_mask_p (ll_mask, lnbitsize))
2918 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2919 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2921 return build (wanted_code, truth_type, lhs, rhs);
2924 /* There is still another way we can do something: If both pairs of
2925 fields being compared are adjacent, we may be able to make a wider
2926 field containing them both. */
2927 if ((ll_bitsize + ll_bitpos == rl_bitpos
2928 && lr_bitsize + lr_bitpos == rr_bitpos)
2929 || (ll_bitpos == rl_bitpos + rl_bitsize
2930 && lr_bitpos == rr_bitpos + rr_bitsize))
2931 return build (wanted_code, truth_type,
2932 make_bit_field_ref (ll_inner, type,
2933 ll_bitsize + rl_bitsize,
2934 MIN (ll_bitpos, rl_bitpos),
2936 make_bit_field_ref (lr_inner, type,
2937 lr_bitsize + rr_bitsize,
2938 MIN (lr_bitpos, rr_bitpos),
2944 /* Handle the case of comparisons with constants. If there is something in
2945 common between the masks, those bits of the constants must be the same.
2946 If not, the condition is always false. Test for this to avoid generating
2947 incorrect code below. */
2948 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2949 if (! integer_zerop (result)
2950 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2951 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2953 if (wanted_code == NE_EXPR)
2955 warning ("`or' of unmatched not-equal tests is always 1");
2956 return convert (truth_type, integer_one_node);
2960 warning ("`and' of mutually exclusive equal-tests is always zero");
2961 return convert (truth_type, integer_zero_node);
2965 /* Construct the expression we will return. First get the component
2966 reference we will make. Unless the mask is all ones the width of
2967 that field, perform the mask operation. Then compare with the
   merged constant.  */
2969 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2970 ll_unsignedp || rl_unsignedp);
2972 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2973 if (! all_ones_mask_p (ll_mask, lnbitsize))
2974 result = build (BIT_AND_EXPR, type, result, ll_mask);
2976 return build (wanted_code, truth_type, result,
2977 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
2980 /* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
2981 S, a SAVE_EXPR, return the expression actually being evaluated. Note
2982 that we may sometimes modify the tree. */
/* Recursively walk T and remove the COMPOUND_EXPR that was inserted merely
   to evaluate the SAVE_EXPR S (see the comment above).  The tree is
   modified destructively in place.
   NOTE(review): this extract elides a few original lines (braces, the
   parameter declarations and the final return); comments describe only the
   visible code.  */
2985 strip_compound_expr (t, s)
/* TYPE caches T's type; the code that consumes it is not visible in this
   extract.  */
2989 tree type = TREE_TYPE (t);
2990 enum tree_code code = TREE_CODE (t);
2992 /* See if this is the COMPOUND_EXPR we want to eliminate. */
2993 if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
2994 && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s
2995 return TREE_OPERAND (t, 1);
2997 /* See if this is a COND_EXPR or a simple arithmetic operator. We
2998 don't bother handling any other types. */
2999 else if (code == COND_EXPR)
3001 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3002 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3003 TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
/* Class '1' is a unary operator: recurse into its single operand.  */
3005 else if (TREE_CODE_CLASS (code) == '1')
3006 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
/* Class '<' (comparison) and '2' (binary operator) both have two
   operands: recurse into each.  */
3007 else if (TREE_CODE_CLASS (code) == '<'
3008 || TREE_CODE_CLASS (code) == '2')
3010 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3011 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3017 /* Perform constant folding and related simplification of EXPR.
3018 The related simplifications include x*1 => x, x*0 => 0, etc.,
3019 and application of the associative law.
3020 NOP_EXPR conversions may be removed freely (as long as we
3021 are careful not to change the C type of the overall expression)
3022 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3023 but we can constant-fold them if they have constant operands. */
3029 register tree t = expr;
3030 tree t1 = NULL_TREE;
3032 tree type = TREE_TYPE (expr);
3033 register tree arg0, arg1;
3034 register enum tree_code code = TREE_CODE (t);
3038 /* WINS will be nonzero when the switch is done
3039 if all operands are constant. */
3043 /* Don't try to process an RTL_EXPR since its operands aren't trees. */
3044 if (code == RTL_EXPR)
3047 /* Return right away if already constant. */
3048 if (TREE_CONSTANT (t))
3050 if (code == CONST_DECL)
3051 return DECL_INITIAL (t);
3055 kind = TREE_CODE_CLASS (code);
3056 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3060 /* Special case for conversion ops that can have fixed point args. */
3061 arg0 = TREE_OPERAND (t, 0);
3063 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3065 STRIP_TYPE_NOPS (arg0);
3067 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3068 subop = TREE_REALPART (arg0);
3072 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3073 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3074 && TREE_CODE (subop) != REAL_CST
3075 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3077 /* Note that TREE_CONSTANT isn't enough:
3078 static var addresses are constant but we can't
3079 do arithmetic on them. */
3082 else if (kind == 'e' || kind == '<'
3083 || kind == '1' || kind == '2' || kind == 'r')
3085 register int len = tree_code_length[(int) code];
3087 for (i = 0; i < len; i++)
3089 tree op = TREE_OPERAND (t, i);
3093 continue; /* Valid for CALL_EXPR, at least. */
3095 if (kind == '<' || code == RSHIFT_EXPR)
3097 /* Signedness matters here. Perhaps we can refine this
3099 STRIP_TYPE_NOPS (op);
3103 /* Strip any conversions that don't change the mode. */
3107 if (TREE_CODE (op) == COMPLEX_CST)
3108 subop = TREE_REALPART (op);
3112 if (TREE_CODE (subop) != INTEGER_CST
3113 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3114 && TREE_CODE (subop) != REAL_CST
3115 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3117 /* Note that TREE_CONSTANT isn't enough:
3118 static var addresses are constant but we can't
3119 do arithmetic on them. */
3129 /* If this is a commutative operation, and ARG0 is a constant, move it
3130 to ARG1 to reduce the number of tests below. */
3131 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3132 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3133 || code == BIT_AND_EXPR)
3134 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3136 tem = arg0; arg0 = arg1; arg1 = tem;
3138 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3139 TREE_OPERAND (t, 1) = tem;
3142 /* Now WINS is set as described above,
3143 ARG0 is the first operand of EXPR,
3144 and ARG1 is the second operand (if it has more than one operand).
3146 First check for cases where an arithmetic operation is applied to a
3147 compound, conditional, or comparison operation. Push the arithmetic
3148 operation inside the compound or conditional to see if any folding
3149 can then be done. Convert comparison to conditional for this purpose.
3150 This also optimizes non-constant cases that used to be done in
3153 Before we do that, see if this is a BIT_AND_EXPR or a BIT_OR_EXPR,
3154 one of the operands is a comparison and the other is a comparison, a
3155 BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
3156 code below would make the expression more complex. Change it to a
3157 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
3158 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
3160 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
3161 || code == EQ_EXPR || code == NE_EXPR)
3162 && ((truth_value_p (TREE_CODE (arg0))
3163 && (truth_value_p (TREE_CODE (arg1))
3164 || (TREE_CODE (arg1) == BIT_AND_EXPR
3165 && integer_onep (TREE_OPERAND (arg1, 1)))))
3166 || (truth_value_p (TREE_CODE (arg1))
3167 && (truth_value_p (TREE_CODE (arg0))
3168 || (TREE_CODE (arg0) == BIT_AND_EXPR
3169 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3171 t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
3172 : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
3176 if (code == EQ_EXPR)
3177 t = invert_truthvalue (t);
3182 if (TREE_CODE_CLASS (code) == '1')
3184 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3185 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3186 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3187 else if (TREE_CODE (arg0) == COND_EXPR)
3189 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3190 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3191 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3193 /* If this was a conversion, and all we did was to move into
3194 inside the COND_EXPR, bring it back out. But leave it if
3195 it is a conversion from integer to integer and the
3196 result precision is no wider than a word since such a
3197 conversion is cheap and may be optimized away by combine,
3198 while it couldn't if it were outside the COND_EXPR. Then return
3199 so we don't get into an infinite recursion loop taking the
3200 conversion out and then back in. */
3202 if ((code == NOP_EXPR || code == CONVERT_EXPR
3203 || code == NON_LVALUE_EXPR)
3204 && TREE_CODE (t) == COND_EXPR
3205 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3206 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3207 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3208 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0)))
3209 && ! (INTEGRAL_TYPE_P (TREE_TYPE (t))
3210 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)))
3211 && TYPE_PRECISION (TREE_TYPE (t)) <= BITS_PER_WORD))
3212 t = build1 (code, type,
3214 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3215 TREE_OPERAND (t, 0),
3216 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3217 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3220 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3221 return fold (build (COND_EXPR, type, arg0,
3222 fold (build1 (code, type, integer_one_node)),
3223 fold (build1 (code, type, integer_zero_node))));
3225 else if (TREE_CODE_CLASS (code) == '2'
3226 || TREE_CODE_CLASS (code) == '<')
3228 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3229 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3230 fold (build (code, type,
3231 arg0, TREE_OPERAND (arg1, 1))));
3232 else if (TREE_CODE (arg1) == COND_EXPR
3233 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3235 tree test, true_value, false_value;
3237 if (TREE_CODE (arg1) == COND_EXPR)
3239 test = TREE_OPERAND (arg1, 0);
3240 true_value = TREE_OPERAND (arg1, 1);
3241 false_value = TREE_OPERAND (arg1, 2);
3246 true_value = integer_one_node;
3247 false_value = integer_zero_node;
3250 /* If ARG0 is complex we want to make sure we only evaluate
3251 it once. Though this is only required if it is volatile, it
3252 might be more efficient even if it is not. However, if we
3253 succeed in folding one part to a constant, we do not need
3254 to make this SAVE_EXPR. Since we do this optimization
3255 primarily to see if we do end up with constant and this
3256 SAVE_EXPR interferes with later optimizations, suppressing
3257 it when we can is important. */
3259 if (TREE_CODE (arg0) != SAVE_EXPR
3260 && ((TREE_CODE (arg0) != VAR_DECL
3261 && TREE_CODE (arg0) != PARM_DECL)
3262 || TREE_SIDE_EFFECTS (arg0)))
3264 tree lhs = fold (build (code, type, arg0, true_value));
3265 tree rhs = fold (build (code, type, arg0, false_value));
3267 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3268 return fold (build (COND_EXPR, type, test, lhs, rhs));
3270 arg0 = save_expr (arg0);
3273 test = fold (build (COND_EXPR, type, test,
3274 fold (build (code, type, arg0, true_value)),
3275 fold (build (code, type, arg0, false_value))));
3276 if (TREE_CODE (arg0) == SAVE_EXPR)
3277 return build (COMPOUND_EXPR, type,
3278 convert (void_type_node, arg0),
3279 strip_compound_expr (test, arg0));
3281 return convert (type, test);
3284 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3285 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3286 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3287 else if (TREE_CODE (arg0) == COND_EXPR
3288 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3290 tree test, true_value, false_value;
3292 if (TREE_CODE (arg0) == COND_EXPR)
3294 test = TREE_OPERAND (arg0, 0);
3295 true_value = TREE_OPERAND (arg0, 1);
3296 false_value = TREE_OPERAND (arg0, 2);
3301 true_value = integer_one_node;
3302 false_value = integer_zero_node;
3305 if (TREE_CODE (arg1) != SAVE_EXPR
3306 && ((TREE_CODE (arg1) != VAR_DECL
3307 && TREE_CODE (arg1) != PARM_DECL)
3308 || TREE_SIDE_EFFECTS (arg1)))
3310 tree lhs = fold (build (code, type, true_value, arg1));
3311 tree rhs = fold (build (code, type, false_value, arg1));
3313 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs)
3314 || TREE_CONSTANT (arg1))
3315 return fold (build (COND_EXPR, type, test, lhs, rhs));
3317 arg1 = save_expr (arg1);
3320 test = fold (build (COND_EXPR, type, test,
3321 fold (build (code, type, true_value, arg1)),
3322 fold (build (code, type, false_value, arg1))));
3323 if (TREE_CODE (arg1) == SAVE_EXPR)
3324 return build (COMPOUND_EXPR, type,
3325 convert (void_type_node, arg1),
3326 strip_compound_expr (test, arg1));
3328 return convert (type, test);
3331 else if (TREE_CODE_CLASS (code) == '<'
3332 && TREE_CODE (arg0) == COMPOUND_EXPR)
3333 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3334 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3335 else if (TREE_CODE_CLASS (code) == '<'
3336 && TREE_CODE (arg1) == COMPOUND_EXPR)
3337 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3338 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3350 return fold (DECL_INITIAL (t));
3355 case FIX_TRUNC_EXPR:
3356 /* Other kinds of FIX are not handled properly by fold_convert. */
3358 if (TREE_TYPE (TREE_OPERAND (t, 0)) == TREE_TYPE (t))
3359 return TREE_OPERAND (t, 0);
3361 /* Handle cases of two conversions in a row. */
3362 if (TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3363 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3365 tree inside_type = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3366 tree inter_type = TREE_TYPE (TREE_OPERAND (t, 0));
3367 tree final_type = TREE_TYPE (t);
3368 int inside_int = INTEGRAL_TYPE_P (inside_type);
3369 int inside_ptr = POINTER_TYPE_P (inside_type);
3370 int inside_float = FLOAT_TYPE_P (inside_type);
3371 int inside_prec = TYPE_PRECISION (inside_type);
3372 int inside_unsignedp = TREE_UNSIGNED (inside_type);
3373 int inter_int = INTEGRAL_TYPE_P (inter_type);
3374 int inter_ptr = POINTER_TYPE_P (inter_type);
3375 int inter_float = FLOAT_TYPE_P (inter_type);
3376 int inter_prec = TYPE_PRECISION (inter_type);
3377 int inter_unsignedp = TREE_UNSIGNED (inter_type);
3378 int final_int = INTEGRAL_TYPE_P (final_type);
3379 int final_ptr = POINTER_TYPE_P (final_type);
3380 int final_float = FLOAT_TYPE_P (final_type);
3381 int final_prec = TYPE_PRECISION (final_type);
3382 int final_unsignedp = TREE_UNSIGNED (final_type);
3384 /* In addition to the cases of two conversions in a row
3385 handled below, if we are converting something to its own
3386 type via an object of identical or wider precision, neither
3387 conversion is needed. */
3388 if (inside_type == final_type
3389 && ((inter_int && final_int) || (inter_float && final_float))
3390 && inter_prec >= final_prec)
3391 return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
3393 /* Likewise, if the intermediate and final types are either both
3394 float or both integer, we don't need the middle conversion if
3395 it is wider than the final type and doesn't change the signedness
3396 (for integers). Avoid this if the final type is a pointer
3397 since then we sometimes need the inner conversion. */
3398 if ((((inter_int || inter_ptr) && (inside_int || inside_ptr))
3399 || (inter_float && inside_float))
3400 && inter_prec >= inside_prec
3401 && (inter_float || inter_unsignedp == inside_unsignedp)
3403 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3405 /* Two conversions in a row are not needed unless:
3406 - some conversion is floating-point (overstrict for now), or
3407 - the intermediate type is narrower than both initial and
3409 - the intermediate type and innermost type differ in signedness,
3410 and the outermost type is wider than the intermediate, or
3411 - the initial type is a pointer type and the precisions of the
3412 intermediate and final types differ, or
3413 - the final type is a pointer type and the precisions of the
3414 initial and intermediate types differ. */
3415 if (! inside_float && ! inter_float && ! final_float
3416 && (inter_prec > inside_prec || inter_prec > final_prec)
3417 && ! (inside_int && inter_int
3418 && inter_unsignedp != inside_unsignedp
3419 && inter_prec < final_prec)
3420 && ((inter_unsignedp && inter_prec > inside_prec)
3421 == (final_unsignedp && final_prec > inter_prec))
3422 && ! (inside_ptr && inter_prec != final_prec)
3423 && ! (final_ptr && inside_prec != inter_prec))
3424 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3427 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3428 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3429 /* Detect assigning a bitfield. */
3430 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3431 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3433 /* Don't leave an assignment inside a conversion
3434 unless assigning a bitfield. */
3435 tree prev = TREE_OPERAND (t, 0);
3436 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3437 /* First do the assignment, then return converted constant. */
3438 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3444 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3447 return fold_convert (t, arg0);
3449 #if 0 /* This loses on &"foo"[0]. */
3454 /* Fold an expression like: "foo"[2] */
3455 if (TREE_CODE (arg0) == STRING_CST
3456 && TREE_CODE (arg1) == INTEGER_CST
3457 && !TREE_INT_CST_HIGH (arg1)
3458 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3460 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3461 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3462 force_fit_type (t, 0);
3469 if (TREE_CODE (arg0) == CONSTRUCTOR)
3471 tree m = purpose_member (arg1, CONSTRUCTOR_ELTS (arg0));
3478 TREE_CONSTANT (t) = wins;
3484 if (TREE_CODE (arg0) == INTEGER_CST)
3486 HOST_WIDE_INT low, high;
3487 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3488 TREE_INT_CST_HIGH (arg0),
3490 t = build_int_2 (low, high);
3491 TREE_TYPE (t) = type;
3493 = (TREE_OVERFLOW (arg0)
3494 | force_fit_type (t, overflow));
3495 TREE_CONSTANT_OVERFLOW (t)
3496 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3498 else if (TREE_CODE (arg0) == REAL_CST)
3499 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3500 TREE_TYPE (t) = type;
3502 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3503 return TREE_OPERAND (arg0, 0);
3505 /* Convert - (a - b) to (b - a) for non-floating-point. */
3506 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3507 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3508 TREE_OPERAND (arg0, 0));
3515 if (TREE_CODE (arg0) == INTEGER_CST)
3517 if (! TREE_UNSIGNED (type)
3518 && TREE_INT_CST_HIGH (arg0) < 0)
3520 HOST_WIDE_INT low, high;
3521 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3522 TREE_INT_CST_HIGH (arg0),
3524 t = build_int_2 (low, high);
3525 TREE_TYPE (t) = type;
3527 = (TREE_OVERFLOW (arg0)
3528 | force_fit_type (t, overflow));
3529 TREE_CONSTANT_OVERFLOW (t)
3530 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3533 else if (TREE_CODE (arg0) == REAL_CST)
3535 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3536 t = build_real (type,
3537 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3539 TREE_TYPE (t) = type;
3541 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3542 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3546 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
3548 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
3549 return build (COMPLEX_EXPR, TREE_TYPE (arg0),
3550 TREE_OPERAND (arg0, 0),
3551 fold (build1 (NEGATE_EXPR,
3552 TREE_TYPE (TREE_TYPE (arg0)),
3553 TREE_OPERAND (arg0, 1))));
3554 else if (TREE_CODE (arg0) == COMPLEX_CST)
3555 return build_complex (TREE_OPERAND (arg0, 0),
3556 fold (build1 (NEGATE_EXPR,
3557 TREE_TYPE (TREE_TYPE (arg0)),
3558 TREE_OPERAND (arg0, 1))));
3559 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
3560 return fold (build (TREE_CODE (arg0), type,
3561 fold (build1 (CONJ_EXPR, type,
3562 TREE_OPERAND (arg0, 0))),
3563 fold (build1 (CONJ_EXPR,
3564 type, TREE_OPERAND (arg0, 1)))));
3565 else if (TREE_CODE (arg0) == CONJ_EXPR)
3566 return TREE_OPERAND (arg0, 0);
3572 if (TREE_CODE (arg0) == INTEGER_CST)
3573 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3574 ~ TREE_INT_CST_HIGH (arg0));
3575 TREE_TYPE (t) = type;
3576 force_fit_type (t, 0);
3577 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
3578 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3580 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3581 return TREE_OPERAND (arg0, 0);
3585 /* A + (-B) -> A - B */
3586 if (TREE_CODE (arg1) == NEGATE_EXPR)
3587 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3588 else if (! FLOAT_TYPE_P (type))
3590 if (integer_zerop (arg1))
3591 return non_lvalue (convert (type, arg0));
3593 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3594 with a constant, and the two constants have no bits in common,
3595 we should treat this as a BIT_IOR_EXPR since this may produce more
3597 if (TREE_CODE (arg0) == BIT_AND_EXPR
3598 && TREE_CODE (arg1) == BIT_AND_EXPR
3599 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3600 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3601 && integer_zerop (const_binop (BIT_AND_EXPR,
3602 TREE_OPERAND (arg0, 1),
3603 TREE_OPERAND (arg1, 1), 0)))
3605 code = BIT_IOR_EXPR;
3609 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3610 about the case where C is a constant, just try one of the
3611 four possibilities. */
3613 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3614 && operand_equal_p (TREE_OPERAND (arg0, 1),
3615 TREE_OPERAND (arg1, 1), 0))
3616 return fold (build (MULT_EXPR, type,
3617 fold (build (PLUS_EXPR, type,
3618 TREE_OPERAND (arg0, 0),
3619 TREE_OPERAND (arg1, 0))),
3620 TREE_OPERAND (arg0, 1)));
3622 /* In IEEE floating point, x+0 may not equal x. */
3623 else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3625 && real_zerop (arg1))
3626 return non_lvalue (convert (type, arg0));
3628 /* In most languages, can't associate operations on floats
3629 through parentheses. Rather than remember where the parentheses
3630 were, we don't associate floats at all. It shouldn't matter much.
3631 However, associating multiplications is only very slightly
3632 inaccurate, so do that if -ffast-math is specified. */
3633 if (FLOAT_TYPE_P (type)
3634 && ! (flag_fast_math && code == MULT_EXPR))
3637 /* The varsign == -1 cases happen only for addition and subtraction.
3638 It says that the arg that was split was really CON minus VAR.
3639 The rest of the code applies to all associative operations. */
3645 if (split_tree (arg0, code, &var, &con, &varsign))
3649 /* EXPR is (CON-VAR) +- ARG1. */
3650 /* If it is + and VAR==ARG1, return just CONST. */
3651 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3652 return convert (TREE_TYPE (t), con);
3654 /* If ARG0 is a constant, don't change things around;
3655 instead keep all the constant computations together. */
3657 if (TREE_CONSTANT (arg0))
3660 /* Otherwise return (CON +- ARG1) - VAR. */
3661 TREE_SET_CODE (t, MINUS_EXPR);
3662 TREE_OPERAND (t, 1) = var;
3664 = fold (build (code, TREE_TYPE (t), con, arg1));
3668 /* EXPR is (VAR+CON) +- ARG1. */
3669 /* If it is - and VAR==ARG1, return just CONST. */
3670 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3671 return convert (TREE_TYPE (t), con);
3673 /* If ARG0 is a constant, don't change things around;
3674 instead keep all the constant computations together. */
3676 if (TREE_CONSTANT (arg0))
3679 /* Otherwise return VAR +- (ARG1 +- CON). */
3680 TREE_OPERAND (t, 1) = tem
3681 = fold (build (code, TREE_TYPE (t), arg1, con));
3682 TREE_OPERAND (t, 0) = var;
3683 if (integer_zerop (tem)
3684 && (code == PLUS_EXPR || code == MINUS_EXPR))
3685 return convert (type, var);
3686 /* If we have x +/- (c - d) [c an explicit integer]
3687 change it to x -/+ (d - c) since if d is relocatable
3688 then the latter can be a single immediate insn
3689 and the former cannot. */
3690 if (TREE_CODE (tem) == MINUS_EXPR
3691 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3693 tree tem1 = TREE_OPERAND (tem, 1);
3694 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3695 TREE_OPERAND (tem, 0) = tem1;
3697 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3703 if (split_tree (arg1, code, &var, &con, &varsign))
3705 if (TREE_CONSTANT (arg1))
3710 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3712 /* EXPR is ARG0 +- (CON +- VAR). */
3713 if (TREE_CODE (t) == MINUS_EXPR
3714 && operand_equal_p (var, arg0, 0))
3716 /* If VAR and ARG0 cancel, return just CON or -CON. */
3717 if (code == PLUS_EXPR)
3718 return convert (TREE_TYPE (t), con);
3719 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3720 convert (TREE_TYPE (t), con)));
3724 = fold (build (code, TREE_TYPE (t), arg0, con));
3725 TREE_OPERAND (t, 1) = var;
3726 if (integer_zerop (TREE_OPERAND (t, 0))
3727 && TREE_CODE (t) == PLUS_EXPR)
3728 return convert (TREE_TYPE (t), var);
3733 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3734 if (TREE_CODE (arg1) == REAL_CST)
3736 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3738 t1 = const_binop (code, arg0, arg1, 0);
3739 if (t1 != NULL_TREE)
3741 /* The return value should always have
3742 the same type as the original expression. */
3743 TREE_TYPE (t1) = TREE_TYPE (t);
3749 if (! FLOAT_TYPE_P (type))
3751 if (! wins && integer_zerop (arg0))
3752 return build1 (NEGATE_EXPR, type, arg1);
3753 if (integer_zerop (arg1))
3754 return non_lvalue (convert (type, arg0));
3756 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3757 about the case where C is a constant, just try one of the
3758 four possibilities. */
3760 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3761 && operand_equal_p (TREE_OPERAND (arg0, 1),
3762 TREE_OPERAND (arg1, 1), 0))
3763 return fold (build (MULT_EXPR, type,
3764 fold (build (MINUS_EXPR, type,
3765 TREE_OPERAND (arg0, 0),
3766 TREE_OPERAND (arg1, 0))),
3767 TREE_OPERAND (arg0, 1)));
3769 /* Convert A - (-B) to A + B. */
3770 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3771 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3773 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3776 /* Except with IEEE floating point, 0-x equals -x. */
3777 if (! wins && real_zerop (arg0))
3778 return build1 (NEGATE_EXPR, type, arg1);
3779 /* Except with IEEE floating point, x-0 equals x. */
3780 if (real_zerop (arg1))
3781 return non_lvalue (convert (type, arg0));
3784 /* Fold &x - &x. This can happen from &x.foo - &x.
3785 This is unsafe for certain floats even in non-IEEE formats.
3786 In IEEE, it is unsafe because it does wrong for NaNs.
3787 Also note that operand_equal_p is always false if an operand
3790 if ((! FLOAT_TYPE_P (type) || flag_fast_math)
3791 && operand_equal_p (arg0, arg1, 0))
3792 return convert (type, integer_zero_node);
3797 if (! FLOAT_TYPE_P (type))
3799 if (integer_zerop (arg1))
3800 return omit_one_operand (type, arg1, arg0);
3801 if (integer_onep (arg1))
3802 return non_lvalue (convert (type, arg0));
3804 /* ((A / C) * C) is A if the division is an
3805 EXACT_DIV_EXPR. Since C is normally a constant,
3806 just check for one of the four possibilities. */
3808 if (TREE_CODE (arg0) == EXACT_DIV_EXPR
3809 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3810 return TREE_OPERAND (arg0, 0);
3812 /* (a * (1 << b)) is (a << b) */
3813 if (TREE_CODE (arg1) == LSHIFT_EXPR
3814 && integer_onep (TREE_OPERAND (arg1, 0)))
3815 return fold (build (LSHIFT_EXPR, type, arg0,
3816 TREE_OPERAND (arg1, 1)));
3817 if (TREE_CODE (arg0) == LSHIFT_EXPR
3818 && integer_onep (TREE_OPERAND (arg0, 0)))
3819 return fold (build (LSHIFT_EXPR, type, arg1,
3820 TREE_OPERAND (arg0, 1)));
3824 /* x*0 is 0, except for IEEE floating point. */
3825 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3827 && real_zerop (arg1))
3828 return omit_one_operand (type, arg1, arg0);
3829 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3830 However, ANSI says we can drop signals,
3831 so we can do this anyway. */
3832 if (real_onep (arg1))
3833 return non_lvalue (convert (type, arg0));
3835 if (! wins && real_twop (arg1))
3837 tree arg = save_expr (arg0);
3838 return build (PLUS_EXPR, type, arg, arg);
3845 if (integer_all_onesp (arg1))
3846 return omit_one_operand (type, arg1, arg0);
3847 if (integer_zerop (arg1))
3848 return non_lvalue (convert (type, arg0));
3849 t1 = distribute_bit_expr (code, type, arg0, arg1);
3850 if (t1 != NULL_TREE)
3853 /* (a << C1) | (a >> C2) if A is unsigned and C1+C2 is the size of A
3854 is a rotate of A by C1 bits. */
3856 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3857 || TREE_CODE (arg0) == LSHIFT_EXPR)
3858 && (TREE_CODE (arg1) == RSHIFT_EXPR
3859 || TREE_CODE (arg1) == LSHIFT_EXPR)
3860 && TREE_CODE (arg0) != TREE_CODE (arg1)
3861 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3862 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3863 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3864 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3865 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3866 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3867 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3868 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3869 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3870 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3871 TREE_CODE (arg0) == LSHIFT_EXPR
3872 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
3877 if (integer_zerop (arg1))
3878 return non_lvalue (convert (type, arg0));
3879 if (integer_all_onesp (arg1))
3880 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3885 if (integer_all_onesp (arg1))
3886 return non_lvalue (convert (type, arg0));
3887 if (integer_zerop (arg1))
3888 return omit_one_operand (type, arg1, arg0);
3889 t1 = distribute_bit_expr (code, type, arg0, arg1);
3890 if (t1 != NULL_TREE)
3892 /* Simplify ((int)c & 0x377) into (int)c, if c is unsigned char. */
3893 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3894 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3896 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3897 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3898 && (~TREE_INT_CST_LOW (arg0)
3899 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3900 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3902 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3903 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3905 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3906 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3907 && (~TREE_INT_CST_LOW (arg1)
3908 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3909 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3913 case BIT_ANDTC_EXPR:
3914 if (integer_all_onesp (arg0))
3915 return non_lvalue (convert (type, arg1));
3916 if (integer_zerop (arg0))
3917 return omit_one_operand (type, arg0, arg1);
3918 if (TREE_CODE (arg1) == INTEGER_CST)
3920 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3921 code = BIT_AND_EXPR;
3927 /* In most cases, do nothing with a divide by zero. */
3928 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3929 #ifndef REAL_INFINITY
3930 if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
3933 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3935 /* In IEEE floating point, x/1 is not equivalent to x for snans.
3936 However, ANSI says we can drop signals, so we can do this anyway. */
3937 if (real_onep (arg1))
3938 return non_lvalue (convert (type, arg0));
3940 /* If ARG1 is a constant, we can convert this to a multiply by the
3941 reciprocal. This does not have the same rounding properties,
3942 so only do this if -ffast-math. We can actually always safely
3943 do it if ARG1 is a power of two, but it's hard to tell if it is
3944 or not in a portable manner. */
3945 if (TREE_CODE (arg1) == REAL_CST && flag_fast_math
3946 && 0 != (tem = const_binop (code, build_real (type, dconst1),
3948 return fold (build (MULT_EXPR, type, arg0, tem));
3952 case TRUNC_DIV_EXPR:
3953 case ROUND_DIV_EXPR:
3954 case FLOOR_DIV_EXPR:
3956 case EXACT_DIV_EXPR:
3957 if (integer_onep (arg1))
3958 return non_lvalue (convert (type, arg0));
3959 if (integer_zerop (arg1))
3962 /* If we have ((a / C1) / C2) where both division are the same type, try
3963 to simplify. First see if C1 * C2 overflows or not. */
3964 if (TREE_CODE (arg0) == code && TREE_CODE (arg1) == INTEGER_CST
3965 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
3969 new_divisor = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 1), arg1, 0);
3970 tem = const_binop (FLOOR_DIV_EXPR, new_divisor, arg1, 0);
3972 if (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_LOW (tem)
3973 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_HIGH (tem))
3975 /* If no overflow, divide by C1*C2. */
3976 return fold (build (code, type, TREE_OPERAND (arg0, 0), new_divisor));
3980 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
3981 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
3982 expressions, which often appear in the offsets or sizes of
3983 objects with a varying size. Only deal with positive divisors
3984 and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
3986 Look for NOPs and SAVE_EXPRs inside. */
3988 if (TREE_CODE (arg1) == INTEGER_CST
3989 && tree_int_cst_sgn (arg1) >= 0)
3991 int have_save_expr = 0;
3992 tree c2 = integer_zero_node;
3995 if (TREE_CODE (xarg0) == SAVE_EXPR)
3996 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4000 if (TREE_CODE (xarg0) == PLUS_EXPR
4001 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4002 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4003 else if (TREE_CODE (xarg0) == MINUS_EXPR
4004 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4005 /* If we are doing this computation unsigned, the negate
4007 && ! TREE_UNSIGNED (type))
4009 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4010 xarg0 = TREE_OPERAND (xarg0, 0);
4013 if (TREE_CODE (xarg0) == SAVE_EXPR)
4014 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4018 if (TREE_CODE (xarg0) == MULT_EXPR
4019 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4020 && tree_int_cst_sgn (TREE_OPERAND (xarg0, 1)) >= 0
4021 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
4022 TREE_OPERAND (xarg0, 1), arg1, 1))
4023 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
4024 TREE_OPERAND (xarg0, 1), 1)))
4025 && (tree_int_cst_sgn (c2) >= 0
4026 || integer_zerop (const_binop (TRUNC_MOD_EXPR, c2,
4029 tree outer_div = integer_one_node;
4030 tree c1 = TREE_OPERAND (xarg0, 1);
4033 /* If C3 > C1, set them equal and do a divide by
4034 C3/C1 at the end of the operation. */
4035 if (tree_int_cst_lt (c1, c3))
4036 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
4038 /* The result is A * (C1/C3) + (C2/C3). */
4039 t = fold (build (PLUS_EXPR, type,
4040 fold (build (MULT_EXPR, type,
4041 TREE_OPERAND (xarg0, 0),
4042 const_binop (code, c1, c3, 1))),
4043 const_binop (code, c2, c3, 1)));
4045 if (! integer_onep (outer_div))
4046 t = fold (build (code, type, t, convert (type, outer_div)));
4058 case FLOOR_MOD_EXPR:
4059 case ROUND_MOD_EXPR:
4060 case TRUNC_MOD_EXPR:
4061 if (integer_onep (arg1))
4062 return omit_one_operand (type, integer_zero_node, arg0);
4063 if (integer_zerop (arg1))
4066 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
4067 where C1 % C3 == 0. Handle similarly to the division case,
4068 but don't bother with SAVE_EXPRs. */
4070 if (TREE_CODE (arg1) == INTEGER_CST
4071 && ! integer_zerop (arg1))
4073 tree c2 = integer_zero_node;
4076 if (TREE_CODE (xarg0) == PLUS_EXPR
4077 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4078 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4079 else if (TREE_CODE (xarg0) == MINUS_EXPR
4080 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4081 && ! TREE_UNSIGNED (type))
4083 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4084 xarg0 = TREE_OPERAND (xarg0, 0);
4089 if (TREE_CODE (xarg0) == MULT_EXPR
4090 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4091 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
4092 TREE_OPERAND (xarg0, 1),
4094 && tree_int_cst_sgn (c2) >= 0)
4095 /* The result is (C2%C3). */
4096 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
4097 TREE_OPERAND (xarg0, 0));
4106 if (integer_zerop (arg1))
4107 return non_lvalue (convert (type, arg0));
4108 /* Since negative shift count is not well-defined,
4109 don't try to compute it in the compiler. */
4110 if (tree_int_cst_sgn (arg1) < 0)
4115 if (operand_equal_p (arg0, arg1, 0))
4117 if (INTEGRAL_TYPE_P (type)
4118 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
4119 return omit_one_operand (type, arg1, arg0);
4123 if (operand_equal_p (arg0, arg1, 0))
4125 if (INTEGRAL_TYPE_P (type)
4126 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
4127 return omit_one_operand (type, arg1, arg0);
4130 case TRUTH_NOT_EXPR:
4131 /* Note that the operand of this must be an int
4132 and its values must be 0 or 1.
4133 ("true" is a fixed value perhaps depending on the language,
4134 but we don't handle values other than 1 correctly yet.) */
4135 return invert_truthvalue (arg0);
4137 case TRUTH_ANDIF_EXPR:
4138 /* Note that the operands of this must be ints
4139 and their values must be 0 or 1.
4140 ("true" is a fixed value perhaps depending on the language.) */
4141 /* If first arg is constant zero, return it. */
4142 if (integer_zerop (arg0))
4144 case TRUTH_AND_EXPR:
4145 /* If either arg is constant true, drop it. */
4146 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4147 return non_lvalue (arg1);
4148 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4149 return non_lvalue (arg0);
4150 /* If second arg is constant zero, result is zero, but first arg
4151 must be evaluated. */
4152 if (integer_zerop (arg1))
4153 return omit_one_operand (type, arg1, arg0);
4156 /* We only do these simplifications if we are optimizing. */
4160 /* Check for things like (A || B) && (A || C). We can convert this
4161 to A || (B && C). Note that either operator can be any of the four
4162 truth and/or operations and the transformation will still be
4163 valid. Also note that we only care about order for the
4164 ANDIF and ORIF operators. */
4165 if (TREE_CODE (arg0) == TREE_CODE (arg1)
4166 && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
4167 || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
4168 || TREE_CODE (arg0) == TRUTH_AND_EXPR
4169 || TREE_CODE (arg0) == TRUTH_OR_EXPR))
4171 tree a00 = TREE_OPERAND (arg0, 0);
4172 tree a01 = TREE_OPERAND (arg0, 1);
4173 tree a10 = TREE_OPERAND (arg1, 0);
4174 tree a11 = TREE_OPERAND (arg1, 1);
4175 int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
4176 || TREE_CODE (arg0) == TRUTH_AND_EXPR)
4177 && (code == TRUTH_AND_EXPR
4178 || code == TRUTH_OR_EXPR));
4180 if (operand_equal_p (a00, a10, 0))
4181 return fold (build (TREE_CODE (arg0), type, a00,
4182 fold (build (code, type, a01, a11))));
4183 else if (commutative && operand_equal_p (a00, a11, 0))
4184 return fold (build (TREE_CODE (arg0), type, a00,
4185 fold (build (code, type, a01, a10))));
4186 else if (commutative && operand_equal_p (a01, a10, 0))
4187 return fold (build (TREE_CODE (arg0), type, a01,
4188 fold (build (code, type, a00, a11))));
4190 /* This case if tricky because we must either have commutative
4191 operators or else A10 must not have side-effects. */
4193 else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
4194 && operand_equal_p (a01, a11, 0))
4195 return fold (build (TREE_CODE (arg0), type,
4196 fold (build (code, type, a00, a10)),
4200 /* Check for the possibility of merging component references. If our
4201 lhs is another similar operation, try to merge its rhs with our
4202 rhs. Then try to merge our lhs and rhs. */
4203 if (TREE_CODE (arg0) == code
4204 && 0 != (tem = fold_truthop (code, type,
4205 TREE_OPERAND (arg0, 1), arg1)))
4206 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
4208 if ((tem = fold_truthop (code, type, arg0, arg1)) != 0)
4213 case TRUTH_ORIF_EXPR:
4214 /* Note that the operands of this must be ints
4215 and their values must be 0 or true.
4216 ("true" is a fixed value perhaps depending on the language.) */
4217 /* If first arg is constant true, return it. */
4218 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4221 /* If either arg is constant zero, drop it. */
4222 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
4223 return non_lvalue (arg1);
4224 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
4225 return non_lvalue (arg0);
4226 /* If second arg is constant true, result is true, but we must
4227 evaluate first arg. */
4228 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4229 return omit_one_operand (type, arg1, arg0);
4232 case TRUTH_XOR_EXPR:
4233 /* If either arg is constant zero, drop it. */
4234 if (integer_zerop (arg0))
4235 return non_lvalue (arg1);
4236 if (integer_zerop (arg1))
4237 return non_lvalue (arg0);
4238 /* If either arg is constant true, this is a logical inversion. */
4239 if (integer_onep (arg0))
4240 return non_lvalue (invert_truthvalue (arg1));
4241 if (integer_onep (arg1))
4242 return non_lvalue (invert_truthvalue (arg0));
4251 /* If one arg is a constant integer, put it last. */
4252 if (TREE_CODE (arg0) == INTEGER_CST
4253 && TREE_CODE (arg1) != INTEGER_CST)
4255 TREE_OPERAND (t, 0) = arg1;
4256 TREE_OPERAND (t, 1) = arg0;
4257 arg0 = TREE_OPERAND (t, 0);
4258 arg1 = TREE_OPERAND (t, 1);
4259 code = swap_tree_comparison (code);
4260 TREE_SET_CODE (t, code);
4263 /* Convert foo++ == CONST into ++foo == CONST + INCR.
4264 First, see if one arg is constant; find the constant arg
4265 and the other one. */
4267 tree constop = 0, varop;
4270 if (TREE_CONSTANT (arg1))
4271 constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
4272 if (TREE_CONSTANT (arg0))
4273 constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
4275 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
4277 /* This optimization is invalid for ordered comparisons
4278 if CONST+INCR overflows or if foo+incr might overflow.
4279 This optimization is invalid for floating point due to rounding.
4280 For pointer types we assume overflow doesn't happen. */
4281 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4282 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4283 && (code == EQ_EXPR || code == NE_EXPR)))
4286 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
4287 constop, TREE_OPERAND (varop, 1)));
4288 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
4289 *constoploc = newconst;
4293 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
4295 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4296 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4297 && (code == EQ_EXPR || code == NE_EXPR)))
4300 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
4301 constop, TREE_OPERAND (varop, 1)));
4302 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
4303 *constoploc = newconst;
4309 /* Change X >= CST to X > (CST - 1) if CST is positive. */
4310 if (TREE_CODE (arg1) == INTEGER_CST
4311 && TREE_CODE (arg0) != INTEGER_CST
4312 && tree_int_cst_sgn (arg1) > 0)
4314 switch (TREE_CODE (t))
4318 TREE_SET_CODE (t, code);
4319 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4320 TREE_OPERAND (t, 1) = arg1;
4325 TREE_SET_CODE (t, code);
4326 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4327 TREE_OPERAND (t, 1) = arg1;
4331 /* If this is an EQ or NE comparison with zero and ARG0 is
4332 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
4333 two operations, but the latter can be done in one less insn
4334 one machine that have only two-operand insns or on which a
4335 constant cannot be the first operand. */
4336 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
4337 && TREE_CODE (arg0) == BIT_AND_EXPR)
4339 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
4340 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
4342 fold (build (code, type,
4343 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4345 TREE_TYPE (TREE_OPERAND (arg0, 0)),
4346 TREE_OPERAND (arg0, 1),
4347 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
4348 convert (TREE_TYPE (arg0),
4351 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
4352 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
4354 fold (build (code, type,
4355 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4357 TREE_TYPE (TREE_OPERAND (arg0, 1)),
4358 TREE_OPERAND (arg0, 0),
4359 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
4360 convert (TREE_TYPE (arg0),
4365 /* If this is an NE or EQ comparison of zero against the result of a
4366 signed MOD operation whose second operand is a power of 2, make
4367 the MOD operation unsigned since it is simpler and equivalent. */
4368 if ((code == NE_EXPR || code == EQ_EXPR)
4369 && integer_zerop (arg1)
4370 && ! TREE_UNSIGNED (TREE_TYPE (arg0))
4371 && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
4372 || TREE_CODE (arg0) == CEIL_MOD_EXPR
4373 || TREE_CODE (arg0) == FLOOR_MOD_EXPR
4374 || TREE_CODE (arg0) == ROUND_MOD_EXPR)
4375 && integer_pow2p (TREE_OPERAND (arg0, 1)))
4377 tree newtype = unsigned_type (TREE_TYPE (arg0));
4378 tree newmod = build (TREE_CODE (arg0), newtype,
4379 convert (newtype, TREE_OPERAND (arg0, 0)),
4380 convert (newtype, TREE_OPERAND (arg0, 1)));
4382 return build (code, type, newmod, convert (newtype, arg1));
4385 /* If this is an NE comparison of zero with an AND of one, remove the
4386 comparison since the AND will give the correct value. */
4387 if (code == NE_EXPR && integer_zerop (arg1)
4388 && TREE_CODE (arg0) == BIT_AND_EXPR
4389 && integer_onep (TREE_OPERAND (arg0, 1)))
4390 return convert (type, arg0);
4392 /* If we have (A & C) == C where C is a power of 2, convert this into
4393 (A & C) != 0. Similarly for NE_EXPR. */
4394 if ((code == EQ_EXPR || code == NE_EXPR)
4395 && TREE_CODE (arg0) == BIT_AND_EXPR
4396 && integer_pow2p (TREE_OPERAND (arg0, 1))
4397 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
4398 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
4399 arg0, integer_zero_node);
4401 /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
4402 and similarly for >= into !=. */
4403 if ((code == LT_EXPR || code == GE_EXPR)
4404 && TREE_UNSIGNED (TREE_TYPE (arg0))
4405 && TREE_CODE (arg1) == LSHIFT_EXPR
4406 && integer_onep (TREE_OPERAND (arg1, 0)))
4407 return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4408 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4409 TREE_OPERAND (arg1, 1)),
4410 convert (TREE_TYPE (arg0), integer_zero_node));
4412 else if ((code == LT_EXPR || code == GE_EXPR)
4413 && TREE_UNSIGNED (TREE_TYPE (arg0))
4414 && (TREE_CODE (arg1) == NOP_EXPR
4415 || TREE_CODE (arg1) == CONVERT_EXPR)
4416 && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
4417 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
4419 build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4420 convert (TREE_TYPE (arg0),
4421 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4422 TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
4423 convert (TREE_TYPE (arg0), integer_zero_node));
4425 /* Simplify comparison of something with itself. (For IEEE
4426 floating-point, we can only do some of these simplifications.) */
4427 if (operand_equal_p (arg0, arg1, 0))
4434 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4436 t = build_int_2 (1, 0);
4437 TREE_TYPE (t) = type;
4441 TREE_SET_CODE (t, code);
4445 /* For NE, we can only do this simplification if integer. */
4446 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4448 /* ... fall through ... */
4451 t = build_int_2 (0, 0);
4452 TREE_TYPE (t) = type;
4457 /* An unsigned comparison against 0 can be simplified. */
4458 if (integer_zerop (arg1)
4459 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
4460 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
4461 && TREE_UNSIGNED (TREE_TYPE (arg1)))
4463 switch (TREE_CODE (t))
4467 TREE_SET_CODE (t, NE_EXPR);
4471 TREE_SET_CODE (t, EQ_EXPR);
4474 return omit_one_operand (type,
4475 convert (type, integer_one_node),
4478 return omit_one_operand (type,
4479 convert (type, integer_zero_node),
4484 /* If we are comparing an expression that just has comparisons
4485 of two integer values, arithmetic expressions of those comparisons,
4486 and constants, we can simplify it. There are only three cases
4487 to check: the two values can either be equal, the first can be
4488 greater, or the second can be greater. Fold the expression for
4489 those three values. Since each value must be 0 or 1, we have
4490 eight possibilities, each of which corresponds to the constant 0
4491 or 1 or one of the six possible comparisons.
4493 This handles common cases like (a > b) == 0 but also handles
4494 expressions like ((x > y) - (y > x)) > 0, which supposedly
4495 occur in macroized code. */
4497 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
4499 tree cval1 = 0, cval2 = 0;
4502 if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
4503 /* Don't handle degenerate cases here; they should already
4504 have been handled anyway. */
4505 && cval1 != 0 && cval2 != 0
4506 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
4507 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
4508 && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
4509 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
4510 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
4512 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
4513 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
4515 /* We can't just pass T to eval_subst in case cval1 or cval2
4516 was the same as ARG1. */
4519 = fold (build (code, type,
4520 eval_subst (arg0, cval1, maxval, cval2, minval),
4523 = fold (build (code, type,
4524 eval_subst (arg0, cval1, maxval, cval2, maxval),
4527 = fold (build (code, type,
4528 eval_subst (arg0, cval1, minval, cval2, maxval),
4531 /* All three of these results should be 0 or 1. Confirm they
4532 are. Then use those values to select the proper code
4535 if ((integer_zerop (high_result)
4536 || integer_onep (high_result))
4537 && (integer_zerop (equal_result)
4538 || integer_onep (equal_result))
4539 && (integer_zerop (low_result)
4540 || integer_onep (low_result)))
4542 /* Make a 3-bit mask with the high-order bit being the
4543 value for `>', the next for '=', and the low for '<'. */
4544 switch ((integer_onep (high_result) * 4)
4545 + (integer_onep (equal_result) * 2)
4546 + integer_onep (low_result))
4550 return omit_one_operand (type, integer_zero_node, arg0);
4571 return omit_one_operand (type, integer_one_node, arg0);
4574 t = build (code, type, cval1, cval2);
4576 return save_expr (t);
4583 /* If this is a comparison of a field, we may be able to simplify it. */
4584 if ((TREE_CODE (arg0) == COMPONENT_REF
4585 || TREE_CODE (arg0) == BIT_FIELD_REF)
4586 && (code == EQ_EXPR || code == NE_EXPR)
4587 /* Handle the constant case even without -O
4588 to make sure the warnings are given. */
4589 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
4591 t1 = optimize_bit_field_compare (code, type, arg0, arg1);
4595 /* If this is a comparison of complex values and either or both
4596 sizes are a COMPLEX_EXPR, it is best to split up the comparisons
4597 and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR. This
4598 may prevent needless evaluations. */
4599 if ((code == EQ_EXPR || code == NE_EXPR)
4600 && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
4601 && (TREE_CODE (arg0) == COMPLEX_EXPR
4602 || TREE_CODE (arg1) == COMPLEX_EXPR))
4604 tree subtype = TREE_TYPE (TREE_TYPE (arg0));
4605 tree real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
4606 tree imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
4607 tree real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
4608 tree imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));
4610 return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
4613 fold (build (code, type, real0, real1)),
4614 fold (build (code, type, imag0, imag1))));
4617 /* From here on, the only cases we handle are when the result is
4618 known to be a constant.
4620 To compute GT, swap the arguments and do LT.
4621 To compute GE, do LT and invert the result.
4622 To compute LE, swap the arguments, do LT and invert the result.
4623 To compute NE, do EQ and invert the result.
4625 Therefore, the code below must handle only EQ and LT. */
4627 if (code == LE_EXPR || code == GT_EXPR)
4629 tem = arg0, arg0 = arg1, arg1 = tem;
4630 code = swap_tree_comparison (code);
4633 /* Note that it is safe to invert for real values here because we
4634 will check below in the one case that it matters. */
4637 if (code == NE_EXPR || code == GE_EXPR)
4640 code = invert_tree_comparison (code);
4643 /* Compute a result for LT or EQ if args permit;
4644 otherwise return T. */
4645 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
4647 if (code == EQ_EXPR)
4648 t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
4649 == TREE_INT_CST_LOW (arg1))
4650 && (TREE_INT_CST_HIGH (arg0)
4651 == TREE_INT_CST_HIGH (arg1)),
4654 t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
4655 ? INT_CST_LT_UNSIGNED (arg0, arg1)
4656 : INT_CST_LT (arg0, arg1)),
4660 /* Assume a nonexplicit constant cannot equal an explicit one,
4661 since such code would be undefined anyway.
4662 Exception: on sysvr4, using #pragma weak,
4663 a label can come out as 0. */
4664 else if (TREE_CODE (arg1) == INTEGER_CST
4665 && !integer_zerop (arg1)
4666 && TREE_CONSTANT (arg0)
4667 && TREE_CODE (arg0) == ADDR_EXPR
4669 t1 = build_int_2 (0, 0);
4671 /* Two real constants can be compared explicitly. */
4672 else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
4674 /* If either operand is a NaN, the result is false with two
4675 exceptions: First, an NE_EXPR is true on NaNs, but that case
4676 is already handled correctly since we will be inverting the
4677 result for NE_EXPR. Second, if we had inverted a LE_EXPR
4678 or a GE_EXPR into a LT_EXPR, we must return true so that it
4679 will be inverted into false. */
4681 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
4682 || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
4683 t1 = build_int_2 (invert && code == LT_EXPR, 0);
4685 else if (code == EQ_EXPR)
4686 t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
4687 TREE_REAL_CST (arg1)),
4690 t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
4691 TREE_REAL_CST (arg1)),
4695 if (t1 == NULL_TREE)
4699 TREE_INT_CST_LOW (t1) ^= 1;
4701 TREE_TYPE (t1) = type;
4705 /* Pedantic ANSI C says that a conditional expression is never an lvalue,
4706 so all simple results must be passed through pedantic_non_lvalue. */
4707 if (TREE_CODE (arg0) == INTEGER_CST)
4708 return pedantic_non_lvalue
4709 (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
4710 else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
4711 return pedantic_omit_one_operand (type, arg1, arg0);
4713 /* If the second operand is zero, invert the comparison and swap
4714 the second and third operands. Likewise if the second operand
4715 is constant and the third is not or if the third operand is
4716 equivalent to the first operand of the comparison. */
4718 if (integer_zerop (arg1)
4719 || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
4720 || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4721 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4722 TREE_OPERAND (t, 2),
4723 TREE_OPERAND (arg0, 1))))
4725 /* See if this can be inverted. If it can't, possibly because
4726 it was a floating-point inequality comparison, don't do
4728 tem = invert_truthvalue (arg0);
4730 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4732 arg0 = TREE_OPERAND (t, 0) = tem;
4733 arg1 = TREE_OPERAND (t, 2);
4734 TREE_OPERAND (t, 2) = TREE_OPERAND (t, 1);
4735 TREE_OPERAND (t, 1) = arg1;
4740 /* If we have A op B ? A : C, we may be able to convert this to a
4741 simpler expression, depending on the operation and the values
4742 of B and C. IEEE floating point prevents this though,
4743 because A or B might be -0.0 or a NaN. */
4745 if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4746 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4747 || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
4749 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4750 arg1, TREE_OPERAND (arg0, 1)))
4752 tree arg2 = TREE_OPERAND (t, 2);
4753 enum tree_code comp_code = TREE_CODE (arg0);
4757 /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
4758 depending on the comparison operation. */
4759 if ((FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 1)))
4760 ? real_zerop (TREE_OPERAND (arg0, 1))
4761 : integer_zerop (TREE_OPERAND (arg0, 1)))
4762 && TREE_CODE (arg2) == NEGATE_EXPR
4763 && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
4767 return pedantic_non_lvalue
4768 (fold (build1 (NEGATE_EXPR, type, arg1)));
4770 return pedantic_non_lvalue (convert (type, arg1));
4773 return pedantic_non_lvalue
4774 (fold (build1 (ABS_EXPR, type, arg1)));
4777 return pedantic_non_lvalue
4778 (fold (build1 (NEGATE_EXPR, type,
4779 fold (build1 (ABS_EXPR, type, arg1)))));
4782 /* If this is A != 0 ? A : 0, this is simply A. For ==, it is
4785 if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
4787 if (comp_code == NE_EXPR)
4788 return pedantic_non_lvalue (convert (type, arg1));
4789 else if (comp_code == EQ_EXPR)
4790 return pedantic_non_lvalue (convert (type, integer_zero_node));
4793 /* If this is A op B ? A : B, this is either A, B, min (A, B),
4794 or max (A, B), depending on the operation. */
4796 if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
4797 arg2, TREE_OPERAND (arg0, 0)))
4799 tree comp_op0 = TREE_OPERAND (arg0, 0);
4800 tree comp_op1 = TREE_OPERAND (arg0, 1);
4801 tree comp_type = TREE_TYPE (comp_op0);
4806 return pedantic_non_lvalue (convert (type, arg2));
4808 return pedantic_non_lvalue (convert (type, arg1));
4811 return pedantic_non_lvalue
4812 (convert (type, (fold (build (MIN_EXPR, comp_type,
4813 comp_op0, comp_op1)))));
4816 return pedantic_non_lvalue
4817 (convert (type, fold (build (MAX_EXPR, comp_type,
4818 comp_op0, comp_op1))));
4822 /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
4823 we might still be able to simplify this. For example,
4824 if C1 is one less or one more than C2, this might have started
4825 out as a MIN or MAX and been transformed by this function.
4826 Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE. */
4828 if (INTEGRAL_TYPE_P (type)
4829 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4830 && TREE_CODE (arg2) == INTEGER_CST)
4834 /* We can replace A with C1 in this case. */
4835 arg1 = TREE_OPERAND (t, 1)
4836 = convert (type, TREE_OPERAND (arg0, 1));
4840 /* If C1 is C2 + 1, this is min(A, C2). */
4841 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4842 && operand_equal_p (TREE_OPERAND (arg0, 1),
4843 const_binop (PLUS_EXPR, arg2,
4844 integer_one_node, 0), 1))
4845 return pedantic_non_lvalue
4846 (fold (build (MIN_EXPR, type, arg1, arg2)));
4850 /* If C1 is C2 - 1, this is min(A, C2). */
4851 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4852 && operand_equal_p (TREE_OPERAND (arg0, 1),
4853 const_binop (MINUS_EXPR, arg2,
4854 integer_one_node, 0), 1))
4855 return pedantic_non_lvalue
4856 (fold (build (MIN_EXPR, type, arg1, arg2)));
4860 /* If C1 is C2 - 1, this is max(A, C2). */
4861 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4862 && operand_equal_p (TREE_OPERAND (arg0, 1),
4863 const_binop (MINUS_EXPR, arg2,
4864 integer_one_node, 0), 1))
4865 return pedantic_non_lvalue
4866 (fold (build (MAX_EXPR, type, arg1, arg2)));
4870 /* If C1 is C2 + 1, this is max(A, C2). */
4871 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4872 && operand_equal_p (TREE_OPERAND (arg0, 1),
4873 const_binop (PLUS_EXPR, arg2,
4874 integer_one_node, 0), 1))
4875 return pedantic_non_lvalue
4876 (fold (build (MAX_EXPR, type, arg1, arg2)));
4881 /* If the second operand is simpler than the third, swap them
4882 since that produces better jump optimization results. */
4883 if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
4884 || TREE_CODE (arg1) == SAVE_EXPR)
4885 && ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
4886 || TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
4887 || TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
4889 /* See if this can be inverted. If it can't, possibly because
4890 it was a floating-point inequality comparison, don't do
4892 tem = invert_truthvalue (arg0);
4894 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4896 arg0 = TREE_OPERAND (t, 0) = tem;
4897 arg1 = TREE_OPERAND (t, 2);
4898 TREE_OPERAND (t, 2) = TREE_OPERAND (t, 1);
4899 TREE_OPERAND (t, 1) = arg1;
4904 /* Convert A ? 1 : 0 to simply A. */
4905 if (integer_onep (TREE_OPERAND (t, 1))
4906 && integer_zerop (TREE_OPERAND (t, 2))
4907 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
4908 call to fold will try to move the conversion inside
4909 a COND, which will recurse. In that case, the COND_EXPR
4910 is probably the best choice, so leave it alone. */
4911 && type == TREE_TYPE (arg0))
4912 return pedantic_non_lvalue (arg0);
4914 /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
4915 operation is simply A & 2. */
4917 if (integer_zerop (TREE_OPERAND (t, 2))
4918 && TREE_CODE (arg0) == NE_EXPR
4919 && integer_zerop (TREE_OPERAND (arg0, 1))
4920 && integer_pow2p (arg1)
4921 && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
4922 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
4924 return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
4929 /* When pedantic, a compound expression can be neither an lvalue
4930 nor an integer constant expression. */
4931 if (TREE_SIDE_EFFECTS (arg0) || pedantic)
4933 /* Don't let (0, 0) be null pointer constant. */
4934 if (integer_zerop (arg1))
4935 return non_lvalue (arg1);
4940 return build_complex (arg0, arg1);
4944 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
4946 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
4947 return omit_one_operand (type, TREE_OPERAND (arg0, 0),
4948 TREE_OPERAND (arg0, 1));
4949 else if (TREE_CODE (arg0) == COMPLEX_CST)
4950 return TREE_REALPART (arg0);
4951 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
4952 return fold (build (TREE_CODE (arg0), type,
4953 fold (build1 (REALPART_EXPR, type,
4954 TREE_OPERAND (arg0, 0))),
4955 fold (build1 (REALPART_EXPR,
4956 type, TREE_OPERAND (arg0, 1)))));
4960 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
4961 return convert (type, integer_zero_node);
4962 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
4963 return omit_one_operand (type, TREE_OPERAND (arg0, 1),
4964 TREE_OPERAND (arg0, 0));
4965 else if (TREE_CODE (arg0) == COMPLEX_CST)
4966 return TREE_IMAGPART (arg0);
4967 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
4968 return fold (build (TREE_CODE (arg0), type,
4969 fold (build1 (IMAGPART_EXPR, type,
4970 TREE_OPERAND (arg0, 0))),
4971 fold (build1 (IMAGPART_EXPR, type,
4972 TREE_OPERAND (arg0, 1)))));
4977 } /* switch (code) */