1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 88, 92, 93, 94, 1995 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
20 /*@@ This file should be rewritten to use an arbitrary precision
21 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
22 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
23 @@ The routines that translate from the ap rep should
24 @@ warn if precision etc. is lost.
25 @@ This would also make life easier when this technology is used
26 @@ for cross-compilers. */
29 /* The entry points in this file are fold, size_int and size_binop.
31 fold takes a tree as argument and returns a simplified tree.
33 size_binop takes a tree code for an arithmetic operation
34 and two operands that are trees, and produces a tree for the
35 result, assuming the type comes from `sizetype'.
37 size_int takes an integer value, and creates a tree constant
38 with type from `sizetype'. */
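/* A minimal usage sketch of the entry points (illustrative only; `x'
   stands for some tree of type `integer_type_node' and is not defined
   here):

	tree bytes = size_binop (MULT_EXPR, size_int (4), size_int (8));
	tree sum = fold (build (PLUS_EXPR, integer_type_node, x,
				integer_zero_node));

   BYTES is an INTEGER_CST for 32 with type `sizetype'; fold collapses
   X + 0 down to (a non-lvalue copy of) X.  */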
46 /* Handle floating overflow for `const_binop'. */
47 static jmp_buf float_error;
49 static void encode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT, HOST_WIDE_INT));
50 static void decode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *, HOST_WIDE_INT *));
51 int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
52 HOST_WIDE_INT, HOST_WIDE_INT,
53 HOST_WIDE_INT, HOST_WIDE_INT *,
54 HOST_WIDE_INT *, HOST_WIDE_INT *,
56 static int split_tree PROTO((tree, enum tree_code, tree *, tree *, int *));
57 static tree const_binop PROTO((enum tree_code, tree, tree, int));
58 static tree fold_convert PROTO((tree, tree));
59 static enum tree_code invert_tree_comparison PROTO((enum tree_code));
60 static enum tree_code swap_tree_comparison PROTO((enum tree_code));
61 static int truth_value_p PROTO((enum tree_code));
62 static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
63 static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
64 static tree eval_subst PROTO((tree, tree, tree, tree, tree));
65 static tree omit_one_operand PROTO((tree, tree, tree));
66 static tree pedantic_omit_one_operand PROTO((tree, tree, tree));
67 static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
68 static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
69 static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
71 static tree decode_field_reference PROTO((tree, int *, int *,
72 enum machine_mode *, int *,
74 static int all_ones_mask_p PROTO((tree, int));
75 static int simple_operand_p PROTO((tree));
76 static tree range_test PROTO((enum tree_code, tree, enum tree_code,
77 enum tree_code, tree, tree, tree));
78 static tree unextend PROTO((tree, int, int));
79 static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
80 static tree strip_compound_expr PROTO((tree, tree));
86 /* Yield nonzero if a signed left shift of A by B bits overflows. */
87 #define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))
89 /* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
90 Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
91 Then this yields nonzero if overflow occurred during the addition.
92 Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
93 Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
94 #define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
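/* Worked example, using 8-bit values purely for readability (the macro
   really operates on HOST_WIDE_INT): with a = 0x70 and b = 0x20 the
   true sum is 0x90, which reads as negative, so

	~(a ^ b)  = ~0x50 = 0xaf	sign bits of A and B agree
	 a ^ sum  =         0xe0	sign of SUM differs from A
	 AND of the two     0xa0	negative, so overflow is reported.

   If A and B have opposite signs, ~(a ^ b) has a clear sign bit and the
   macro yields zero, since such an addition can never overflow.  */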
96 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
97 We do that by representing the two-word integer in 4 words, with only
98 HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number. */
100 #define LOWPART(x) \
101 ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
102 #define HIGHPART(x) \
103 ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
104 #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
106 /* Unpack a two-word integer into 4 words.
107 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
108 WORDS points to the array of HOST_WIDE_INTs. */
111 encode (words, low, hi)
112 HOST_WIDE_INT *words;
113 HOST_WIDE_INT low, hi;
115 words[0] = LOWPART (low);
116 words[1] = HIGHPART (low);
117 words[2] = LOWPART (hi);
118 words[3] = HIGHPART (hi);
121 /* Pack an array of 4 words into a two-word integer.
122 WORDS points to the array of words.
123 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
126 decode (words, low, hi)
127 HOST_WIDE_INT *words;
128 HOST_WIDE_INT *low, *hi;
130 *low = words[0] | words[1] * BASE;
131 *hi = words[2] | words[3] * BASE;
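/* An illustrative round trip through the half-word representation used
   by the multiply and divide routines below (guarded out; a sketch,
   not part of the folder itself).  */
#if 0
static void
example_encode_decode ()
{
  HOST_WIDE_INT parts[4];
  HOST_WIDE_INT low, hi;

  /* Split the two-word integer {low = 5, hi = -1} into four half-words,
     each holding HOST_BITS_PER_WIDE_INT/2 bits.  */
  encode (parts, (HOST_WIDE_INT) 5, (HOST_WIDE_INT) -1);

  /* Now parts[0] == 5, parts[1] == 0, and parts[2] == parts[3] == BASE - 1
     (the two half-words of an all-ones word).  decode reassembles the
     original pair exactly.  */
  decode (parts, &low, &hi);	/* low == 5, hi == -1 again.  */
}
#endif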
134 /* Make the integer constant T valid for its type
135 by setting to 0 or 1 all the bits in the constant
136 that don't belong in the type.
137 Yield 1 if a signed overflow occurs, 0 otherwise.
138 If OVERFLOW is nonzero, a signed overflow has already occurred
139 in calculating T, so propagate it.
141 Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE, if it exists. */
145 force_fit_type (t, overflow)
149 HOST_WIDE_INT low, high;
152 if (TREE_CODE (t) == REAL_CST)
154 #ifdef CHECK_FLOAT_VALUE
155 CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
161 else if (TREE_CODE (t) != INTEGER_CST)
164 low = TREE_INT_CST_LOW (t);
165 high = TREE_INT_CST_HIGH (t);
167 if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
170 prec = TYPE_PRECISION (TREE_TYPE (t));
172 /* First clear all bits that are beyond the type's precision. */
174 if (prec == 2 * HOST_BITS_PER_WIDE_INT)
176 else if (prec > HOST_BITS_PER_WIDE_INT)
178 TREE_INT_CST_HIGH (t)
179 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
183 TREE_INT_CST_HIGH (t) = 0;
184 if (prec < HOST_BITS_PER_WIDE_INT)
185 TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
188 /* Unsigned types do not suffer sign extension or overflow. */
189 if (TREE_UNSIGNED (TREE_TYPE (t)))
192 /* If the value's sign bit is set, extend the sign. */
193 if (prec != 2 * HOST_BITS_PER_WIDE_INT
194 && (prec > HOST_BITS_PER_WIDE_INT
195 ? (TREE_INT_CST_HIGH (t)
196 & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
197 : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
199 /* Value is negative:
200 set to 1 all the bits that are outside this type's precision. */
201 if (prec > HOST_BITS_PER_WIDE_INT)
203 TREE_INT_CST_HIGH (t)
204 |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
208 TREE_INT_CST_HIGH (t) = -1;
209 if (prec < HOST_BITS_PER_WIDE_INT)
210 TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
214 /* Yield nonzero if signed overflow occurred. */
216 ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
220 /* Add two doubleword integers with doubleword result.
221 Each argument is given as two `HOST_WIDE_INT' pieces.
222 One argument is L1 and H1; the other, L2 and H2.
223 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
226 add_double (l1, h1, l2, h2, lv, hv)
227 HOST_WIDE_INT l1, h1, l2, h2;
228 HOST_WIDE_INT *lv, *hv;
233 h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);
237 return overflow_sum_sign (h1, h2, h);
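/* An illustrative use of add_double (guarded out; a sketch only): the
   return value reports signed overflow in the sense of
   overflow_sum_sign above.  */
#if 0
static void
example_add_double ()
{
  HOST_WIDE_INT lv, hv;
  /* The largest positive two-word value: all ones below the sign bit.  */
  HOST_WIDE_INT lmax = -1;
  HOST_WIDE_INT hmax = ~((HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1));

  if (add_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
                  (HOST_WIDE_INT) 3, (HOST_WIDE_INT) 0, &lv, &hv))
    abort ();		/* 2 + 3 == 5; no overflow.  */

  if (! add_double (lmax, hmax, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                    &lv, &hv))
    abort ();		/* wraps around to the most negative value.  */
}
#endif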
240 /* Negate a doubleword integer with doubleword result.
241 Return nonzero if the operation overflows, assuming it's signed.
242 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
243 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
246 neg_double (l1, h1, lv, hv)
247 HOST_WIDE_INT l1, h1;
248 HOST_WIDE_INT *lv, *hv;
254 return (*hv & h1) < 0;
264 /* Multiply two doubleword integers with doubleword result.
265 Return nonzero if the operation overflows, assuming it's signed.
266 Each argument is given as two `HOST_WIDE_INT' pieces.
267 One argument is L1 and H1; the other, L2 and H2.
268 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
271 mul_double (l1, h1, l2, h2, lv, hv)
272 HOST_WIDE_INT l1, h1, l2, h2;
273 HOST_WIDE_INT *lv, *hv;
275 HOST_WIDE_INT arg1[4];
276 HOST_WIDE_INT arg2[4];
277 HOST_WIDE_INT prod[4 * 2];
278 register unsigned HOST_WIDE_INT carry;
279 register int i, j, k;
280 HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
282 encode (arg1, l1, h1);
283 encode (arg2, l2, h2);
285 bzero ((char *) prod, sizeof prod);
287 for (i = 0; i < 4; i++)
290 for (j = 0; j < 4; j++)
293 /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
294 carry += arg1[i] * arg2[j];
295 /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
297 prod[k] = LOWPART (carry);
298 carry = HIGHPART (carry);
303 decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */
305 /* Check for overflow by calculating the top half of the answer in full;
306 it should agree with the low half's sign bit. */
307 decode (prod+4, &toplow, &tophigh);
310 neg_double (l2, h2, &neglow, &neghigh);
311 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
315 neg_double (l1, h1, &neglow, &neghigh);
316 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
318 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
321 /* Shift the doubleword integer in L1, H1 left by COUNT places
322 keeping only PREC bits of result.
323 Shift right if COUNT is negative.
324 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
325 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
328 lshift_double (l1, h1, count, prec, lv, hv, arith)
329 HOST_WIDE_INT l1, h1, count;
331 HOST_WIDE_INT *lv, *hv;
336 rshift_double (l1, h1, - count, prec, lv, hv, arith);
341 count = (unsigned HOST_WIDE_INT) count % prec;
343 if (count >= HOST_BITS_PER_WIDE_INT)
345 *hv = (unsigned HOST_WIDE_INT) l1 << count - HOST_BITS_PER_WIDE_INT;
350 *hv = (((unsigned HOST_WIDE_INT) h1 << count)
351 | ((unsigned HOST_WIDE_INT) l1 >> HOST_BITS_PER_WIDE_INT - count - 1 >> 1));
352 *lv = (unsigned HOST_WIDE_INT) l1 << count;
356 /* Shift the doubleword integer in L1, H1 right by COUNT places
357 keeping only PREC bits of result. COUNT must be positive.
358 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
359 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
362 rshift_double (l1, h1, count, prec, lv, hv, arith)
363 HOST_WIDE_INT l1, h1, count;
365 HOST_WIDE_INT *lv, *hv;
368 unsigned HOST_WIDE_INT signmask;
370 ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
374 count = (unsigned HOST_WIDE_INT) count % prec;
376 if (count >= HOST_BITS_PER_WIDE_INT)
379 *lv = ((signmask << 2 * HOST_BITS_PER_WIDE_INT - count - 1 << 1)
380 | ((unsigned HOST_WIDE_INT) h1 >> count - HOST_BITS_PER_WIDE_INT));
384 *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
385 | ((unsigned HOST_WIDE_INT) h1 << HOST_BITS_PER_WIDE_INT - count - 1 << 1));
386 *hv = ((signmask << HOST_BITS_PER_WIDE_INT - count)
387 | ((unsigned HOST_WIDE_INT) h1 >> count));
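/* An illustrative use of rshift_double (guarded out; a sketch only),
   shifting the two-word value -8 right by two places at the full
   doubleword precision.  */
#if 0
static void
example_rshift_double ()
{
  HOST_WIDE_INT lv, hv;

  /* Arithmetic shift: the sign propagates, giving -2.  */
  rshift_double ((HOST_WIDE_INT) -8, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) 2,
                 2 * HOST_BITS_PER_WIDE_INT, &lv, &hv, 1);
  /* lv == -2, hv == -1.  */

  /* Logical shift: zeros come in at the top instead, so the high word
     ends up non-negative (the low word comes out the same as in the
     arithmetic case).  */
  rshift_double ((HOST_WIDE_INT) -8, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) 2,
                 2 * HOST_BITS_PER_WIDE_INT, &lv, &hv, 0);
}
#endif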
391 /* Rotate the doubleword integer in L1, H1 left by COUNT places
392 keeping only PREC bits of result.
393 Rotate right if COUNT is negative.
394 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
397 lrotate_double (l1, h1, count, prec, lv, hv)
398 HOST_WIDE_INT l1, h1, count;
400 HOST_WIDE_INT *lv, *hv;
402 HOST_WIDE_INT s1l, s1h, s2l, s2h;
408 lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
409 rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
414 /* Rotate the doubleword integer in L1, H1 right by COUNT places
415 keeping only PREC bits of result. COUNT must be positive.
416 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
419 rrotate_double (l1, h1, count, prec, lv, hv)
420 HOST_WIDE_INT l1, h1, count;
422 HOST_WIDE_INT *lv, *hv;
424 HOST_WIDE_INT s1l, s1h, s2l, s2h;
430 rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
431 lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
436 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
437 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
438 CODE is a tree code for a kind of division, one of
439 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
441 It controls how the quotient is rounded to an integer.
442 Return nonzero if the operation overflows.
443 UNS nonzero says do unsigned division. */
446 div_and_round_double (code, uns,
447 lnum_orig, hnum_orig, lden_orig, hden_orig,
448 lquo, hquo, lrem, hrem)
451 HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
452 HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
453 HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
456 HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
457 HOST_WIDE_INT den[4], quo[4];
459 unsigned HOST_WIDE_INT work;
460 register int carry = 0;
461 HOST_WIDE_INT lnum = lnum_orig;
462 HOST_WIDE_INT hnum = hnum_orig;
463 HOST_WIDE_INT lden = lden_orig;
464 HOST_WIDE_INT hden = hden_orig;
467 if ((hden == 0) && (lden == 0))
470 /* calculate quotient sign and convert operands to unsigned. */
476 /* (minimum integer) / (-1) is the only overflow case. */
477 if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
483 neg_double (lden, hden, &lden, &hden);
487 if (hnum == 0 && hden == 0)
488 { /* single precision */
490 /* This unsigned division rounds toward zero. */
491 *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
496 { /* trivial case: dividend < divisor */
497 /* hden != 0 already checked. */
504 bzero ((char *) quo, sizeof quo);
506 bzero ((char *) num, sizeof num); /* to zero the extra element */
507 bzero ((char *) den, sizeof den);
509 encode (num, lnum, hnum);
510 encode (den, lden, hden);
512 /* Special code for when the divisor < BASE. */
513 if (hden == 0 && lden < BASE)
515 /* hnum != 0 already checked. */
516 for (i = 4 - 1; i >= 0; i--)
518 work = num[i] + carry * BASE;
519 quo[i] = work / (unsigned HOST_WIDE_INT) lden;
520 carry = work % (unsigned HOST_WIDE_INT) lden;
525 /* Full double precision division,
526 with thanks to Don Knuth's "Seminumerical Algorithms". */
527 int quo_est, scale, num_hi_sig, den_hi_sig;
529 /* Find the highest non-zero divisor digit. */
530 for (i = 4 - 1; ; i--)
536 Ensure that the first digit of the divisor is at least BASE/2.
537 This is required by the quotient digit estimation algorithm. */
539 scale = BASE / (den[den_hi_sig] + 1);
540 if (scale > 1) { /* scale divisor and dividend */
542 for (i = 0; i <= 4 - 1; i++) {
543 work = (num[i] * scale) + carry;
544 num[i] = LOWPART (work);
545 carry = HIGHPART (work);
548 for (i = 0; i <= 4 - 1; i++) {
549 work = (den[i] * scale) + carry;
550 den[i] = LOWPART (work);
551 carry = HIGHPART (work);
552 if (den[i] != 0) den_hi_sig = i;
559 for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
560 /* guess the next quotient digit, quo_est, by dividing the first
561 two remaining dividend digits by the high order divisor digit.
562 quo_est is never low and is at most 2 high. */
563 unsigned HOST_WIDE_INT tmp;
565 num_hi_sig = i + den_hi_sig + 1;
566 work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
567 if (num[num_hi_sig] != den[den_hi_sig])
568 quo_est = work / den[den_hi_sig];
572 /* refine quo_est so it's usually correct, and at most one high. */
573 tmp = work - quo_est * den[den_hi_sig];
575 && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
578 /* Try QUO_EST as the quotient digit, by multiplying the
579 divisor by QUO_EST and subtracting from the remaining dividend.
580 Keep in mind that QUO_EST is the I - 1st digit. */
583 for (j = 0; j <= den_hi_sig; j++)
585 work = quo_est * den[j] + carry;
586 carry = HIGHPART (work);
587 work = num[i + j] - LOWPART (work);
588 num[i + j] = LOWPART (work);
589 carry += HIGHPART (work) != 0;
592 /* if quo_est was high by one, then num[i] went negative and
593 we need to correct things. */
595 if (num[num_hi_sig] < carry)
598 carry = 0; /* add divisor back in */
599 for (j = 0; j <= den_hi_sig; j++)
601 work = num[i + j] + den[j] + carry;
602 carry = HIGHPART (work);
603 num[i + j] = LOWPART (work);
605 num [num_hi_sig] += carry;
608 /* store the quotient digit. */
613 decode (quo, lquo, hquo);
616 /* if result is negative, make it so. */
618 neg_double (*lquo, *hquo, lquo, hquo);
620 /* compute trial remainder: rem = num - (quo * den) */
621 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
622 neg_double (*lrem, *hrem, lrem, hrem);
623 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
628 case TRUNC_MOD_EXPR: /* round toward zero */
629 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
633 case FLOOR_MOD_EXPR: /* round toward negative infinity */
634 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
637 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
640 else return overflow;
644 case CEIL_MOD_EXPR: /* round toward positive infinity */
645 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
647 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
650 else return overflow;
654 case ROUND_MOD_EXPR: /* round to closest integer */
656 HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
657 HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
659 /* get absolute values */
660 if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
661 if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
663 /* if (2 * abs (lrem) >= abs (lden)) */
664 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
665 labs_rem, habs_rem, <wice, &htwice);
666 if (((unsigned HOST_WIDE_INT) habs_den
667 < (unsigned HOST_WIDE_INT) htwice)
668 || (((unsigned HOST_WIDE_INT) habs_den
669 == (unsigned HOST_WIDE_INT) htwice)
670 && ((unsigned HOST_WIDE_INT) labs_den
671 < (unsigned HOST_WIDE_INT) ltwice)))
675 add_double (*lquo, *hquo,
676 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
679 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
682 else return overflow;
690 /* compute true remainder: rem = num - (quo * den) */
691 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
692 neg_double (*lrem, *hrem, lrem, hrem);
693 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
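/* A worked example of the rounding modes handled above: dividing -7 by 2
   (the low/high word split does not matter for values this small) gives

	TRUNC_DIV_EXPR	quotient -3, remainder -1   (toward zero)
	FLOOR_DIV_EXPR	quotient -4, remainder  1   (toward -infinity)
	CEIL_DIV_EXPR	quotient -3, remainder -1   (toward +infinity)
	ROUND_DIV_EXPR	quotient -4, remainder  1   (nearest; the tie at
						     -3.5 goes away from 0)

   and in every case quotient * 2 + remainder reproduces -7.  */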
697 #ifndef REAL_ARITHMETIC
698 /* Effectively truncate a real value to represent the nearest possible value
699 in a narrower mode. The result is actually represented in the same data
700 type as the argument, but its value is usually different.
702 A trap may occur during the FP operations and it is the responsibility
703 of the calling function to have a handler established. */
706 real_value_truncate (mode, arg)
707 enum machine_mode mode;
710 return REAL_VALUE_TRUNCATE (mode, arg);
713 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
715 /* Check for infinity in an IEEE double precision number. */
721 /* The IEEE 64-bit double format. */
726 unsigned exponent : 11;
727 unsigned mantissa1 : 20;
732 unsigned mantissa1 : 20;
733 unsigned exponent : 11;
739 if (u.big_endian.sign == 1)
742 return (u.big_endian.exponent == 2047
743 && u.big_endian.mantissa1 == 0
744 && u.big_endian.mantissa2 == 0);
749 return (u.little_endian.exponent == 2047
750 && u.little_endian.mantissa1 == 0
751 && u.little_endian.mantissa2 == 0);
755 /* Check whether an IEEE double precision number is a NaN. */
761 /* The IEEE 64-bit double format. */
766 unsigned exponent : 11;
767 unsigned mantissa1 : 20;
772 unsigned mantissa1 : 20;
773 unsigned exponent : 11;
779 if (u.big_endian.sign == 1)
782 return (u.big_endian.exponent == 2047
783 && (u.big_endian.mantissa1 != 0
784 || u.big_endian.mantissa2 != 0));
789 return (u.little_endian.exponent == 2047
790 && (u.little_endian.mantissa1 != 0
791 || u.little_endian.mantissa2 != 0));
795 /* Check for a negative IEEE double precision number. */
801 /* The IEEE 64-bit double format. */
806 unsigned exponent : 11;
807 unsigned mantissa1 : 20;
812 unsigned mantissa1 : 20;
813 unsigned exponent : 11;
819 if (u.big_endian.sign == 1)
822 return u.big_endian.sign;
827 return u.little_endian.sign;
830 #else /* Target not IEEE */
832 /* Let's assume other float formats don't have infinity.
833 (This can be overridden by redefining REAL_VALUE_ISINF.) */
841 /* Let's assume other float formats don't have NaNs.
842 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
850 /* Let's assume other float formats don't have minus zero.
851 (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
858 #endif /* Target not IEEE */
859 #endif /* no REAL_ARITHMETIC */
861 /* Split a tree IN into a constant and a variable part
862 that could be combined with CODE to make IN.
863 CODE must be a commutative arithmetic operation.
864 Store the constant part into *CONP and the variable in *VARP.
865 Return 1 if this was done; zero means the tree IN did not decompose this way.
868 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
869 Therefore, we must tell the caller whether the variable part
870 was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
871 The value stored is the coefficient for the variable term.
872 The constant term we return should always be added;
873 we negate it if necessary. */
876 split_tree (in, code, varp, conp, varsignp)
882 register tree outtype = TREE_TYPE (in);
886 /* Strip any conversions that don't change the machine mode. */
887 while ((TREE_CODE (in) == NOP_EXPR
888 || TREE_CODE (in) == CONVERT_EXPR)
889 && (TYPE_MODE (TREE_TYPE (in))
890 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
891 in = TREE_OPERAND (in, 0);
893 if (TREE_CODE (in) == code
894 || (! FLOAT_TYPE_P (TREE_TYPE (in))
895 /* We can associate addition and subtraction together
896 (even though the C standard doesn't say so)
897 for integers because the value is not affected.
898 For reals, the value might be affected, so we can't. */
899 && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
900 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
902 enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
903 if (code == INTEGER_CST)
905 *conp = TREE_OPERAND (in, 0);
906 *varp = TREE_OPERAND (in, 1);
907 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
908 && TREE_TYPE (*varp) != outtype)
909 *varp = convert (outtype, *varp);
910 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
913 if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
915 *conp = TREE_OPERAND (in, 1);
916 *varp = TREE_OPERAND (in, 0);
918 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
919 && TREE_TYPE (*varp) != outtype)
920 *varp = convert (outtype, *varp);
921 if (TREE_CODE (in) == MINUS_EXPR)
923 /* If operation is subtraction and constant is second,
924 must negate it to get an additive constant.
925 And this cannot be done unless it is a manifest constant.
926 It could also be the address of a static variable.
927 We cannot negate that, so give up. */
928 if (TREE_CODE (*conp) == INTEGER_CST)
929 /* Subtracting from integer_zero_node loses for long long. */
930 *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
936 if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
938 *conp = TREE_OPERAND (in, 0);
939 *varp = TREE_OPERAND (in, 1);
940 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
941 && TREE_TYPE (*varp) != outtype)
942 *varp = convert (outtype, *varp);
943 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
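/* For example, given IN = `x - 5' and CODE = PLUS_EXPR, split_tree sets
   *VARP to `x', *VARSIGNP to 1 and *CONP to the folded constant -5, so
   that the caller can simply add the constant back on.  Given
   IN = `5 - x' it instead sets *VARP to `x', *VARSIGNP to -1 and *CONP
   to 5 unchanged.  */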
950 /* Combine two constants ARG1 and ARG2 under operation CODE
951 to produce a new constant.
952 We assume ARG1 and ARG2 have the same data type,
953 or at least are the same kind of constant and the same machine mode.
955 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
958 const_binop (code, arg1, arg2, notrunc)
960 register tree arg1, arg2;
963 if (TREE_CODE (arg1) == INTEGER_CST)
965 register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
966 register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
967 HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
968 HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
969 HOST_WIDE_INT low, hi;
970 HOST_WIDE_INT garbagel, garbageh;
972 int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
978 t = build_int_2 (int1l | int2l, int1h | int2h);
982 t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
986 t = build_int_2 (int1l & int2l, int1h & int2h);
990 t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
996 /* It's unclear from the C standard whether shifts can overflow.
997 The following code ignores overflow; perhaps a C standard
998 interpretation ruling is needed. */
999 lshift_double (int1l, int1h, int2l,
1000 TYPE_PRECISION (TREE_TYPE (arg1)),
1003 t = build_int_2 (low, hi);
1004 TREE_TYPE (t) = TREE_TYPE (arg1);
1006 force_fit_type (t, 0);
1007 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
1008 TREE_CONSTANT_OVERFLOW (t)
1009 = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
1015 lrotate_double (int1l, int1h, int2l,
1016 TYPE_PRECISION (TREE_TYPE (arg1)),
1018 t = build_int_2 (low, hi);
1025 if ((unsigned HOST_WIDE_INT) int2l < int1l)
1028 overflow = int2h < hi;
1030 t = build_int_2 (int2l, int2h);
1036 if ((unsigned HOST_WIDE_INT) int1l < int2l)
1039 overflow = int1h < hi;
1041 t = build_int_2 (int1l, int1h);
1044 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
1045 t = build_int_2 (low, hi);
1049 if (int2h == 0 && int2l == 0)
1051 t = build_int_2 (int1l, int1h);
1054 neg_double (int2l, int2h, &low, &hi);
1055 add_double (int1l, int1h, low, hi, &low, &hi);
1056 overflow = overflow_sum_sign (hi, int2h, int1h);
1057 t = build_int_2 (low, hi);
1061 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1062 t = build_int_2 (low, hi);
1065 case TRUNC_DIV_EXPR:
1066 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1067 case EXACT_DIV_EXPR:
1068 /* This is a shortcut for a common special case.
1069 It reduces the number of tree nodes generated. */
1071 if (int2h == 0 && int2l > 0
1072 && TREE_TYPE (arg1) == sizetype
1073 && int1h == 0 && int1l >= 0)
1075 if (code == CEIL_DIV_EXPR)
1077 return size_int (int1l / int2l);
1079 case ROUND_DIV_EXPR:
1080 if (int2h == 0 && int2l == 1)
1082 t = build_int_2 (int1l, int1h);
1085 if (int1l == int2l && int1h == int2h)
1087 if ((int1l | int1h) == 0)
1089 t = build_int_2 (1, 0);
1092 overflow = div_and_round_double (code, uns,
1093 int1l, int1h, int2l, int2h,
1094 &low, &hi, &garbagel, &garbageh);
1095 t = build_int_2 (low, hi);
1098 case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
1099 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
1100 overflow = div_and_round_double (code, uns,
1101 int1l, int1h, int2l, int2h,
1102 &garbagel, &garbageh, &low, &hi);
1103 t = build_int_2 (low, hi);
1110 low = (((unsigned HOST_WIDE_INT) int1h
1111 < (unsigned HOST_WIDE_INT) int2h)
1112 || (((unsigned HOST_WIDE_INT) int1h
1113 == (unsigned HOST_WIDE_INT) int2h)
1114 && ((unsigned HOST_WIDE_INT) int1l
1115 < (unsigned HOST_WIDE_INT) int2l)));
1119 low = ((int1h < int2h)
1120 || ((int1h == int2h)
1121 && ((unsigned HOST_WIDE_INT) int1l
1122 < (unsigned HOST_WIDE_INT) int2l)));
1124 if (low == (code == MIN_EXPR))
1125 t = build_int_2 (int1l, int1h);
1127 t = build_int_2 (int2l, int2h);
1134 TREE_TYPE (t) = TREE_TYPE (arg1);
1136 = ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
1137 | TREE_OVERFLOW (arg1)
1138 | TREE_OVERFLOW (arg2));
1139 TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
1140 | TREE_CONSTANT_OVERFLOW (arg1)
1141 | TREE_CONSTANT_OVERFLOW (arg2));
1144 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1145 if (TREE_CODE (arg1) == REAL_CST)
1150 REAL_VALUE_TYPE value;
1153 d1 = TREE_REAL_CST (arg1);
1154 d2 = TREE_REAL_CST (arg2);
1156 /* If either operand is a NaN, just return it. Otherwise, set up
1157 for floating-point trap; we return an overflow. */
1158 if (REAL_VALUE_ISNAN (d1))
1160 else if (REAL_VALUE_ISNAN (d2))
1162 else if (setjmp (float_error))
1164 t = copy_node (arg1);
1169 set_float_handler (float_error);
1171 #ifdef REAL_ARITHMETIC
1172 REAL_ARITHMETIC (value, code, d1, d2);
1189 #ifndef REAL_INFINITY
1198 value = MIN (d1, d2);
1202 value = MAX (d1, d2);
1208 #endif /* no REAL_ARITHMETIC */
1209 t = build_real (TREE_TYPE (arg1),
1210 real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
1212 set_float_handler (NULL_PTR);
1215 = (force_fit_type (t, overflow)
1216 | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
1217 TREE_CONSTANT_OVERFLOW (t)
1219 | TREE_CONSTANT_OVERFLOW (arg1)
1220 | TREE_CONSTANT_OVERFLOW (arg2);
1223 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1224 if (TREE_CODE (arg1) == COMPLEX_CST)
1226 register tree r1 = TREE_REALPART (arg1);
1227 register tree i1 = TREE_IMAGPART (arg1);
1228 register tree r2 = TREE_REALPART (arg2);
1229 register tree i2 = TREE_IMAGPART (arg2);
1235 t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
1236 const_binop (PLUS_EXPR, i1, i2, notrunc));
1240 t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
1241 const_binop (MINUS_EXPR, i1, i2, notrunc));
1245 t = build_complex (const_binop (MINUS_EXPR,
1246 const_binop (MULT_EXPR,
1248 const_binop (MULT_EXPR,
1251 const_binop (PLUS_EXPR,
1252 const_binop (MULT_EXPR,
1254 const_binop (MULT_EXPR,
1261 register tree magsquared
1262 = const_binop (PLUS_EXPR,
1263 const_binop (MULT_EXPR, r2, r2, notrunc),
1264 const_binop (MULT_EXPR, i2, i2, notrunc),
1268 (const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1269 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1270 const_binop (PLUS_EXPR,
1271 const_binop (MULT_EXPR, r1, r2,
1273 const_binop (MULT_EXPR, i1, i2,
1276 magsquared, notrunc),
1277 const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1278 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1279 const_binop (MINUS_EXPR,
1280 const_binop (MULT_EXPR, i1, r2,
1282 const_binop (MULT_EXPR, r1, i2,
1285 magsquared, notrunc));
1292 TREE_TYPE (t) = TREE_TYPE (arg1);
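/* An illustrative use of const_binop (guarded out; a sketch only):
   folding 6 * 7 into a single INTEGER_CST.  The type is attached by
   hand before folding, as the callers in this file do.  */
#if 0
static tree
example_const_binop ()
{
  tree a = build_int_2 (6, 0);
  tree b = build_int_2 (7, 0);

  TREE_TYPE (a) = integer_type_node;
  TREE_TYPE (b) = integer_type_node;

  /* Yields an INTEGER_CST for 42 of type `integer_type_node' with the
     overflow flags clear.  */
  return const_binop (MULT_EXPR, a, b, 0);
}
#endif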
1298 /* Return an INTEGER_CST with value V and type from `sizetype'. */
1302 unsigned HOST_WIDE_INT number;
1305 /* Type-size nodes already made for small sizes. */
1306 static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];
1308 if (number < 2*HOST_BITS_PER_WIDE_INT + 1
1309 && size_table[number] != 0)
1310 return size_table[number];
1311 if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
1313 push_obstacks_nochange ();
1314 /* Make this a permanent node. */
1315 end_temporary_allocation ();
1316 t = build_int_2 (number, 0);
1317 TREE_TYPE (t) = sizetype;
1318 size_table[number] = t;
1323 t = build_int_2 (number, 0);
1324 TREE_TYPE (t) = sizetype;
1329 /* Combine operands ARG0 and ARG1 with arithmetic operation CODE.
1330 CODE is a tree code. Data type is taken from `sizetype'.
1331 If the operands are constant, so is the result. */
1334 size_binop (code, arg0, arg1)
1335 enum tree_code code;
1338 /* Handle the special case of two integer constants faster. */
1339 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1341 /* And some specific cases even faster than that. */
1342 if (code == PLUS_EXPR
1343 && TREE_INT_CST_LOW (arg0) == 0
1344 && TREE_INT_CST_HIGH (arg0) == 0)
1346 if (code == MINUS_EXPR
1347 && TREE_INT_CST_LOW (arg1) == 0
1348 && TREE_INT_CST_HIGH (arg1) == 0)
1350 if (code == MULT_EXPR
1351 && TREE_INT_CST_LOW (arg0) == 1
1352 && TREE_INT_CST_HIGH (arg0) == 0)
1354 /* Handle general case of two integer constants. */
1355 return const_binop (code, arg0, arg1, 1);
1358 if (arg0 == error_mark_node || arg1 == error_mark_node)
1359 return error_mark_node;
1361 return fold (build (code, sizetype, arg0, arg1));
1364 /* Given T, a tree representing type conversion of ARG1, a constant,
1365 return a constant tree representing the result of conversion. */
1368 fold_convert (t, arg1)
1372 register tree type = TREE_TYPE (t);
1375 if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
1377 if (TREE_CODE (arg1) == INTEGER_CST)
1379 /* If we would build a constant wider than GCC supports,
1380 leave the conversion unfolded. */
1381 if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
1384 /* Given an integer constant, make new constant with new type,
1385 appropriately sign-extended or truncated. */
1386 t = build_int_2 (TREE_INT_CST_LOW (arg1),
1387 TREE_INT_CST_HIGH (arg1));
1388 TREE_TYPE (t) = type;
1389 /* Indicate an overflow if (1) ARG1 already overflowed,
1390 or (2) force_fit_type indicates an overflow.
1391 Tell force_fit_type that an overflow has already occurred
1392 if ARG1 is a too-large unsigned value and T is signed. */
1394 = (TREE_OVERFLOW (arg1)
1395 | force_fit_type (t,
1396 (TREE_INT_CST_HIGH (arg1) < 0
1397 & (TREE_UNSIGNED (type)
1398 < TREE_UNSIGNED (TREE_TYPE (arg1))))));
1399 TREE_CONSTANT_OVERFLOW (t)
1400 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1402 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1403 else if (TREE_CODE (arg1) == REAL_CST)
1405 /* Don't initialize these, use assignments.
1406 Initialized local aggregates don't work on old compilers. */
1411 x = TREE_REAL_CST (arg1);
1412 l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
1413 u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
1414 /* See if X will be in range after truncation towards 0.
1415 To compensate for truncation, move the bounds away from 0,
1416 but reject if X exactly equals the adjusted bounds. */
1417 #ifdef REAL_ARITHMETIC
1418 REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
1419 REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
1424 /* If X is a NaN, use zero instead and show we have an overflow.
1425 Otherwise, range check. */
1426 if (REAL_VALUE_ISNAN (x))
1427 overflow = 1, x = dconst0;
1428 else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
1431 #ifndef REAL_ARITHMETIC
1433 HOST_WIDE_INT low, high;
1434 HOST_WIDE_INT half_word
1435 = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
1440 high = (HOST_WIDE_INT) (x / half_word / half_word);
1441 x -= (REAL_VALUE_TYPE) high * half_word * half_word;
1442 if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
1444 low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
1445 low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
1448 low = (HOST_WIDE_INT) x;
1449 if (TREE_REAL_CST (arg1) < 0)
1450 neg_double (low, high, &low, &high);
1451 t = build_int_2 (low, high);
1455 HOST_WIDE_INT low, high;
1456 REAL_VALUE_TO_INT (&low, &high, x);
1457 t = build_int_2 (low, high);
1460 TREE_TYPE (t) = type;
1462 = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
1463 TREE_CONSTANT_OVERFLOW (t)
1464 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1466 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1467 TREE_TYPE (t) = type;
1469 else if (TREE_CODE (type) == REAL_TYPE)
1471 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1472 if (TREE_CODE (arg1) == INTEGER_CST)
1473 return build_real_from_int_cst (type, arg1);
1474 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1475 if (TREE_CODE (arg1) == REAL_CST)
1477 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
1479 else if (setjmp (float_error))
1482 t = copy_node (arg1);
1485 set_float_handler (float_error);
1487 t = build_real (type, real_value_truncate (TYPE_MODE (type),
1488 TREE_REAL_CST (arg1)));
1489 set_float_handler (NULL_PTR);
1493 = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
1494 TREE_CONSTANT_OVERFLOW (t)
1495 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1499 TREE_CONSTANT (t) = 1;
1503 /* Return an expr equal to X but certainly not valid as an lvalue.
1504 Also make sure it is not valid as a null pointer constant. */
1512 /* These things are certainly not lvalues. */
1513 if (TREE_CODE (x) == NON_LVALUE_EXPR
1514 || TREE_CODE (x) == INTEGER_CST
1515 || TREE_CODE (x) == REAL_CST
1516 || TREE_CODE (x) == STRING_CST
1517 || TREE_CODE (x) == ADDR_EXPR)
1519 if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
1521 /* Use NOP_EXPR instead of NON_LVALUE_EXPR
1522 so convert_for_assignment won't strip it.
1523 This is so this 0 won't be treated as a null pointer constant. */
1524 result = build1 (NOP_EXPR, TREE_TYPE (x), x);
1525 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1531 result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
1532 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1536 /* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
1537 Zero means allow extended lvalues. */
1539 int pedantic_lvalues;
1541 /* When pedantic, return an expr equal to X but certainly not valid as a
1542 pedantic lvalue. Otherwise, return X. */
1545 pedantic_non_lvalue (x)
1548 if (pedantic_lvalues)
1549 return non_lvalue (x);
1554 /* Given a tree comparison code, return the code that is the logical inverse
1555 of the given code. It is not safe to do this for floating-point
1556 comparisons, except for NE_EXPR and EQ_EXPR. */
1558 static enum tree_code
1559 invert_tree_comparison (code)
1560 enum tree_code code;
1581 /* Similar, but return the comparison that results if the operands are
1582 swapped. This is safe for floating-point. */
1584 static enum tree_code
1585 swap_tree_comparison (code)
1586 enum tree_code code;
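/* The two transformations above differ: for `a < b',
   invert_tree_comparison gives `a >= b' (the logical negation), while
   swap_tree_comparison gives `b > a' (the same test with the operands
   exchanged).  */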
1606 /* Return nonzero if CODE is a tree code that represents a truth value. */
1609 truth_value_p (code)
1610 enum tree_code code;
1612 return (TREE_CODE_CLASS (code) == '<'
1613 || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
1614 || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
1615 || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
1618 /* Return nonzero if two operands are necessarily equal.
1619 If ONLY_CONST is non-zero, only return non-zero for constants.
1620 This function tests whether the operands are indistinguishable;
1621 it does not test whether they are equal using C's == operation.
1622 The distinction is important for IEEE floating point, because
1623 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
1624 (2) two NaNs may be indistinguishable, but NaN!=NaN. */
1627 operand_equal_p (arg0, arg1, only_const)
1631 /* If the two types don't have the same signedness, then we can't consider
1632 them equal. We must check this before the STRIP_NOPS calls
1633 because they may change the signedness of the arguments. */
1634 if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
1640 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
1641 We don't care about side effects in that case because the SAVE_EXPR
1642 takes care of that for us. */
1643 if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
1644 return ! only_const;
1646 if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
1649 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1650 && TREE_CODE (arg0) == ADDR_EXPR
1651 && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
1654 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1655 && TREE_CODE (arg0) == INTEGER_CST
1656 && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
1657 && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
1660 /* Detect when real constants are equal. */
1661 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1662 && TREE_CODE (arg0) == REAL_CST)
1663 return !bcmp ((char *) &TREE_REAL_CST (arg0),
1664 (char *) &TREE_REAL_CST (arg1),
1665 sizeof (REAL_VALUE_TYPE));
1673 if (TREE_CODE (arg0) != TREE_CODE (arg1))
1675 /* This is needed for conversions and for COMPONENT_REF.
1676 Might as well play it safe and always test this. */
1677 if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
1680 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
1683 /* Two conversions are equal only if signedness and modes match. */
1684 if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
1685 && (TREE_UNSIGNED (TREE_TYPE (arg0))
1686 != TREE_UNSIGNED (TREE_TYPE (arg1))))
1689 return operand_equal_p (TREE_OPERAND (arg0, 0),
1690 TREE_OPERAND (arg1, 0), 0);
1694 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1695 TREE_OPERAND (arg1, 0), 0)
1696 && operand_equal_p (TREE_OPERAND (arg0, 1),
1697 TREE_OPERAND (arg1, 1), 0));
1700 switch (TREE_CODE (arg0))
1703 return operand_equal_p (TREE_OPERAND (arg0, 0),
1704 TREE_OPERAND (arg1, 0), 0);
1708 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1709 TREE_OPERAND (arg1, 0), 0)
1710 && operand_equal_p (TREE_OPERAND (arg0, 1),
1711 TREE_OPERAND (arg1, 1), 0));
1714 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1715 TREE_OPERAND (arg1, 0), 0)
1716 && operand_equal_p (TREE_OPERAND (arg0, 1),
1717 TREE_OPERAND (arg1, 1), 0)
1718 && operand_equal_p (TREE_OPERAND (arg0, 2),
1719 TREE_OPERAND (arg1, 2), 0));
1727 /* Similar to operand_equal_p, but see if ARG0 might have been made by
1728 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
1730 When in doubt, return 0. */
1733 operand_equal_for_comparison_p (arg0, arg1, other)
1737 int unsignedp1, unsignedpo;
1738 tree primarg1, primother;
1739 unsigned correct_width;
1741 if (operand_equal_p (arg0, arg1, 0))
1744 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
1747 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
1748 actual comparison operand, ARG0.
1750 First throw away any conversions to wider types
1751 already present in the operands. */
1753 primarg1 = get_narrower (arg1, &unsignedp1);
1754 primother = get_narrower (other, &unsignedpo);
1756 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
1757 if (unsignedp1 == unsignedpo
1758 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
1759 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
1761 tree type = TREE_TYPE (arg0);
1763 /* Make sure shorter operand is extended the right way
1764 to match the longer operand. */
1765 primarg1 = convert (signed_or_unsigned_type (unsignedp1,
1766 TREE_TYPE (primarg1)),
1769 if (operand_equal_p (arg0, convert (type, primarg1), 0))
1776 /* See if ARG is an expression that is either a comparison or is performing
1777 arithmetic on comparisons. The comparisons must only be comparing
1778 two different values, which will be stored in *CVAL1 and *CVAL2; if
1779 they are non-zero it means that some operands have already been found.
1780 No variables may be used anywhere else in the expression except in the
1781 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
1782 the expression and save_expr needs to be called with CVAL1 and CVAL2.
1784 If this is true, return 1. Otherwise, return zero. */
1787 twoval_comparison_p (arg, cval1, cval2, save_p)
1789 tree *cval1, *cval2;
1792 enum tree_code code = TREE_CODE (arg);
1793 char class = TREE_CODE_CLASS (code);
1795 /* We can handle some of the 'e' cases here. */
1796 if (class == 'e' && code == TRUTH_NOT_EXPR)
1798 else if (class == 'e'
1799 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
1800 || code == COMPOUND_EXPR))
1803 /* ??? Disable this since the SAVE_EXPR might already be in use outside
1804 the expression. There may be no way to make this work, but it needs
1805 to be looked at again for 2.6. */
1807 else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
1809 /* If we've already found a CVAL1 or CVAL2, this expression is
1810 too complex to handle. */
1811 if (*cval1 || *cval2)
1822 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
1825 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
1826 && twoval_comparison_p (TREE_OPERAND (arg, 1),
1827 cval1, cval2, save_p));
1833 if (code == COND_EXPR)
1834 return (twoval_comparison_p (TREE_OPERAND (arg, 0),
1835 cval1, cval2, save_p)
1836 && twoval_comparison_p (TREE_OPERAND (arg, 1),
1837 cval1, cval2, save_p)
1838 && twoval_comparison_p (TREE_OPERAND (arg, 2),
1839 cval1, cval2, save_p));
1843 /* First see if we can handle the first operand, then the second. For
1844 the second operand, we know *CVAL1 can't be zero. It must be that
1845 one side of the comparison is each of the values; test for the
1846 case where this isn't true by failing if the two operands are equal. */
1849 if (operand_equal_p (TREE_OPERAND (arg, 0),
1850 TREE_OPERAND (arg, 1), 0))
1854 *cval1 = TREE_OPERAND (arg, 0);
1855 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
1857 else if (*cval2 == 0)
1858 *cval2 = TREE_OPERAND (arg, 0);
1859 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
1864 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
1866 else if (*cval2 == 0)
1867 *cval2 = TREE_OPERAND (arg, 1);
1868 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
1879 /* ARG is a tree that is known to contain just arithmetic operations and
1880 comparisons. Evaluate the operations in the tree substituting NEW0 for
1881 any occurrence of OLD0 as an operand of a comparison and likewise NEW1 for OLD1. */
1885 eval_subst (arg, old0, new0, old1, new1)
1887 tree old0, new0, old1, new1;
1889 tree type = TREE_TYPE (arg);
1890 enum tree_code code = TREE_CODE (arg);
1891 char class = TREE_CODE_CLASS (code);
1893 /* We can handle some of the 'e' cases here. */
1894 if (class == 'e' && code == TRUTH_NOT_EXPR)
1896 else if (class == 'e'
1897 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
1903 return fold (build1 (code, type,
1904 eval_subst (TREE_OPERAND (arg, 0),
1905 old0, new0, old1, new1)));
1908 return fold (build (code, type,
1909 eval_subst (TREE_OPERAND (arg, 0),
1910 old0, new0, old1, new1),
1911 eval_subst (TREE_OPERAND (arg, 1),
1912 old0, new0, old1, new1)));
1918 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
1921 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
1924 return fold (build (code, type,
1925 eval_subst (TREE_OPERAND (arg, 0),
1926 old0, new0, old1, new1),
1927 eval_subst (TREE_OPERAND (arg, 1),
1928 old0, new0, old1, new1),
1929 eval_subst (TREE_OPERAND (arg, 2),
1930 old0, new0, old1, new1)));
1935 tree arg0 = TREE_OPERAND (arg, 0);
1936 tree arg1 = TREE_OPERAND (arg, 1);
1938 /* We need to check both for exact equality and tree equality. The
1939 former will be true if the operand has a side-effect. In that
1940 case, we know the operand occurred exactly once. */
1942 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
1944 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
1947 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
1949 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
1952 return fold (build (code, type, arg0, arg1));
1959 /* Return a tree for the case when the result of an expression is RESULT
1960 converted to TYPE and OMITTED was previously an operand of the expression
1961 but is now not needed (e.g., we folded OMITTED * 0).
1963 If OMITTED has side effects, we must evaluate it. Otherwise, just do
1964 the conversion of RESULT to TYPE. */
1967 omit_one_operand (type, result, omitted)
1968 tree type, result, omitted;
1970 tree t = convert (type, result);
1972 if (TREE_SIDE_EFFECTS (omitted))
1973 return build (COMPOUND_EXPR, type, omitted, t);
1975 return non_lvalue (t);
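/* For example, when `f () * 0' is folded, the product is 0 but the call
   may have side effects; omit_one_operand then produces `(f (), 0)' so
   the call is still evaluated, whereas a side-effect-free operand is
   simply dropped and a non-lvalue 0 is returned.  */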
1978 /* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
1981 pedantic_omit_one_operand (type, result, omitted)
1982 tree type, result, omitted;
1984 tree t = convert (type, result);
1986 if (TREE_SIDE_EFFECTS (omitted))
1987 return build (COMPOUND_EXPR, type, omitted, t);
1989 return pedantic_non_lvalue (t);
1994 /* Return a simplified tree node for the truth-negation of ARG. This
1995 never alters ARG itself. We assume that ARG is an operation that
1996 returns a truth value (0 or 1). */
1999 invert_truthvalue (arg)
2002 tree type = TREE_TYPE (arg);
2003 enum tree_code code = TREE_CODE (arg);
2005 if (code == ERROR_MARK)
2008 /* If this is a comparison, we can simply invert it, except for
2009 floating-point non-equality comparisons, in which case we just
2010 enclose a TRUTH_NOT_EXPR around what we have. */
2012 if (TREE_CODE_CLASS (code) == '<')
2014 if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
2015 && code != NE_EXPR && code != EQ_EXPR)
2016 return build1 (TRUTH_NOT_EXPR, type, arg);
2018 return build (invert_tree_comparison (code), type,
2019 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
2025 return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
2026 && TREE_INT_CST_HIGH (arg) == 0, 0));
2028 case TRUTH_AND_EXPR:
2029 return build (TRUTH_OR_EXPR, type,
2030 invert_truthvalue (TREE_OPERAND (arg, 0)),
2031 invert_truthvalue (TREE_OPERAND (arg, 1)));
2034 return build (TRUTH_AND_EXPR, type,
2035 invert_truthvalue (TREE_OPERAND (arg, 0)),
2036 invert_truthvalue (TREE_OPERAND (arg, 1)));
2038 case TRUTH_XOR_EXPR:
2039 /* Here we can invert either operand. We invert the first operand
2040 unless the second operand is a TRUTH_NOT_EXPR in which case our
2041 result is the XOR of the first operand with the inside of the
2042 negation of the second operand. */
2044 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
2045 return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
2046 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
2048 return build (TRUTH_XOR_EXPR, type,
2049 invert_truthvalue (TREE_OPERAND (arg, 0)),
2050 TREE_OPERAND (arg, 1));
2052 case TRUTH_ANDIF_EXPR:
2053 return build (TRUTH_ORIF_EXPR, type,
2054 invert_truthvalue (TREE_OPERAND (arg, 0)),
2055 invert_truthvalue (TREE_OPERAND (arg, 1)));
2057 case TRUTH_ORIF_EXPR:
2058 return build (TRUTH_ANDIF_EXPR, type,
2059 invert_truthvalue (TREE_OPERAND (arg, 0)),
2060 invert_truthvalue (TREE_OPERAND (arg, 1)));
2062 case TRUTH_NOT_EXPR:
2063 return TREE_OPERAND (arg, 0);
2066 return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
2067 invert_truthvalue (TREE_OPERAND (arg, 1)),
2068 invert_truthvalue (TREE_OPERAND (arg, 2)));
2071 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
2072 invert_truthvalue (TREE_OPERAND (arg, 1)));
2074 case NON_LVALUE_EXPR:
2075 return invert_truthvalue (TREE_OPERAND (arg, 0));
2080 return build1 (TREE_CODE (arg), type,
2081 invert_truthvalue (TREE_OPERAND (arg, 0)));
2084 if (!integer_onep (TREE_OPERAND (arg, 1)))
2086 return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
2089 return build1 (TRUTH_NOT_EXPR, type, arg);
2091 if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
2093 return build1 (TRUTH_NOT_EXPR, type, arg);
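/* For example, for integer operands invert_truthvalue turns
   `a < b && c != 0' into `a >= b || c == 0', pushing the negation
   through the TRUTH_ANDIF_EXPR and inverting each comparison rather
   than wrapping the whole expression in a TRUTH_NOT_EXPR (which, as
   noted above, is what happens for floating-point inequalities).  */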
2096 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2097 operands are another bit-wise operation with a common input. If so,
2098 distribute the bit operations to save an operation and possibly two if
2099 constants are involved. For example, convert
2100 (A | B) & (A | C) into A | (B & C)
2101 Further simplification will occur if B and C are constants.
2103 If this optimization cannot be done, 0 will be returned. */
2106 distribute_bit_expr (code, type, arg0, arg1)
2107 enum tree_code code;
2114 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2115 || TREE_CODE (arg0) == code
2116 || (TREE_CODE (arg0) != BIT_AND_EXPR
2117 && TREE_CODE (arg0) != BIT_IOR_EXPR))
2120 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
2122 common = TREE_OPERAND (arg0, 0);
2123 left = TREE_OPERAND (arg0, 1);
2124 right = TREE_OPERAND (arg1, 1);
2126 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
2128 common = TREE_OPERAND (arg0, 0);
2129 left = TREE_OPERAND (arg0, 1);
2130 right = TREE_OPERAND (arg1, 0);
2132 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
2134 common = TREE_OPERAND (arg0, 1);
2135 left = TREE_OPERAND (arg0, 0);
2136 right = TREE_OPERAND (arg1, 1);
2138 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
2140 common = TREE_OPERAND (arg0, 1);
2141 left = TREE_OPERAND (arg0, 0);
2142 right = TREE_OPERAND (arg1, 0);
2147 return fold (build (TREE_CODE (arg0), type, common,
2148 fold (build (code, type, left, right))));
2151 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
2152 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
2155 make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
2158 int bitsize, bitpos;
2161 tree result = build (BIT_FIELD_REF, type, inner,
2162 size_int (bitsize), size_int (bitpos));
2164 TREE_UNSIGNED (result) = unsignedp;
2169 /* Optimize a bit-field compare.
2171 There are two cases: First is a compare against a constant and the
2172 second is a comparison of two items where the fields are at the same
2173 bit position relative to the start of a chunk (byte, halfword, word)
2174 large enough to contain it. In these cases we can avoid the shift
2175 implicit in bitfield extractions.
2177 For constants, we emit a compare of the shifted constant with the
2178 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2179 compared. For two fields at the same position, we do the ANDs with the
2180 similar mask and compare the result of the ANDs.
2182 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2183 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2184 are the left and right operands of the comparison, respectively.
2186 If the optimization described above can be done, we return the resulting
2187 tree. Otherwise we return zero. */
2190 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2191 enum tree_code code;
2195 int lbitpos, lbitsize, rbitpos, rbitsize;
2196 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
2197 tree type = TREE_TYPE (lhs);
2198 tree signed_type, unsigned_type;
2199 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2200 enum machine_mode lmode, rmode, lnmode, rnmode;
2201 int lunsignedp, runsignedp;
2202 int lvolatilep = 0, rvolatilep = 0;
2203 tree linner, rinner;
2207 /* Get all the information about the extractions being done. If the bit size
2208 is the same as the size of the underlying object, we aren't doing an
2209 extraction at all and so can do nothing. */
2210 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2211 &lunsignedp, &lvolatilep);
2212 if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2218 /* If this is not a constant, we can only do something if bit positions,
2219 sizes, and signedness are the same. */
2220 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
2221 &rmode, &runsignedp, &rvolatilep);
2223 if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
2224 || lunsignedp != runsignedp || offset != 0)
2228 /* See if we can find a mode to refer to this field. We should be able to,
2229 but fail if we can't. */
2230 lnmode = get_best_mode (lbitsize, lbitpos,
2231 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
2233 if (lnmode == VOIDmode)
2236 /* Set signed and unsigned types of the precision of this mode for the shifts below. */
2238 signed_type = type_for_mode (lnmode, 0);
2239 unsigned_type = type_for_mode (lnmode, 1);
2243 rnmode = get_best_mode (rbitsize, rbitpos,
2244 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
2246 if (rnmode == VOIDmode)
2250 /* Compute the bit position and size for the new reference and our offset
2251 within it. If the new reference is the same size as the original, we
2252 won't optimize anything, so return zero. */
2253 lnbitsize = GET_MODE_BITSIZE (lnmode);
2254 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2255 lbitpos -= lnbitpos;
2256 if (lnbitsize == lbitsize)
2261 rnbitsize = GET_MODE_BITSIZE (rnmode);
2262 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2263 rbitpos -= rnbitpos;
2264 if (rnbitsize == rbitsize)
2268 if (BYTES_BIG_ENDIAN)
2269 lbitpos = lnbitsize - lbitsize - lbitpos;
2271 /* Make the mask to be used against the extracted field. */
2272 mask = build_int_2 (~0, ~0);
2273 TREE_TYPE (mask) = unsigned_type;
2274 force_fit_type (mask, 0);
2275 mask = convert (unsigned_type, mask);
2276 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2277 mask = const_binop (RSHIFT_EXPR, mask,
2278 size_int (lnbitsize - lbitsize - lbitpos), 0);
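/* For instance, with an 8-bit containing word (lnbitsize == 8) and a
   3-bit field at bit 2 (lbitsize == 3, lbitpos == 2 after any
   endianness adjustment), the two shifts turn the all-ones constant
   into 11100000 and then 00011100: exactly the bits the field occupies
   within the word.  */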
2281 /* If not comparing with constant, just rework the comparison and return. */
2283 return build (code, compare_type,
2284 build (BIT_AND_EXPR, unsigned_type,
2285 make_bit_field_ref (linner, unsigned_type,
2286 lnbitsize, lnbitpos, 1),
2288 build (BIT_AND_EXPR, unsigned_type,
2289 make_bit_field_ref (rinner, unsigned_type,
2290 rnbitsize, rnbitpos, 1),
2293 /* Otherwise, we are handling the constant case. See if the constant is too
2294 big for the field. Warn and return a tree for 0 (false) if so. We do
2295 this not only for its own sake, but to avoid having to test for this
2296 error case below. If we didn't, we might generate wrong code.
2298 For unsigned fields, the constant shifted right by the field length should
2299 be all zero. For signed fields, the high-order bits should agree with the sign bit. */
2304 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2305 convert (unsigned_type, rhs),
2306 size_int (lbitsize), 0)))
2308 warning ("comparison is always %s due to width of bitfield",
2309 code == NE_EXPR ? "one" : "zero");
2310 return convert (compare_type,
2312 ? integer_one_node : integer_zero_node));
2317 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2318 size_int (lbitsize - 1), 0);
2319 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2321 warning ("comparison is always %s due to width of bitfield",
2322 code == NE_EXPR ? "one" : "zero");
2323 return convert (compare_type,
2325 ? integer_one_node : integer_zero_node));
2329 /* Single-bit compares should always be against zero. */
2330 if (lbitsize == 1 && ! integer_zerop (rhs))
2332 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2333 rhs = convert (type, integer_zero_node);
2336 /* Make a new bitfield reference, shift the constant over the
2337 appropriate number of bits and mask it with the computed mask
2338 (in case this was a signed field). If we changed it, make a new one. */
2339 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2342 TREE_SIDE_EFFECTS (lhs) = 1;
2343 TREE_THIS_VOLATILE (lhs) = 1;
2346 rhs = fold (const_binop (BIT_AND_EXPR,
2347 const_binop (LSHIFT_EXPR,
2348 convert (unsigned_type, rhs),
2349 size_int (lbitpos), 0),
2352 return build (code, compare_type,
2353 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2357 /* Subroutine for fold_truthop: decode a field reference.
2359 If EXP is a comparison reference, we return the innermost reference.
2361 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2362 set to the starting bit number.
2364 If the innermost field can be completely contained in a mode-sized
2365 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2367 *PVOLATILEP is set to 1 if any expression encountered is volatile;
2368 otherwise it is not changed.
2370 *PUNSIGNEDP is set to the signedness of the field.
2372 *PMASK is set to the mask used. This is either contained in a
2373 BIT_AND_EXPR or derived from the width of the field.
2375 Return 0 if this is not a component reference or is one that we can't
2376 do anything with. */
2379 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2382 int *pbitsize, *pbitpos;
2383 enum machine_mode *pmode;
2384 int *punsignedp, *pvolatilep;
2388 tree mask, inner, offset;
2392 /* All the optimizations using this function assume integer fields.
2393 There are problems with FP fields since the type_for_size call
2394 below can fail for, e.g., XFmode. */
2395 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
2400 if (TREE_CODE (exp) == BIT_AND_EXPR)
2402 and_mask = TREE_OPERAND (exp, 1);
2403 exp = TREE_OPERAND (exp, 0);
2404 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
2405 if (TREE_CODE (and_mask) != INTEGER_CST)
2410 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2411 punsignedp, pvolatilep);
2412 if ((inner == exp && and_mask == 0)
2413 || *pbitsize < 0 || offset != 0)
2416 /* Compute the mask to access the bitfield. */
2417 unsigned_type = type_for_size (*pbitsize, 1);
2418 precision = TYPE_PRECISION (unsigned_type);
2420 mask = build_int_2 (~0, ~0);
2421 TREE_TYPE (mask) = unsigned_type;
2422 force_fit_type (mask, 0);
2423 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2424 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2426 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
2428 mask = fold (build (BIT_AND_EXPR, unsigned_type,
2429 convert (unsigned_type, and_mask), mask));
2435 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
2436 bit positions. */
2439 all_ones_mask_p (mask, size)
2443 tree type = TREE_TYPE (mask);
2444 int precision = TYPE_PRECISION (type);
2447 tmask = build_int_2 (~0, ~0);
2448 TREE_TYPE (tmask) = signed_type (type);
2449 force_fit_type (tmask, 0);
2451 tree_int_cst_equal (mask,
2452 const_binop (RSHIFT_EXPR,
2453 const_binop (LSHIFT_EXPR, tmask,
2454 size_int (precision - size),
2456 size_int (precision - size), 0));
2459 /* Subroutine for fold_truthop: determine if an operand is simple enough
2460 to be evaluated unconditionally. */
2463 simple_operand_p (exp)
2466 /* Strip any conversions that don't change the machine mode. */
2467 while ((TREE_CODE (exp) == NOP_EXPR
2468 || TREE_CODE (exp) == CONVERT_EXPR)
2469 && (TYPE_MODE (TREE_TYPE (exp))
2470 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2471 exp = TREE_OPERAND (exp, 0);
2473 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2474 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2475 && ! TREE_ADDRESSABLE (exp)
2476 && ! TREE_THIS_VOLATILE (exp)
2477 && ! DECL_NONLOCAL (exp)
2478 /* Don't regard global variables as simple. They may be
2479 allocated in ways unknown to the compiler (shared memory,
2480 #pragma weak, etc). */
2481 && ! TREE_PUBLIC (exp)
2482 && ! DECL_EXTERNAL (exp)
2483 /* Loading a static variable is unduly expensive, but global
2484 registers aren't expensive. */
2485 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2488 /* Subroutine for fold_truthop: try to optimize a range test.
2490 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2492 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2493 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2494 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of
2495 the result.
2497 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2498 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2499 larger than HI_CST (they may be equal).
2501 We return the simplified tree or 0 if no optimization is possible. */
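/* For instance, "ch >= '0' && ch <= '9'" is rewritten here as
"(unsigned) (ch - '0') <= 9": one subtraction and a single unsigned
comparison replace the two signed comparisons. */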
2504 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2505 enum tree_code jcode, lo_code, hi_code;
2506 tree type, var, lo_cst, hi_cst;
2509 enum tree_code rcode;
2511 /* See if this is a range test and normalize the constant terms. */
2513 if (jcode == TRUTH_AND_EXPR)
2518 /* See if we have VAR != CST && VAR != CST+1. */
2519 if (! (hi_code == NE_EXPR
2520 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2521 && tree_int_cst_equal (integer_one_node,
2522 const_binop (MINUS_EXPR,
2523 hi_cst, lo_cst, 0))))
2531 if (hi_code == LT_EXPR)
2532 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2533 else if (hi_code != LE_EXPR)
2536 if (lo_code == GT_EXPR)
2537 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2539 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
2552 /* See if we have VAR == CST || VAR == CST+1. */
2553 if (! (hi_code == EQ_EXPR
2554 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2555 && tree_int_cst_equal (integer_one_node,
2556 const_binop (MINUS_EXPR,
2557 hi_cst, lo_cst, 0))))
2565 if (hi_code == GE_EXPR)
2566 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2567 else if (hi_code != GT_EXPR)
2570 if (lo_code == LE_EXPR)
2571 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2573 /* We now have VAR < LO_CST || VAR > HI_CST. */
2582 /* When normalizing, it is possible to both increment the smaller constant
2583 and decrement the larger constant. See if they are still ordered. */
2584 if (tree_int_cst_lt (hi_cst, lo_cst))
2587 /* Fail if VAR isn't an integer. */
2588 utype = TREE_TYPE (var);
2589 if (! INTEGRAL_TYPE_P (utype))
2592 /* The range test is invalid if subtracting the two constants results
2593 in overflow. This can happen in traditional mode. */
2594 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2595 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2598 if (! TREE_UNSIGNED (utype))
2600 utype = unsigned_type (utype);
2601 var = convert (utype, var);
2602 lo_cst = convert (utype, lo_cst);
2603 hi_cst = convert (utype, hi_cst);
2606 return fold (convert (type,
2607 build (rcode, utype,
2608 build (MINUS_EXPR, utype, var, lo_cst),
2609 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2612 /* Subroutine for fold_truthop: C is an INTEGER_CST interpreted as a P
2613 bit value. Arrange things so the extra bits will be set to zero if and
2614 only if C is sign-extended to its full width. */
2617 unextend (c, p, unsignedp)
2622 tree type = TREE_TYPE (c);
2623 int modesize = GET_MODE_BITSIZE (TYPE_MODE (type));
2626 if (p == modesize || unsignedp)
2629 if (TREE_UNSIGNED (type))
2630 c = convert (signed_type (type), c);
2632 /* We work by getting just the sign bit into the low-order bit, then
2633 into the high-order bit, then sign-extended. We then XOR that value
2634 with C. */
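/* Worked example (assuming an 8-bit mode and P == 4): C == 0xfc, which is
1100 sign-extended to 8 bits, becomes 0x0c, whose upper bits are zero, while
C == 0x0c, which is not a sign-extension of its low 4 bits, becomes 0xfc,
whose upper bits are nonzero. */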
2635 temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1), 0);
2636 temp = const_binop (BIT_AND_EXPR, temp, size_int (1), 0);
2637 temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1), 0);
2638 temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1), 0);
2639 return convert (type, const_binop (BIT_XOR_EXPR, c, temp, 0));
2642 /* Find ways of folding logical expressions of LHS and RHS:
2643 Try to merge two comparisons to the same innermost item.
2644 Look for range tests like "ch >= '0' && ch <= '9'".
2645 Look for combinations of simple terms on machines with expensive branches
2646 and evaluate the RHS unconditionally.
2648 For example, if we have p->a == 2 && p->b == 4 and we can make an
2649 object large enough to span both A and B, we can do this with a comparison
2650 against the object ANDed with a mask.
2652 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2653 operations to do this with one comparison.
2655 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2656 function and the one above.
2658 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2659 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2661 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
2662 two operands.
2664 We return the simplified tree or 0 if no optimization is possible. */
2667 fold_truthop (code, truth_type, lhs, rhs)
2668 enum tree_code code;
2669 tree truth_type, lhs, rhs;
2671 /* If this is the "or" of two comparisons, we can do something if
2672 the comparisons are NE_EXPR. If this is the "and", we can do something
2673 if the comparisons are EQ_EXPR. I.e.,
2674 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2676 WANTED_CODE is this operation code. For single bit fields, we can
2677 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2678 comparison for one-bit fields. */
2680 enum tree_code wanted_code;
2681 enum tree_code lcode, rcode;
2682 tree ll_arg, lr_arg, rl_arg, rr_arg;
2683 tree ll_inner, lr_inner, rl_inner, rr_inner;
2684 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2685 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2686 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2687 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2688 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2689 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2690 enum machine_mode lnmode, rnmode;
2691 tree ll_mask, lr_mask, rl_mask, rr_mask;
2692 tree l_const, r_const;
2694 int first_bit, end_bit;
2697 /* Start by getting the comparison codes and seeing if this looks like
2698 a range test. Fail if anything is volatile. If one operand is a
2699 BIT_AND_EXPR with the constant one, treat it as if it were surrounded
2700 by a comparison against zero. */
2702 if (TREE_SIDE_EFFECTS (lhs)
2703 || TREE_SIDE_EFFECTS (rhs))
2706 lcode = TREE_CODE (lhs);
2707 rcode = TREE_CODE (rhs);
2709 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2710 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2712 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2713 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
2715 if (TREE_CODE_CLASS (lcode) != '<'
2716 || TREE_CODE_CLASS (rcode) != '<')
2719 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2720 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2722 ll_arg = TREE_OPERAND (lhs, 0);
2723 lr_arg = TREE_OPERAND (lhs, 1);
2724 rl_arg = TREE_OPERAND (rhs, 0);
2725 rr_arg = TREE_OPERAND (rhs, 1);
2727 if (TREE_CODE (lr_arg) == INTEGER_CST
2728 && TREE_CODE (rr_arg) == INTEGER_CST
2729 && operand_equal_p (ll_arg, rl_arg, 0))
2731 if (tree_int_cst_lt (lr_arg, rr_arg))
2732 result = range_test (code, truth_type, lcode, rcode,
2733 ll_arg, lr_arg, rr_arg);
2735 result = range_test (code, truth_type, rcode, lcode,
2736 ll_arg, rr_arg, lr_arg);
2738 /* If this isn't a range test, it also isn't a comparison that
2739 can be merged. However, it wins to evaluate the RHS unconditionally
2740 on machines with expensive branches. */
2742 if (result == 0 && BRANCH_COST >= 2)
2744 if (TREE_CODE (ll_arg) != VAR_DECL
2745 && TREE_CODE (ll_arg) != PARM_DECL)
2747 /* Avoid evaluating the variable part twice. */
2748 ll_arg = save_expr (ll_arg);
2749 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2750 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2752 return build (code, truth_type, lhs, rhs);
2757 /* If the RHS can be evaluated unconditionally and its operands are
2758 simple, it wins to evaluate the RHS unconditionally on machines
2759 with expensive branches. In this case, this isn't a comparison
2760 that can be merged. */
2762 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2763 are with zero (tmw). */
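/* For example, on a target where BRANCH_COST is at least 2, something like
"a != 0 && b != 0", with A and B simple local variables (illustrative names),
is rebuilt below with TRUTH_AND_EXPR so that both tests are evaluated without
a conditional branch. */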
2765 if (BRANCH_COST >= 2
2766 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2767 && simple_operand_p (rl_arg)
2768 && simple_operand_p (rr_arg))
2769 return build (code, truth_type, lhs, rhs);
2771 /* See if the comparisons can be merged. Then get all the parameters for
2772 each side. */
2774 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2775 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2779 ll_inner = decode_field_reference (ll_arg,
2780 &ll_bitsize, &ll_bitpos, &ll_mode,
2781 &ll_unsignedp, &volatilep, &ll_mask);
2782 lr_inner = decode_field_reference (lr_arg,
2783 &lr_bitsize, &lr_bitpos, &lr_mode,
2784 &lr_unsignedp, &volatilep, &lr_mask);
2785 rl_inner = decode_field_reference (rl_arg,
2786 &rl_bitsize, &rl_bitpos, &rl_mode,
2787 &rl_unsignedp, &volatilep, &rl_mask);
2788 rr_inner = decode_field_reference (rr_arg,
2789 &rr_bitsize, &rr_bitpos, &rr_mode,
2790 &rr_unsignedp, &volatilep, &rr_mask);
2792 /* It must be true that the inner operation on the lhs of each
2793 comparison is the same if we are to be able to do anything.
2794 Then see if we have constants. If not, the same must be true for
2795 the rhs. */
2796 if (volatilep || ll_inner == 0 || rl_inner == 0
2797 || ! operand_equal_p (ll_inner, rl_inner, 0))
2800 if (TREE_CODE (lr_arg) == INTEGER_CST
2801 && TREE_CODE (rr_arg) == INTEGER_CST)
2802 l_const = lr_arg, r_const = rr_arg;
2803 else if (lr_inner == 0 || rr_inner == 0
2804 || ! operand_equal_p (lr_inner, rr_inner, 0))
2807 l_const = r_const = 0;
2809 /* If either comparison code is not correct for our logical operation,
2810 fail. However, we can convert a one-bit comparison against zero into
2811 the opposite comparison against that bit being set in the field. */
2813 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2814 if (lcode != wanted_code)
2816 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2822 if (rcode != wanted_code)
2824 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2830 /* See if we can find a mode that contains both fields being compared on
2831 the left. If we can't, fail. Otherwise, update all constants and masks
2832 to be relative to a field of that size. */
2833 first_bit = MIN (ll_bitpos, rl_bitpos);
2834 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2835 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2836 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2838 if (lnmode == VOIDmode)
2841 lnbitsize = GET_MODE_BITSIZE (lnmode);
2842 lnbitpos = first_bit & ~ (lnbitsize - 1);
2843 type = type_for_size (lnbitsize, 1);
2844 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2846 if (BYTES_BIG_ENDIAN)
2848 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2849 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2852 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2853 size_int (xll_bitpos), 0);
2854 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2855 size_int (xrl_bitpos), 0);
2859 l_const = convert (type, unextend (l_const, ll_bitsize, ll_unsignedp));
2860 l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
2861 if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
2862 fold (build1 (BIT_NOT_EXPR,
2866 warning ("comparison is always %s",
2867 wanted_code == NE_EXPR ? "one" : "zero");
2869 return convert (truth_type,
2870 wanted_code == NE_EXPR
2871 ? integer_one_node : integer_zero_node);
2876 r_const = convert (type, unextend (r_const, rl_bitsize, rl_unsignedp));
2877 r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos), 0);
2878 if (! integer_zerop (const_binop (BIT_AND_EXPR, r_const,
2879 fold (build1 (BIT_NOT_EXPR,
2883 warning ("comparison is always %s",
2884 wanted_code == NE_EXPR ? "one" : "zero");
2886 return convert (truth_type,
2887 wanted_code == NE_EXPR
2888 ? integer_one_node : integer_zero_node);
2892 /* If the right sides are not constant, do the same for them. Also,
2893 disallow this optimization if a size or signedness mismatch occurs
2894 between the left and right sides. */
2897 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2898 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2899 /* Make sure the two fields on the right
2900 correspond to the left without being swapped. */
2901 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2904 first_bit = MIN (lr_bitpos, rr_bitpos);
2905 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2906 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2907 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2909 if (rnmode == VOIDmode)
2912 rnbitsize = GET_MODE_BITSIZE (rnmode);
2913 rnbitpos = first_bit & ~ (rnbitsize - 1);
2914 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2916 if (BYTES_BIG_ENDIAN)
2918 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2919 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2922 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2923 size_int (xlr_bitpos), 0);
2924 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2925 size_int (xrr_bitpos), 0);
2927 /* Make a mask that corresponds to both fields being compared.
2928 Do this for both items being compared. If the masks agree,
2929 we can do this by masking both and comparing the masked
2930 values. */
2931 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2932 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2933 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2935 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2936 ll_unsignedp || rl_unsignedp);
2937 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2938 lr_unsignedp || rr_unsignedp);
2939 if (! all_ones_mask_p (ll_mask, lnbitsize))
2941 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2942 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2944 return build (wanted_code, truth_type, lhs, rhs);
2947 /* There is still another way we can do something: If both pairs of
2948 fields being compared are adjacent, we may be able to make a wider
2949 field containing them both. */
2950 if ((ll_bitsize + ll_bitpos == rl_bitpos
2951 && lr_bitsize + lr_bitpos == rr_bitpos)
2952 || (ll_bitpos == rl_bitpos + rl_bitsize
2953 && lr_bitpos == rr_bitpos + rr_bitsize))
2954 return build (wanted_code, truth_type,
2955 make_bit_field_ref (ll_inner, type,
2956 ll_bitsize + rl_bitsize,
2957 MIN (ll_bitpos, rl_bitpos),
2959 make_bit_field_ref (lr_inner, type,
2960 lr_bitsize + rr_bitsize,
2961 MIN (lr_bitpos, rr_bitpos),
2967 /* Handle the case of comparisons with constants. If there is something in
2968 common between the masks, those bits of the constants must be the same.
2969 If not, the condition is always false. Test for this to avoid generating
2970 incorrect code below. */
2971 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2972 if (! integer_zerop (result)
2973 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2974 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2976 if (wanted_code == NE_EXPR)
2978 warning ("`or' of unmatched not-equal tests is always 1");
2979 return convert (truth_type, integer_one_node);
2983 warning ("`and' of mutually exclusive equal-tests is always zero");
2984 return convert (truth_type, integer_zero_node);
2988 /* Construct the expression we will return. First get the component
2989 reference we will make. Unless the mask is all ones the width of
2990 that field, perform the mask operation. Then compare with the
2991 merged constant. */
2992 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2993 ll_unsignedp || rl_unsignedp);
2995 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2996 if (! all_ones_mask_p (ll_mask, lnbitsize))
2997 result = build (BIT_AND_EXPR, type, result, ll_mask);
2999 return build (wanted_code, truth_type, result,
3000 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
3003 /* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
3004 S, a SAVE_EXPR, return the expression actually being evaluated. Note
3005 that we may sometimes modify the tree. */
3008 strip_compound_expr (t, s)
3012 tree type = TREE_TYPE (t);
3013 enum tree_code code = TREE_CODE (t);
3015 /* See if this is the COMPOUND_EXPR we want to eliminate. */
3016 if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
3017 && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
3018 return TREE_OPERAND (t, 1);
3020 /* See if this is a COND_EXPR or a simple arithmetic operator. We
3021 don't bother handling any other types. */
3022 else if (code == COND_EXPR)
3024 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3025 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3026 TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
3028 else if (TREE_CODE_CLASS (code) == '1')
3029 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3030 else if (TREE_CODE_CLASS (code) == '<'
3031 || TREE_CODE_CLASS (code) == '2')
3033 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3034 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3040 /* Perform constant folding and related simplification of EXPR.
3041 The related simplifications include x*1 => x, x*0 => 0, etc.,
3042 and application of the associative law.
3043 NOP_EXPR conversions may be removed freely (as long as we
3044 are careful not to change the C type of the overall expression)
3045 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3046 but we can constant-fold them if they have constant operands. */
3052 register tree t = expr;
3053 tree t1 = NULL_TREE;
3055 tree type = TREE_TYPE (expr);
3056 register tree arg0, arg1;
3057 register enum tree_code code = TREE_CODE (t);
3061 /* WINS will be nonzero when the switch is done
3062 if all operands are constant. */
3066 /* Don't try to process an RTL_EXPR since its operands aren't trees. */
3067 if (code == RTL_EXPR)
3070 /* Return right away if already constant. */
3071 if (TREE_CONSTANT (t))
3073 if (code == CONST_DECL)
3074 return DECL_INITIAL (t);
3078 kind = TREE_CODE_CLASS (code);
3079 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3083 /* Special case for conversion ops that can have fixed point args. */
3084 arg0 = TREE_OPERAND (t, 0);
3086 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3088 STRIP_TYPE_NOPS (arg0);
3090 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3091 subop = TREE_REALPART (arg0);
3095 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3096 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3097 && TREE_CODE (subop) != REAL_CST
3098 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3100 /* Note that TREE_CONSTANT isn't enough:
3101 static var addresses are constant but we can't
3102 do arithmetic on them. */
3105 else if (kind == 'e' || kind == '<'
3106 || kind == '1' || kind == '2' || kind == 'r')
3108 register int len = tree_code_length[(int) code];
3110 for (i = 0; i < len; i++)
3112 tree op = TREE_OPERAND (t, i);
3116 continue; /* Valid for CALL_EXPR, at least. */
3118 if (kind == '<' || code == RSHIFT_EXPR)
3120 /* Signedness matters here. Perhaps we can refine this
3121 later. */
3122 STRIP_TYPE_NOPS (op);
3126 /* Strip any conversions that don't change the mode. */
3130 if (TREE_CODE (op) == COMPLEX_CST)
3131 subop = TREE_REALPART (op);
3135 if (TREE_CODE (subop) != INTEGER_CST
3136 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3137 && TREE_CODE (subop) != REAL_CST
3138 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3140 /* Note that TREE_CONSTANT isn't enough:
3141 static var addresses are constant but we can't
3142 do arithmetic on them. */
3152 /* If this is a commutative operation, and ARG0 is a constant, move it
3153 to ARG1 to reduce the number of tests below. */
3154 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3155 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3156 || code == BIT_AND_EXPR)
3157 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3159 tem = arg0; arg0 = arg1; arg1 = tem;
3161 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3162 TREE_OPERAND (t, 1) = tem;
3165 /* Now WINS is set as described above,
3166 ARG0 is the first operand of EXPR,
3167 and ARG1 is the second operand (if it has more than one operand).
3169 First check for cases where an arithmetic operation is applied to a
3170 compound, conditional, or comparison operation. Push the arithmetic
3171 operation inside the compound or conditional to see if any folding
3172 can then be done. Convert comparison to conditional for this purpose.
3173 This also optimizes non-constant cases that used to be done in
3174 expand_expr.
3176 Before we do that, see if this is a BIT_AND_EXPR or a BIT_IOR_EXPR,
3177 one of the operands is a comparison and the other is a comparison, a
3178 BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
3179 code below would make the expression more complex. Change it to a
3180 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
3181 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
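/* For instance, "(a < b) & (c < d)", where both operands are comparisons, is
rebuilt as a TRUTH_AND_EXPR of the two comparisons, and "(a < b) != (c < d)"
becomes a TRUTH_XOR_EXPR, before any further folding is attempted. */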
3183 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
3184 || code == EQ_EXPR || code == NE_EXPR)
3185 && ((truth_value_p (TREE_CODE (arg0))
3186 && (truth_value_p (TREE_CODE (arg1))
3187 || (TREE_CODE (arg1) == BIT_AND_EXPR
3188 && integer_onep (TREE_OPERAND (arg1, 1)))))
3189 || (truth_value_p (TREE_CODE (arg1))
3190 && (truth_value_p (TREE_CODE (arg0))
3191 || (TREE_CODE (arg0) == BIT_AND_EXPR
3192 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3194 t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
3195 : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
3199 if (code == EQ_EXPR)
3200 t = invert_truthvalue (t);
3205 if (TREE_CODE_CLASS (code) == '1')
3207 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3208 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3209 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3210 else if (TREE_CODE (arg0) == COND_EXPR)
3212 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3213 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3214 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3216 /* If this was a conversion, and all we did was to move it
3217 inside the COND_EXPR, bring it back out. But leave it if
3218 it is a conversion from integer to integer and the
3219 result precision is no wider than a word since such a
3220 conversion is cheap and may be optimized away by combine,
3221 while it couldn't if it were outside the COND_EXPR. Then return
3222 so we don't get into an infinite recursion loop taking the
3223 conversion out and then back in. */
3225 if ((code == NOP_EXPR || code == CONVERT_EXPR
3226 || code == NON_LVALUE_EXPR)
3227 && TREE_CODE (t) == COND_EXPR
3228 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3229 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3230 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3231 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0)))
3232 && ! (INTEGRAL_TYPE_P (TREE_TYPE (t))
3233 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)))
3234 && TYPE_PRECISION (TREE_TYPE (t)) <= BITS_PER_WORD))
3235 t = build1 (code, type,
3237 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3238 TREE_OPERAND (t, 0),
3239 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3240 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3243 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3244 return fold (build (COND_EXPR, type, arg0,
3245 fold (build1 (code, type, integer_one_node)),
3246 fold (build1 (code, type, integer_zero_node))));
3248 else if (TREE_CODE_CLASS (code) == '2'
3249 || TREE_CODE_CLASS (code) == '<')
3251 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3252 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3253 fold (build (code, type,
3254 arg0, TREE_OPERAND (arg1, 1))));
3255 else if (TREE_CODE (arg1) == COND_EXPR
3256 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3258 tree test, true_value, false_value;
3260 if (TREE_CODE (arg1) == COND_EXPR)
3262 test = TREE_OPERAND (arg1, 0);
3263 true_value = TREE_OPERAND (arg1, 1);
3264 false_value = TREE_OPERAND (arg1, 2);
3269 true_value = integer_one_node;
3270 false_value = integer_zero_node;
3273 /* If ARG0 is complex we want to make sure we only evaluate
3274 it once. Though this is only required if it is volatile, it
3275 might be more efficient even if it is not. However, if we
3276 succeed in folding one part to a constant, we do not need
3277 to make this SAVE_EXPR. Since we do this optimization
3278 primarily to see if we do end up with a constant and this
3279 SAVE_EXPR interferes with later optimizations, suppressing
3280 it when we can is important. */
3282 if (TREE_CODE (arg0) != SAVE_EXPR
3283 && ((TREE_CODE (arg0) != VAR_DECL
3284 && TREE_CODE (arg0) != PARM_DECL)
3285 || TREE_SIDE_EFFECTS (arg0)))
3287 tree lhs = fold (build (code, type, arg0, true_value));
3288 tree rhs = fold (build (code, type, arg0, false_value));
3290 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3291 return fold (build (COND_EXPR, type, test, lhs, rhs));
3293 arg0 = save_expr (arg0);
3296 test = fold (build (COND_EXPR, type, test,
3297 fold (build (code, type, arg0, true_value)),
3298 fold (build (code, type, arg0, false_value))));
3299 if (TREE_CODE (arg0) == SAVE_EXPR)
3300 return build (COMPOUND_EXPR, type,
3301 convert (void_type_node, arg0),
3302 strip_compound_expr (test, arg0));
3304 return convert (type, test);
3307 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3308 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3309 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3310 else if (TREE_CODE (arg0) == COND_EXPR
3311 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3313 tree test, true_value, false_value;
3315 if (TREE_CODE (arg0) == COND_EXPR)
3317 test = TREE_OPERAND (arg0, 0);
3318 true_value = TREE_OPERAND (arg0, 1);
3319 false_value = TREE_OPERAND (arg0, 2);
3324 true_value = integer_one_node;
3325 false_value = integer_zero_node;
3328 if (TREE_CODE (arg1) != SAVE_EXPR
3329 && ((TREE_CODE (arg1) != VAR_DECL
3330 && TREE_CODE (arg1) != PARM_DECL)
3331 || TREE_SIDE_EFFECTS (arg1)))
3333 tree lhs = fold (build (code, type, true_value, arg1));
3334 tree rhs = fold (build (code, type, false_value, arg1));
3336 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs)
3337 || TREE_CONSTANT (arg1))
3338 return fold (build (COND_EXPR, type, test, lhs, rhs));
3340 arg1 = save_expr (arg1);
3343 test = fold (build (COND_EXPR, type, test,
3344 fold (build (code, type, true_value, arg1)),
3345 fold (build (code, type, false_value, arg1))));
3346 if (TREE_CODE (arg1) == SAVE_EXPR)
3347 return build (COMPOUND_EXPR, type,
3348 convert (void_type_node, arg1),
3349 strip_compound_expr (test, arg1));
3351 return convert (type, test);
3354 else if (TREE_CODE_CLASS (code) == '<'
3355 && TREE_CODE (arg0) == COMPOUND_EXPR)
3356 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3357 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3358 else if (TREE_CODE_CLASS (code) == '<'
3359 && TREE_CODE (arg1) == COMPOUND_EXPR)
3360 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3361 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3373 return fold (DECL_INITIAL (t));
3378 case FIX_TRUNC_EXPR:
3379 /* Other kinds of FIX are not handled properly by fold_convert. */
3381 if (TREE_TYPE (TREE_OPERAND (t, 0)) == TREE_TYPE (t))
3382 return TREE_OPERAND (t, 0);
3384 /* Handle cases of two conversions in a row. */
3385 if (TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3386 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3388 tree inside_type = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3389 tree inter_type = TREE_TYPE (TREE_OPERAND (t, 0));
3390 tree final_type = TREE_TYPE (t);
3391 int inside_int = INTEGRAL_TYPE_P (inside_type);
3392 int inside_ptr = POINTER_TYPE_P (inside_type);
3393 int inside_float = FLOAT_TYPE_P (inside_type);
3394 int inside_prec = TYPE_PRECISION (inside_type);
3395 int inside_unsignedp = TREE_UNSIGNED (inside_type);
3396 int inter_int = INTEGRAL_TYPE_P (inter_type);
3397 int inter_ptr = POINTER_TYPE_P (inter_type);
3398 int inter_float = FLOAT_TYPE_P (inter_type);
3399 int inter_prec = TYPE_PRECISION (inter_type);
3400 int inter_unsignedp = TREE_UNSIGNED (inter_type);
3401 int final_int = INTEGRAL_TYPE_P (final_type);
3402 int final_ptr = POINTER_TYPE_P (final_type);
3403 int final_float = FLOAT_TYPE_P (final_type);
3404 int final_prec = TYPE_PRECISION (final_type);
3405 int final_unsignedp = TREE_UNSIGNED (final_type);
3407 /* In addition to the cases of two conversions in a row
3408 handled below, if we are converting something to its own
3409 type via an object of identical or wider precision, neither
3410 conversion is needed. */
3411 if (inside_type == final_type
3412 && ((inter_int && final_int) || (inter_float && final_float))
3413 && inter_prec >= final_prec)
3414 return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
3416 /* Likewise, if the intermediate and final types are either both
3417 float or both integer, we don't need the middle conversion if
3418 it is wider than the final type and doesn't change the signedness
3419 (for integers). Avoid this if the final type is a pointer
3420 since then we sometimes need the inner conversion. */
3421 if ((((inter_int || inter_ptr) && (inside_int || inside_ptr))
3422 || (inter_float && inside_float))
3423 && inter_prec >= inside_prec
3424 && (inter_float || inter_unsignedp == inside_unsignedp)
3426 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3428 /* Two conversions in a row are not needed unless:
3429 - some conversion is floating-point (overstrict for now), or
3430 - the intermediate type is narrower than both initial and
3431 final types, or
3432 - the intermediate type and innermost type differ in signedness,
3433 and the outermost type is wider than the intermediate, or
3434 - the initial type is a pointer type and the precisions of the
3435 intermediate and final types differ, or
3436 - the final type is a pointer type and the precisions of the
3437 initial and intermediate types differ. */
3438 if (! inside_float && ! inter_float && ! final_float
3439 && (inter_prec > inside_prec || inter_prec > final_prec)
3440 && ! (inside_int && inter_int
3441 && inter_unsignedp != inside_unsignedp
3442 && inter_prec < final_prec)
3443 && ((inter_unsignedp && inter_prec > inside_prec)
3444 == (final_unsignedp && final_prec > inter_prec))
3445 && ! (inside_ptr && inter_prec != final_prec)
3446 && ! (final_ptr && inside_prec != inter_prec))
3447 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3450 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3451 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3452 /* Detect assigning a bitfield. */
3453 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3454 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3456 /* Don't leave an assignment inside a conversion
3457 unless assigning a bitfield. */
3458 tree prev = TREE_OPERAND (t, 0);
3459 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3460 /* First do the assignment, then return converted constant. */
3461 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3467 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3470 return fold_convert (t, arg0);
3472 #if 0 /* This loses on &"foo"[0]. */
3477 /* Fold an expression like: "foo"[2] */
3478 if (TREE_CODE (arg0) == STRING_CST
3479 && TREE_CODE (arg1) == INTEGER_CST
3480 && !TREE_INT_CST_HIGH (arg1)
3481 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3483 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3484 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3485 force_fit_type (t, 0);
3492 if (TREE_CODE (arg0) == CONSTRUCTOR)
3494 tree m = purpose_member (arg1, CONSTRUCTOR_ELTS (arg0));
3501 TREE_CONSTANT (t) = wins;
3507 if (TREE_CODE (arg0) == INTEGER_CST)
3509 HOST_WIDE_INT low, high;
3510 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3511 TREE_INT_CST_HIGH (arg0),
3513 t = build_int_2 (low, high);
3514 TREE_TYPE (t) = type;
3516 = (TREE_OVERFLOW (arg0)
3517 | force_fit_type (t, overflow));
3518 TREE_CONSTANT_OVERFLOW (t)
3519 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3521 else if (TREE_CODE (arg0) == REAL_CST)
3522 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3523 TREE_TYPE (t) = type;
3525 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3526 return TREE_OPERAND (arg0, 0);
3528 /* Convert - (a - b) to (b - a) for non-floating-point. */
3529 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3530 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3531 TREE_OPERAND (arg0, 0));
3538 if (TREE_CODE (arg0) == INTEGER_CST)
3540 if (! TREE_UNSIGNED (type)
3541 && TREE_INT_CST_HIGH (arg0) < 0)
3543 HOST_WIDE_INT low, high;
3544 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3545 TREE_INT_CST_HIGH (arg0),
3547 t = build_int_2 (low, high);
3548 TREE_TYPE (t) = type;
3550 = (TREE_OVERFLOW (arg0)
3551 | force_fit_type (t, overflow));
3552 TREE_CONSTANT_OVERFLOW (t)
3553 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3556 else if (TREE_CODE (arg0) == REAL_CST)
3558 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3559 t = build_real (type,
3560 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3562 TREE_TYPE (t) = type;
3564 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3565 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3569 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
3571 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
3572 return build (COMPLEX_EXPR, TREE_TYPE (arg0),
3573 TREE_OPERAND (arg0, 0),
3574 fold (build1 (NEGATE_EXPR,
3575 TREE_TYPE (TREE_TYPE (arg0)),
3576 TREE_OPERAND (arg0, 1))));
3577 else if (TREE_CODE (arg0) == COMPLEX_CST)
3578 return build_complex (TREE_OPERAND (arg0, 0),
3579 fold (build1 (NEGATE_EXPR,
3580 TREE_TYPE (TREE_TYPE (arg0)),
3581 TREE_OPERAND (arg0, 1))));
3582 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
3583 return fold (build (TREE_CODE (arg0), type,
3584 fold (build1 (CONJ_EXPR, type,
3585 TREE_OPERAND (arg0, 0))),
3586 fold (build1 (CONJ_EXPR,
3587 type, TREE_OPERAND (arg0, 1)))));
3588 else if (TREE_CODE (arg0) == CONJ_EXPR)
3589 return TREE_OPERAND (arg0, 0);
3595 if (TREE_CODE (arg0) == INTEGER_CST)
3596 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3597 ~ TREE_INT_CST_HIGH (arg0));
3598 TREE_TYPE (t) = type;
3599 force_fit_type (t, 0);
3600 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
3601 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3603 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3604 return TREE_OPERAND (arg0, 0);
3608 /* A + (-B) -> A - B */
3609 if (TREE_CODE (arg1) == NEGATE_EXPR)
3610 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3611 else if (! FLOAT_TYPE_P (type))
3613 if (integer_zerop (arg1))
3614 return non_lvalue (convert (type, arg0));
3616 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3617 with a constant, and the two constants have no bits in common,
3618 we should treat this as a BIT_IOR_EXPR since this may produce more
3619 simplifications. */
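/* E.g. "(a & 0x0f) + (b & 0xf0)": the two masks share no bits, so no carry
can propagate between them and the addition is equivalent to
"(a & 0x0f) | (b & 0xf0)". */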
3620 if (TREE_CODE (arg0) == BIT_AND_EXPR
3621 && TREE_CODE (arg1) == BIT_AND_EXPR
3622 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3623 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3624 && integer_zerop (const_binop (BIT_AND_EXPR,
3625 TREE_OPERAND (arg0, 1),
3626 TREE_OPERAND (arg1, 1), 0)))
3628 code = BIT_IOR_EXPR;
3632 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3633 about the case where C is a constant, just try one of the
3634 four possibilities. */
3636 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3637 && operand_equal_p (TREE_OPERAND (arg0, 1),
3638 TREE_OPERAND (arg1, 1), 0))
3639 return fold (build (MULT_EXPR, type,
3640 fold (build (PLUS_EXPR, type,
3641 TREE_OPERAND (arg0, 0),
3642 TREE_OPERAND (arg1, 0))),
3643 TREE_OPERAND (arg0, 1)));
3645 /* In IEEE floating point, x+0 may not equal x. */
3646 else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3648 && real_zerop (arg1))
3649 return non_lvalue (convert (type, arg0));
3651 /* In most languages, we can't associate operations on floats
3652 through parentheses. Rather than remember where the parentheses
3653 were, we don't associate floats at all. It shouldn't matter much.
3654 However, associating multiplications is only very slightly
3655 inaccurate, so do that if -ffast-math is specified. */
3656 if (FLOAT_TYPE_P (type)
3657 && ! (flag_fast_math && code == MULT_EXPR))
3660 /* The varsign == -1 cases happen only for addition and subtraction.
3661 It says that the arg that was split was really CON minus VAR.
3662 The rest of the code applies to all associative operations. */
3668 if (split_tree (arg0, code, &var, &con, &varsign))
3672 /* EXPR is (CON-VAR) +- ARG1. */
3673 /* If it is + and VAR==ARG1, return just CONST. */
3674 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3675 return convert (TREE_TYPE (t), con);
3677 /* If ARG0 is a constant, don't change things around;
3678 instead keep all the constant computations together. */
3680 if (TREE_CONSTANT (arg0))
3683 /* Otherwise return (CON +- ARG1) - VAR. */
3684 t = build (MINUS_EXPR, type,
3685 fold (build (code, type, con, arg1)), var);
3689 /* EXPR is (VAR+CON) +- ARG1. */
3690 /* If it is - and VAR==ARG1, return just CONST. */
3691 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3692 return convert (TREE_TYPE (t), con);
3694 /* If ARG0 is a constant, don't change things around;
3695 instead keep all the constant computations together. */
3697 if (TREE_CONSTANT (arg0))
3700 /* Otherwise return VAR +- (ARG1 +- CON). */
3701 tem = fold (build (code, type, arg1, con));
3702 t = build (code, type, var, tem);
3704 if (integer_zerop (tem)
3705 && (code == PLUS_EXPR || code == MINUS_EXPR))
3706 return convert (type, var);
3707 /* If we have x +/- (c - d) [c an explicit integer]
3708 change it to x -/+ (d - c) since if d is relocatable
3709 then the latter can be a single immediate insn
3710 and the former cannot. */
3711 if (TREE_CODE (tem) == MINUS_EXPR
3712 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3714 tree tem1 = TREE_OPERAND (tem, 1);
3715 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3716 TREE_OPERAND (tem, 0) = tem1;
3718 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3724 if (split_tree (arg1, code, &var, &con, &varsign))
3726 if (TREE_CONSTANT (arg1))
3731 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3733 /* EXPR is ARG0 +- (CON +- VAR). */
3734 if (TREE_CODE (t) == MINUS_EXPR
3735 && operand_equal_p (var, arg0, 0))
3737 /* If VAR and ARG0 cancel, return just CON or -CON. */
3738 if (code == PLUS_EXPR)
3739 return convert (TREE_TYPE (t), con);
3740 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3741 convert (TREE_TYPE (t), con)));
3744 t = build (TREE_CODE (t), type,
3745 fold (build (code, TREE_TYPE (t), arg0, con)), var);
3747 if (integer_zerop (TREE_OPERAND (t, 0))
3748 && TREE_CODE (t) == PLUS_EXPR)
3749 return convert (TREE_TYPE (t), var);
3754 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3755 if (TREE_CODE (arg1) == REAL_CST)
3757 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3759 t1 = const_binop (code, arg0, arg1, 0);
3760 if (t1 != NULL_TREE)
3762 /* The return value should always have
3763 the same type as the original expression. */
3764 TREE_TYPE (t1) = TREE_TYPE (t);
3770 if (! FLOAT_TYPE_P (type))
3772 if (! wins && integer_zerop (arg0))
3773 return build1 (NEGATE_EXPR, type, arg1);
3774 if (integer_zerop (arg1))
3775 return non_lvalue (convert (type, arg0));
3777 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3778 about the case where C is a constant, just try one of the
3779 four possibilities. */
3781 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3782 && operand_equal_p (TREE_OPERAND (arg0, 1),
3783 TREE_OPERAND (arg1, 1), 0))
3784 return fold (build (MULT_EXPR, type,
3785 fold (build (MINUS_EXPR, type,
3786 TREE_OPERAND (arg0, 0),
3787 TREE_OPERAND (arg1, 0))),
3788 TREE_OPERAND (arg0, 1)));
3790 /* Convert A - (-B) to A + B. */
3791 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3792 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3794 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3797 /* Except with IEEE floating point, 0-x equals -x. */
3798 if (! wins && real_zerop (arg0))
3799 return build1 (NEGATE_EXPR, type, arg1);
3800 /* Except with IEEE floating point, x-0 equals x. */
3801 if (real_zerop (arg1))
3802 return non_lvalue (convert (type, arg0));
3805 /* Fold &x - &x. This can happen from &x.foo - &x.
3806 This is unsafe for certain floats even in non-IEEE formats.
3807 In IEEE, it is unsafe because it does wrong for NaNs.
3808 Also note that operand_equal_p is always false if an operand
3809 is volatile. */
3811 if ((! FLOAT_TYPE_P (type) || flag_fast_math)
3812 && operand_equal_p (arg0, arg1, 0))
3813 return convert (type, integer_zero_node);
3818 if (! FLOAT_TYPE_P (type))
3820 if (integer_zerop (arg1))
3821 return omit_one_operand (type, arg1, arg0);
3822 if (integer_onep (arg1))
3823 return non_lvalue (convert (type, arg0));
3825 /* ((A / C) * C) is A if the division is an
3826 EXACT_DIV_EXPR. Since C is normally a constant,
3827 just check for one of the four possibilities. */
3829 if (TREE_CODE (arg0) == EXACT_DIV_EXPR
3830 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3831 return TREE_OPERAND (arg0, 0);
3833 /* (a * (1 << b)) is (a << b) */
3834 if (TREE_CODE (arg1) == LSHIFT_EXPR
3835 && integer_onep (TREE_OPERAND (arg1, 0)))
3836 return fold (build (LSHIFT_EXPR, type, arg0,
3837 TREE_OPERAND (arg1, 1)));
3838 if (TREE_CODE (arg0) == LSHIFT_EXPR
3839 && integer_onep (TREE_OPERAND (arg0, 0)))
3840 return fold (build (LSHIFT_EXPR, type, arg1,
3841 TREE_OPERAND (arg0, 1)));
3845 /* x*0 is 0, except for IEEE floating point. */
3846 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3848 && real_zerop (arg1))
3849 return omit_one_operand (type, arg1, arg0);
3850 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3851 However, ANSI says we can drop signals,
3852 so we can do this anyway. */
3853 if (real_onep (arg1))
3854 return non_lvalue (convert (type, arg0));
3856 if (! wins && real_twop (arg1))
3858 tree arg = save_expr (arg0);
3859 return build (PLUS_EXPR, type, arg, arg);
3866 if (integer_all_onesp (arg1))
3867 return omit_one_operand (type, arg1, arg0);
3868 if (integer_zerop (arg1))
3869 return non_lvalue (convert (type, arg0));
3870 t1 = distribute_bit_expr (code, type, arg0, arg1);
3871 if (t1 != NULL_TREE)
3874 /* (A << C1) | (A >> C2), where A is unsigned and C1+C2 is the size of A,
3875 is a rotate of A by C1 bits. */
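/* E.g. for a 32-bit unsigned X (illustrative), "(x << 3) | (x >> 29)" is
recognized here and rebuilt as a single LROTATE_EXPR of X by 3 bits. */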
3877 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3878 || TREE_CODE (arg0) == LSHIFT_EXPR)
3879 && (TREE_CODE (arg1) == RSHIFT_EXPR
3880 || TREE_CODE (arg1) == LSHIFT_EXPR)
3881 && TREE_CODE (arg0) != TREE_CODE (arg1)
3882 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3883 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3884 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3885 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3886 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3887 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3888 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3889 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3890 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3891 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3892 TREE_CODE (arg0) == LSHIFT_EXPR
3893 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
3898 if (integer_zerop (arg1))
3899 return non_lvalue (convert (type, arg0));
3900 if (integer_all_onesp (arg1))
3901 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3906 if (integer_all_onesp (arg1))
3907 return non_lvalue (convert (type, arg0));
3908 if (integer_zerop (arg1))
3909 return omit_one_operand (type, arg1, arg0);
3910 t1 = distribute_bit_expr (code, type, arg0, arg1);
3911 if (t1 != NULL_TREE)
3913 /* Simplify ((int)c & 0x377) into (int)c, if c is unsigned char. */
3914 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3915 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3917 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3918 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3919 && (~TREE_INT_CST_LOW (arg0)
3920 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3921 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3923 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3924 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3926 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3927 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3928 && (~TREE_INT_CST_LOW (arg1)
3929 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3930 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3934 case BIT_ANDTC_EXPR:
3935 if (integer_all_onesp (arg0))
3936 return non_lvalue (convert (type, arg1));
3937 if (integer_zerop (arg0))
3938 return omit_one_operand (type, arg0, arg1);
3939 if (TREE_CODE (arg1) == INTEGER_CST)
3941 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3942 code = BIT_AND_EXPR;
3948 /* In most cases, do nothing with a divide by zero. */
3949 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3950 #ifndef REAL_INFINITY
3951 if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
3954 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3956 /* In IEEE floating point, x/1 is not equivalent to x for snans.
3957 However, ANSI says we can drop signals, so we can do this anyway. */
3958 if (real_onep (arg1))
3959 return non_lvalue (convert (type, arg0));
3961 /* If ARG1 is a constant, we can convert this to a multiply by the
3962 reciprocal. This does not have the same rounding properties,
3963 so only do this if -ffast-math. We can actually always safely
3964 do it if ARG1 is a power of two, but it's hard to tell if it is
3965 or not in a portable manner. */
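/* E.g. with -ffast-math, "x / 4.0" becomes "x * 0.25"; for a power-of-two
divisor like this the reciprocal is exact, but in general the rounding may
differ, which is why the transformation is gated on flag_fast_math. */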
3966 if (TREE_CODE (arg1) == REAL_CST && flag_fast_math
3967 && 0 != (tem = const_binop (code, build_real (type, dconst1),
3969 return fold (build (MULT_EXPR, type, arg0, tem));
3973 case TRUNC_DIV_EXPR:
3974 case ROUND_DIV_EXPR:
3975 case FLOOR_DIV_EXPR:
3977 case EXACT_DIV_EXPR:
3978 if (integer_onep (arg1))
3979 return non_lvalue (convert (type, arg0));
3980 if (integer_zerop (arg1))
3983 /* If we have ((a / C1) / C2) where both divisions are the same type, try
3984 to simplify. First see if C1 * C2 overflows or not. */
3985 if (TREE_CODE (arg0) == code && TREE_CODE (arg1) == INTEGER_CST
3986 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
3990 new_divisor = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 1), arg1, 0);
3991 tem = const_binop (FLOOR_DIV_EXPR, new_divisor, arg1, 0);
3993 if (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_LOW (tem)
3994 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_HIGH (tem))
3996 /* If no overflow, divide by C1*C2. */
3997 return fold (build (code, type, TREE_OPERAND (arg0, 0), new_divisor));
4001 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
4002 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
4003 expressions, which often appear in the offsets or sizes of
4004 objects with a varying size. Only deal with positive divisors
4005 and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
4007 Look for NOPs and SAVE_EXPRs inside. */
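/* For instance, "(a * 8 + 4) / 2" has C1 == 8, C2 == 4 and C3 == 2, with
C1 % C3 == 0, and is folded below to "a * 4 + 2". */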
4009 if (TREE_CODE (arg1) == INTEGER_CST
4010 && tree_int_cst_sgn (arg1) >= 0)
4012 int have_save_expr = 0;
4013 tree c2 = integer_zero_node;
4016 if (TREE_CODE (xarg0) == SAVE_EXPR)
4017 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4021 if (TREE_CODE (xarg0) == PLUS_EXPR
4022 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4023 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4024 else if (TREE_CODE (xarg0) == MINUS_EXPR
4025 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4026 /* If we are doing this computation unsigned, the negate
4027 is incorrect. */
4028 && ! TREE_UNSIGNED (type))
4030 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4031 xarg0 = TREE_OPERAND (xarg0, 0);
4034 if (TREE_CODE (xarg0) == SAVE_EXPR)
4035 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4039 if (TREE_CODE (xarg0) == MULT_EXPR
4040 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4041 && tree_int_cst_sgn (TREE_OPERAND (xarg0, 1)) >= 0
4042 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
4043 TREE_OPERAND (xarg0, 1), arg1, 1))
4044 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
4045 TREE_OPERAND (xarg0, 1), 1)))
4046 && (tree_int_cst_sgn (c2) >= 0
4047 || integer_zerop (const_binop (TRUNC_MOD_EXPR, c2,
4050 tree outer_div = integer_one_node;
4051 tree c1 = TREE_OPERAND (xarg0, 1);
4054 /* If C3 > C1, set them equal and do a divide by
4055 C3/C1 at the end of the operation. */
4056 if (tree_int_cst_lt (c1, c3))
4057 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
4059 /* The result is A * (C1/C3) + (C2/C3). */
4060 t = fold (build (PLUS_EXPR, type,
4061 fold (build (MULT_EXPR, type,
4062 TREE_OPERAND (xarg0, 0),
4063 const_binop (code, c1, c3, 1))),
4064 const_binop (code, c2, c3, 1)));
4066 if (! integer_onep (outer_div))
4067 t = fold (build (code, type, t, convert (type, outer_div)));
4079 case FLOOR_MOD_EXPR:
4080 case ROUND_MOD_EXPR:
4081 case TRUNC_MOD_EXPR:
4082 if (integer_onep (arg1))
4083 return omit_one_operand (type, integer_zero_node, arg0);
4084 if (integer_zerop (arg1))
4087 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
4088 where C1 % C3 == 0. Handle similarly to the division case,
4089 but don't bother with SAVE_EXPRs. */
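/* For instance, with A unsigned, "(a * 8 + 5) % 4" has C1 == 8, C2 == 5 and
C3 == 4, with C1 % C3 == 0, so the whole expression reduces to the constant
C2 % C3 == 1; A is still evaluated for its side effects via
omit_one_operand. */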
4091 if (TREE_CODE (arg1) == INTEGER_CST
4092 && ! integer_zerop (arg1))
4094 tree c2 = integer_zero_node;
4097 if (TREE_CODE (xarg0) == PLUS_EXPR
4098 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4099 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4100 else if (TREE_CODE (xarg0) == MINUS_EXPR
4101 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4102 && ! TREE_UNSIGNED (type))
4104 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4105 xarg0 = TREE_OPERAND (xarg0, 0);
4110 if (TREE_CODE (xarg0) == MULT_EXPR
4111 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4112 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
4113 TREE_OPERAND (xarg0, 1),
4115 && tree_int_cst_sgn (c2) >= 0)
4116 /* The result is (C2%C3). */
4117 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
4118 TREE_OPERAND (xarg0, 0));
4127 if (integer_zerop (arg1))
4128 return non_lvalue (convert (type, arg0));
4129 /* Since a negative shift count is not well-defined,
4130 don't try to compute it in the compiler. */
4131 if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0)
4133 /* Rewrite an LROTATE_EXPR by a constant into an
4134 RROTATE_EXPR by a new constant. */
4135 if (code == LROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST)
4137 TREE_SET_CODE (t, RROTATE_EXPR);
4138 code = RROTATE_EXPR;
4139 TREE_OPERAND (t, 1) = arg1
4142 convert (TREE_TYPE (arg1),
4143 build_int_2 (GET_MODE_BITSIZE (TYPE_MODE (type)), 0)),
4145 if (tree_int_cst_sgn (arg1) < 0)
4149 /* If we have a rotate of a bit operation with the rotate count and
4150 the second operand of the bit operation both constant,
4151 permute the two operations. */
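/* E.g. for a 32-bit unsigned X, "(x & 0xff00) rotated right by 8" becomes
"(x rotated right by 8) & 0xff", where the rotated mask folds to a
constant. */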
4152 if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
4153 && (TREE_CODE (arg0) == BIT_AND_EXPR
4154 || TREE_CODE (arg0) == BIT_ANDTC_EXPR
4155 || TREE_CODE (arg0) == BIT_IOR_EXPR
4156 || TREE_CODE (arg0) == BIT_XOR_EXPR)
4157 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
4158 return fold (build (TREE_CODE (arg0), type,
4159 fold (build (code, type,
4160 TREE_OPERAND (arg0, 0), arg1)),
4161 fold (build (code, type,
4162 TREE_OPERAND (arg0, 1), arg1))));
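	 /* For instance, with a 32-bit type, rotating (x & 0xff) right by 8
	    becomes (x rotated right by 8) & 0xff000000, so the bitwise
	    constant operand is folded at compile time.  */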
4164 /* Two consecutive rotates adding up to the width of the mode can
4166 if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
4167 && TREE_CODE (arg0) == RROTATE_EXPR
4168 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4169 && TREE_INT_CST_HIGH (arg1) == 0
4170 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
4171 && ((TREE_INT_CST_LOW (arg1)
4172 + TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)))
4173 == GET_MODE_BITSIZE (TYPE_MODE (type))))
4174 return TREE_OPERAND (arg0, 0);
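	 /* For instance, with a 32-bit type, rotating x right by 12 and then
	    by 20 leaves x unchanged, since 12 + 20 == 32.  */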
4179 if (operand_equal_p (arg0, arg1, 0))
4181 if (INTEGRAL_TYPE_P (type)
4182 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
4183 return omit_one_operand (type, arg1, arg0);
4187 if (operand_equal_p (arg0, arg1, 0))
4189 if (INTEGRAL_TYPE_P (type)
4190 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
4191 return omit_one_operand (type, arg1, arg0);
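	 /* For instance, for type int, min (x, INT_MIN) folds to INT_MIN and
	    max (x, INT_MAX) folds to INT_MAX; x is still evaluated when it
	    has side effects.  */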
4194 case TRUTH_NOT_EXPR:
4195 /* Note that the operand of this must be an int
4196 and its values must be 0 or 1.
4197 ("true" is a fixed value perhaps depending on the language,
4198 but we don't handle values other than 1 correctly yet.) */
4199 return invert_truthvalue (arg0);
4201 case TRUTH_ANDIF_EXPR:
4202 /* Note that the operands of this must be ints
4203 and their values must be 0 or 1.
4204 ("true" is a fixed value perhaps depending on the language.) */
4205 /* If first arg is constant zero, return it. */
4206 if (integer_zerop (arg0))
4208 case TRUTH_AND_EXPR:
4209 /* If either arg is constant true, drop it. */
4210 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4211 return non_lvalue (arg1);
4212 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4213 return non_lvalue (arg0);
4214 /* If second arg is constant zero, result is zero, but first arg
4215 must be evaluated. */
4216 if (integer_zerop (arg1))
4217 return omit_one_operand (type, arg1, arg0);
4220 /* We only do these simplifications if we are optimizing. */
4224 /* Check for things like (A || B) && (A || C). We can convert this
4225 to A || (B && C). Note that either operator can be any of the four
4226 truth and/or operations and the transformation will still be
4227 valid. Also note that we only care about order for the
4228 ANDIF and ORIF operators. */
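	 /* For instance, (a || b) && (a || c) folds to a || (b && c); by the
	    same rule, (a && b) || (a && c) can become a && (b || c).  */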
4229 if (TREE_CODE (arg0) == TREE_CODE (arg1)
4230 && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
4231 || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
4232 || TREE_CODE (arg0) == TRUTH_AND_EXPR
4233 || TREE_CODE (arg0) == TRUTH_OR_EXPR))
4235 tree a00 = TREE_OPERAND (arg0, 0);
4236 tree a01 = TREE_OPERAND (arg0, 1);
4237 tree a10 = TREE_OPERAND (arg1, 0);
4238 tree a11 = TREE_OPERAND (arg1, 1);
4239 int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
4240 || TREE_CODE (arg0) == TRUTH_AND_EXPR)
4241 && (code == TRUTH_AND_EXPR
4242 || code == TRUTH_OR_EXPR));
4244 if (operand_equal_p (a00, a10, 0))
4245 return fold (build (TREE_CODE (arg0), type, a00,
4246 fold (build (code, type, a01, a11))));
4247 else if (commutative && operand_equal_p (a00, a11, 0))
4248 return fold (build (TREE_CODE (arg0), type, a00,
4249 fold (build (code, type, a01, a10))));
4250 else if (commutative && operand_equal_p (a01, a10, 0))
4251 return fold (build (TREE_CODE (arg0), type, a01,
4252 fold (build (code, type, a00, a11))));
4254 /* This case is tricky because we must either have commutative
4255 operators or else A10 must not have side-effects. */
4257 else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
4258 && operand_equal_p (a01, a11, 0))
4259 return fold (build (TREE_CODE (arg0), type,
4260 fold (build (code, type, a00, a10)),
4264 /* Check for the possibility of merging component references. If our
4265 lhs is another similar operation, try to merge its rhs with our
4266 rhs. Then try to merge our lhs and rhs. */
4267 if (TREE_CODE (arg0) == code
4268 && 0 != (tem = fold_truthop (code, type,
4269 TREE_OPERAND (arg0, 1), arg1)))
4270 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
4272 if ((tem = fold_truthop (code, type, arg0, arg1)) != 0)
4277 case TRUTH_ORIF_EXPR:
4278 /* Note that the operands of this must be ints
4279 and their values must be 0 or true.
4280 ("true" is a fixed value perhaps depending on the language.) */
4281 /* If first arg is constant true, return it. */
4282 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4285 /* If either arg is constant zero, drop it. */
4286 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
4287 return non_lvalue (arg1);
4288 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
4289 return non_lvalue (arg0);
4290 /* If second arg is constant true, result is true, but we must
4291 evaluate first arg. */
4292 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4293 return omit_one_operand (type, arg1, arg0);
4296 case TRUTH_XOR_EXPR:
4297 /* If either arg is constant zero, drop it. */
4298 if (integer_zerop (arg0))
4299 return non_lvalue (arg1);
4300 if (integer_zerop (arg1))
4301 return non_lvalue (arg0);
4302 /* If either arg is constant true, this is a logical inversion. */
4303 if (integer_onep (arg0))
4304 return non_lvalue (invert_truthvalue (arg1));
4305 if (integer_onep (arg1))
4306 return non_lvalue (invert_truthvalue (arg0));
4315 /* If one arg is a constant integer, put it last. */
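      /* For instance, 3 < x is canonicalized to x > 3 before the
	 comparison folds below are tried.  */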
4316 if (TREE_CODE (arg0) == INTEGER_CST
4317 && TREE_CODE (arg1) != INTEGER_CST)
4319 TREE_OPERAND (t, 0) = arg1;
4320 TREE_OPERAND (t, 1) = arg0;
4321 arg0 = TREE_OPERAND (t, 0);
4322 arg1 = TREE_OPERAND (t, 1);
4323 code = swap_tree_comparison (code);
4324 TREE_SET_CODE (t, code);
4327 /* Convert foo++ == CONST into ++foo == CONST + INCR.
4328 First, see if one arg is constant; find the constant arg
4329 and the other one. */
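	 /* For instance, i++ == 5 becomes ++i == 6 (the constant is bumped
	    by the increment), subject to the overflow and floating-point
	    restrictions noted below.  */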
4331 tree constop = 0, varop;
4332 int constopnum = -1;
4334 if (TREE_CONSTANT (arg1))
4335 constopnum = 1, constop = arg1, varop = arg0;
4336 if (TREE_CONSTANT (arg0))
4337 constopnum = 0, constop = arg0, varop = arg1;
4339 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
4341 /* This optimization is invalid for ordered comparisons
4342 if CONST+INCR overflows or if foo+incr might overflow.
4343 This optimization is invalid for floating point due to rounding.
4344 For pointer types we assume overflow doesn't happen. */
4345 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4346 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4347 && (code == EQ_EXPR || code == NE_EXPR)))
4350 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
4351 constop, TREE_OPERAND (varop, 1)));
4352 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
4354 t = build (code, type, TREE_OPERAND (t, 0),
4355 TREE_OPERAND (t, 1));
4356 TREE_OPERAND (t, constopnum) = newconst;
4360 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
4362 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4363 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4364 && (code == EQ_EXPR || code == NE_EXPR)))
4367 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
4368 constop, TREE_OPERAND (varop, 1)));
4369 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
4370 t = build (code, type, TREE_OPERAND (t, 0),
4371 TREE_OPERAND (t, 1));
4372 TREE_OPERAND (t, constopnum) = newconst;
4378 /* Change X >= CST to X > (CST - 1) if CST is positive. */
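      /* For instance, x >= 5 becomes x > 4, and x < 5 becomes x <= 4.  */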
4379 if (TREE_CODE (arg1) == INTEGER_CST
4380 && TREE_CODE (arg0) != INTEGER_CST
4381 && tree_int_cst_sgn (arg1) > 0)
4383 switch (TREE_CODE (t))
4387 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4388 t = build (code, type, TREE_OPERAND (t, 0), arg1);
4393 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4394 t = build (code, type, TREE_OPERAND (t, 0), arg1);
4399 /* If this is an EQ or NE comparison with zero and ARG0 is
4400 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
4401 two operations, but the latter can be done in one less insn
4402 on machines that have only two-operand insns or on which a
4403 constant cannot be the first operand. */
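      /* For instance, ((1 << n) & flags) != 0 becomes
	 ((flags >> n) & 1) != 0.  */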
4404 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
4405 && TREE_CODE (arg0) == BIT_AND_EXPR)
4407 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
4408 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
4410 fold (build (code, type,
4411 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4413 TREE_TYPE (TREE_OPERAND (arg0, 0)),
4414 TREE_OPERAND (arg0, 1),
4415 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
4416 convert (TREE_TYPE (arg0),
4419 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
4420 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
4422 fold (build (code, type,
4423 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4425 TREE_TYPE (TREE_OPERAND (arg0, 1)),
4426 TREE_OPERAND (arg0, 0),
4427 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
4428 convert (TREE_TYPE (arg0),
4433 /* If this is an NE or EQ comparison of zero against the result of a
4434 signed MOD operation whose second operand is a power of 2, make
4435 the MOD operation unsigned since it is simpler and equivalent. */
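      /* For instance, with signed int x, x % 8 == 0 is rewritten as
	 (unsigned int) x % 8 == 0; the two tests agree for every x and the
	 unsigned remainder is simpler to compute.  */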
4436 if ((code == NE_EXPR || code == EQ_EXPR)
4437 && integer_zerop (arg1)
4438 && ! TREE_UNSIGNED (TREE_TYPE (arg0))
4439 && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
4440 || TREE_CODE (arg0) == CEIL_MOD_EXPR
4441 || TREE_CODE (arg0) == FLOOR_MOD_EXPR
4442 || TREE_CODE (arg0) == ROUND_MOD_EXPR)
4443 && integer_pow2p (TREE_OPERAND (arg0, 1)))
4445 tree newtype = unsigned_type (TREE_TYPE (arg0));
4446 tree newmod = build (TREE_CODE (arg0), newtype,
4447 convert (newtype, TREE_OPERAND (arg0, 0)),
4448 convert (newtype, TREE_OPERAND (arg0, 1)));
4450 return build (code, type, newmod, convert (newtype, arg1));
4453 /* If this is an NE comparison of zero with an AND of one, remove the
4454 comparison since the AND will give the correct value. */
4455 if (code == NE_EXPR && integer_zerop (arg1)
4456 && TREE_CODE (arg0) == BIT_AND_EXPR
4457 && integer_onep (TREE_OPERAND (arg0, 1)))
4458 return convert (type, arg0);
4460 /* If we have (A & C) == C where C is a power of 2, convert this into
4461 (A & C) != 0. Similarly for NE_EXPR. */
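      /* For instance, (x & 16) == 16 becomes (x & 16) != 0, and
	 (x & 16) != 16 becomes (x & 16) == 0.  */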
4462 if ((code == EQ_EXPR || code == NE_EXPR)
4463 && TREE_CODE (arg0) == BIT_AND_EXPR
4464 && integer_pow2p (TREE_OPERAND (arg0, 1))
4465 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
4466 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
4467 arg0, integer_zero_node);
4469 /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
4470 and similarly for >= into !=. */
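      /* For instance, with unsigned x, x < (1 << n) becomes
	 (x >> n) == 0 and x >= (1 << n) becomes (x >> n) != 0.  */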
4471 if ((code == LT_EXPR || code == GE_EXPR)
4472 && TREE_UNSIGNED (TREE_TYPE (arg0))
4473 && TREE_CODE (arg1) == LSHIFT_EXPR
4474 && integer_onep (TREE_OPERAND (arg1, 0)))
4475 return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4476 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4477 TREE_OPERAND (arg1, 1)),
4478 convert (TREE_TYPE (arg0), integer_zero_node));
4480 else if ((code == LT_EXPR || code == GE_EXPR)
4481 && TREE_UNSIGNED (TREE_TYPE (arg0))
4482 && (TREE_CODE (arg1) == NOP_EXPR
4483 || TREE_CODE (arg1) == CONVERT_EXPR)
4484 && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
4485 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
4487 build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4488 convert (TREE_TYPE (arg0),
4489 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4490 TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
4491 convert (TREE_TYPE (arg0), integer_zero_node));
4493 /* Simplify comparison of something with itself. (For IEEE
4494 floating-point, we can only do some of these simplifications.) */
4495 if (operand_equal_p (arg0, arg1, 0))
4502 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4504 t = build_int_2 (1, 0);
4505 TREE_TYPE (t) = type;
4509 TREE_SET_CODE (t, code);
4513 /* For NE, we can only do this simplification if integer. */
4514 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4516 /* ... fall through ... */
4519 t = build_int_2 (0, 0);
4520 TREE_TYPE (t) = type;
4525 /* An unsigned comparison against 0 can be simplified. */
4526 if (integer_zerop (arg1)
4527 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
4528 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
4529 && TREE_UNSIGNED (TREE_TYPE (arg1)))
4531 switch (TREE_CODE (t))
4535 TREE_SET_CODE (t, NE_EXPR);
4539 TREE_SET_CODE (t, EQ_EXPR);
4542 return omit_one_operand (type,
4543 convert (type, integer_one_node),
4546 return omit_one_operand (type,
4547 convert (type, integer_zero_node),
4552 /* If we are comparing an expression that just has comparisons
4553 of two integer values, arithmetic expressions of those comparisons,
4554 and constants, we can simplify it. There are only three cases
4555 to check: the two values can either be equal, the first can be
4556 greater, or the second can be greater. Fold the expression for
4557 those three values. Since each value must be 0 or 1, we have
4558 eight possibilities, each of which corresponds to the constant 0
4559 or 1 or one of the six possible comparisons.
4561 This handles common cases like (a > b) == 0 but also handles
4562 expressions like ((x > y) - (y > x)) > 0, which supposedly
4563 occur in macroized code. */
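      /* For instance, ((x > y) - (y > x)) > 0 evaluates to 1, 0 and 0 for
	 the three possible orderings of x and y, so the whole expression
	 folds to a single comparison equivalent to x > y.  */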
4565 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
4567 tree cval1 = 0, cval2 = 0;
4570 if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
4571 /* Don't handle degenerate cases here; they should already
4572 have been handled anyway. */
4573 && cval1 != 0 && cval2 != 0
4574 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
4575 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
4576 && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
4577 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
4578 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
4580 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
4581 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
4583 /* We can't just pass T to eval_subst in case cval1 or cval2
4584 was the same as ARG1. */
4587 = fold (build (code, type,
4588 eval_subst (arg0, cval1, maxval, cval2, minval),
4591 = fold (build (code, type,
4592 eval_subst (arg0, cval1, maxval, cval2, maxval),
4595 = fold (build (code, type,
4596 eval_subst (arg0, cval1, minval, cval2, maxval),
4599 /* All three of these results should be 0 or 1. Confirm they
4600 are. Then use those values to select the proper code to use. */
4603 if ((integer_zerop (high_result)
4604 || integer_onep (high_result))
4605 && (integer_zerop (equal_result)
4606 || integer_onep (equal_result))
4607 && (integer_zerop (low_result)
4608 || integer_onep (low_result)))
4610 /* Make a 3-bit mask with the high-order bit being the
4611 value for `>', the next for '=', and the low for '<'. */
4612 switch ((integer_onep (high_result) * 4)
4613 + (integer_onep (equal_result) * 2)
4614 + integer_onep (low_result))
4618 return omit_one_operand (type, integer_zero_node, arg0);
4639 return omit_one_operand (type, integer_one_node, arg0);
4642 t = build (code, type, cval1, cval2);
4644 return save_expr (t);
4651 /* If this is a comparison of a field, we may be able to simplify it. */
4652 if ((TREE_CODE (arg0) == COMPONENT_REF
4653 || TREE_CODE (arg0) == BIT_FIELD_REF)
4654 && (code == EQ_EXPR || code == NE_EXPR)
4655 /* Handle the constant case even without -O
4656 to make sure the warnings are given. */
4657 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
4659 t1 = optimize_bit_field_compare (code, type, arg0, arg1);
4663 /* If this is a comparison of complex values and either or both
4664 sides are a COMPLEX_EXPR, it is best to split up the comparisons
4665 and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR. This
4666 may prevent needless evaluations. */
4667 if ((code == EQ_EXPR || code == NE_EXPR)
4668 && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
4669 && (TREE_CODE (arg0) == COMPLEX_EXPR
4670 || TREE_CODE (arg1) == COMPLEX_EXPR))
4672 tree subtype = TREE_TYPE (TREE_TYPE (arg0));
4673 tree real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
4674 tree imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
4675 tree real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
4676 tree imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));
4678 return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
4681 fold (build (code, type, real0, real1)),
4682 fold (build (code, type, imag0, imag1))));
4685 /* From here on, the only cases we handle are when the result is
4686 known to be a constant.
4688 To compute GT, swap the arguments and do LT.
4689 To compute GE, do LT and invert the result.
4690 To compute LE, swap the arguments, do LT and invert the result.
4691 To compute NE, do EQ and invert the result.
4693 Therefore, the code below must handle only EQ and LT. */
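      /* For instance, a GT_EXPR such as 7 > 3 is computed as 3 < 7, and a
	 LE_EXPR such as 7 <= 3 as the inverse of 3 < 7.  */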
4695 if (code == LE_EXPR || code == GT_EXPR)
4697 tem = arg0, arg0 = arg1, arg1 = tem;
4698 code = swap_tree_comparison (code);
4701 /* Note that it is safe to invert for real values here because we
4702 will check below in the one case that it matters. */
4705 if (code == NE_EXPR || code == GE_EXPR)
4708 code = invert_tree_comparison (code);
4711 /* Compute a result for LT or EQ if args permit;
4712 otherwise return T. */
4713 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
4715 if (code == EQ_EXPR)
4716 t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
4717 == TREE_INT_CST_LOW (arg1))
4718 && (TREE_INT_CST_HIGH (arg0)
4719 == TREE_INT_CST_HIGH (arg1)),
4722 t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
4723 ? INT_CST_LT_UNSIGNED (arg0, arg1)
4724 : INT_CST_LT (arg0, arg1)),
4728 /* Assume a nonexplicit constant cannot equal an explicit one,
4729 since such code would be undefined anyway.
4730 Exception: on sysvr4, using #pragma weak,
4731 a label can come out as 0. */
4732 else if (TREE_CODE (arg1) == INTEGER_CST
4733 && !integer_zerop (arg1)
4734 && TREE_CONSTANT (arg0)
4735 && TREE_CODE (arg0) == ADDR_EXPR
4737 t1 = build_int_2 (0, 0);
4739 /* Two real constants can be compared explicitly. */
4740 else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
4742 /* If either operand is a NaN, the result is false with two
4743 exceptions: First, an NE_EXPR is true on NaNs, but that case
4744 is already handled correctly since we will be inverting the
4745 result for NE_EXPR. Second, if we had inverted a LE_EXPR
4746 or a GE_EXPR into a LT_EXPR, we must return true so that it
4747 will be inverted into false. */
4749 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
4750 || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
4751 t1 = build_int_2 (invert && code == LT_EXPR, 0);
4753 else if (code == EQ_EXPR)
4754 t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
4755 TREE_REAL_CST (arg1)),
4758 t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
4759 TREE_REAL_CST (arg1)),
4763 if (t1 == NULL_TREE)
4767 TREE_INT_CST_LOW (t1) ^= 1;
4769 TREE_TYPE (t1) = type;
4773 /* Pedantic ANSI C says that a conditional expression is never an lvalue,
4774 so all simple results must be passed through pedantic_non_lvalue. */
4775 if (TREE_CODE (arg0) == INTEGER_CST)
4776 return pedantic_non_lvalue
4777 (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
4778 else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
4779 return pedantic_omit_one_operand (type, arg1, arg0);
4781 /* If the second operand is zero, invert the comparison and swap
4782 the second and third operands. Likewise if the second operand
4783 is constant and the third is not or if the third operand is
4784 equivalent to the first operand of the comparison. */
4786 if (integer_zerop (arg1)
4787 || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
4788 || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4789 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4790 TREE_OPERAND (t, 2),
4791 TREE_OPERAND (arg0, 1))))
4793 /* See if this can be inverted. If it can't, possibly because
4794 it was a floating-point inequality comparison, don't do anything. */
4796 tem = invert_truthvalue (arg0);
4798 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4800 t = build (code, type, tem,
4801 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
4803 arg1 = TREE_OPERAND (t, 2);
4808 /* If we have A op B ? A : C, we may be able to convert this to a
4809 simpler expression, depending on the operation and the values
4810 of B and C. IEEE floating point prevents this though,
4811 because A or B might be -0.0 or a NaN. */
4813 if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4814 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4815 || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
4817 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4818 arg1, TREE_OPERAND (arg0, 1)))
4820 tree arg2 = TREE_OPERAND (t, 2);
4821 enum tree_code comp_code = TREE_CODE (arg0);
4825 /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
4826 depending on the comparison operation. */
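	  /* For instance, x >= 0 ? x : -x folds to abs (x), while
	     x < 0 ? x : -x folds to -abs (x).  */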
4827 if ((FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 1)))
4828 ? real_zerop (TREE_OPERAND (arg0, 1))
4829 : integer_zerop (TREE_OPERAND (arg0, 1)))
4830 && TREE_CODE (arg2) == NEGATE_EXPR
4831 && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
4835 return pedantic_non_lvalue
4836 (fold (build1 (NEGATE_EXPR, type, arg1)));
4838 return pedantic_non_lvalue (convert (type, arg1));
4841 return pedantic_non_lvalue
4842 (fold (build1 (ABS_EXPR, type, arg1)));
4845 return pedantic_non_lvalue
4846 (fold (build1 (NEGATE_EXPR, type,
4847 fold (build1 (ABS_EXPR, type, arg1)))));
4850 /* If this is A != 0 ? A : 0, this is simply A. For ==, it is zero. */
4853 if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
4855 if (comp_code == NE_EXPR)
4856 return pedantic_non_lvalue (convert (type, arg1));
4857 else if (comp_code == EQ_EXPR)
4858 return pedantic_non_lvalue (convert (type, integer_zero_node));
4861 /* If this is A op B ? A : B, this is either A, B, min (A, B),
4862 or max (A, B), depending on the operation. */
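	  /* For instance, x < y ? x : y folds to min (x, y) and
	     x > y ? x : y folds to max (x, y).  */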
4864 if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
4865 arg2, TREE_OPERAND (arg0, 0)))
4867 tree comp_op0 = TREE_OPERAND (arg0, 0);
4868 tree comp_op1 = TREE_OPERAND (arg0, 1);
4869 tree comp_type = TREE_TYPE (comp_op0);
4874 return pedantic_non_lvalue (convert (type, arg2));
4876 return pedantic_non_lvalue (convert (type, arg1));
4879 return pedantic_non_lvalue
4880 (convert (type, (fold (build (MIN_EXPR, comp_type,
4881 comp_op0, comp_op1)))));
4884 return pedantic_non_lvalue
4885 (convert (type, fold (build (MAX_EXPR, comp_type,
4886 comp_op0, comp_op1))));
4890 /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
4891 we might still be able to simplify this. For example,
4892 if C1 is one less or one more than C2, this might have started
4893 out as a MIN or MAX and been transformed by this function.
4894 Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE. */
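	  /* For instance, x < 4 ? x : 3 is recognized as min (x, 3), since
	     here C1 == C2 + 1.  */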
4896 if (INTEGRAL_TYPE_P (type)
4897 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4898 && TREE_CODE (arg2) == INTEGER_CST)
4902 /* We can replace A with C1 in this case. */
4903 arg1 = convert (type, TREE_OPERAND (arg0, 1));
4904 t = build (code, type, TREE_OPERAND (t, 0), arg1,
4905 TREE_OPERAND (t, 2));
4909 /* If C1 is C2 + 1, this is min(A, C2). */
4910 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4911 && operand_equal_p (TREE_OPERAND (arg0, 1),
4912 const_binop (PLUS_EXPR, arg2,
4913 integer_one_node, 0), 1))
4914 return pedantic_non_lvalue
4915 (fold (build (MIN_EXPR, type, arg1, arg2)));
4919 /* If C1 is C2 - 1, this is min(A, C2). */
4920 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4921 && operand_equal_p (TREE_OPERAND (arg0, 1),
4922 const_binop (MINUS_EXPR, arg2,
4923 integer_one_node, 0), 1))
4924 return pedantic_non_lvalue
4925 (fold (build (MIN_EXPR, type, arg1, arg2)));
4929 /* If C1 is C2 - 1, this is max(A, C2). */
4930 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4931 && operand_equal_p (TREE_OPERAND (arg0, 1),
4932 const_binop (MINUS_EXPR, arg2,
4933 integer_one_node, 0), 1))
4934 return pedantic_non_lvalue
4935 (fold (build (MAX_EXPR, type, arg1, arg2)));
4939 /* If C1 is C2 + 1, this is max(A, C2). */
4940 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4941 && operand_equal_p (TREE_OPERAND (arg0, 1),
4942 const_binop (PLUS_EXPR, arg2,
4943 integer_one_node, 0), 1))
4944 return pedantic_non_lvalue
4945 (fold (build (MAX_EXPR, type, arg1, arg2)));
4950 /* If the second operand is simpler than the third, swap them
4951 since that produces better jump optimization results. */
4952 if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
4953 || TREE_CODE (arg1) == SAVE_EXPR)
4954 && ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
4955 || TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
4956 || TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
4958 /* See if this can be inverted. If it can't, possibly because
4959 it was a floating-point inequality comparison, don't do anything. */
4961 tem = invert_truthvalue (arg0);
4963 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4965 t = build (code, type, tem,
4966 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
4968 arg1 = TREE_OPERAND (t, 2);
4973 /* Convert A ? 1 : 0 to simply A. */
4974 if (integer_onep (TREE_OPERAND (t, 1))
4975 && integer_zerop (TREE_OPERAND (t, 2))
4976 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
4977 call to fold will try to move the conversion inside
4978 a COND, which will recurse. In that case, the COND_EXPR
4979 is probably the best choice, so leave it alone. */
4980 && type == TREE_TYPE (arg0))
4981 return pedantic_non_lvalue (arg0);
4983 /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
4984 operation is simply A & 2. */
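      /* For instance, (flags & 2) != 0 ? 2 : 0 (i.e. flags & 2 ? 2 : 0)
	 folds to flags & 2.  */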
4986 if (integer_zerop (TREE_OPERAND (t, 2))
4987 && TREE_CODE (arg0) == NE_EXPR
4988 && integer_zerop (TREE_OPERAND (arg0, 1))
4989 && integer_pow2p (arg1)
4990 && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
4991 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
4993 return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
4998 /* When pedantic, a compound expression can be neither an lvalue
4999 nor an integer constant expression. */
5000 if (TREE_SIDE_EFFECTS (arg0) || pedantic)
5002 /* Don't let (0, 0) be null pointer constant. */
5003 if (integer_zerop (arg1))
5004 return non_lvalue (arg1);
5009 return build_complex (arg0, arg1);
5013 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
5015 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
5016 return omit_one_operand (type, TREE_OPERAND (arg0, 0),
5017 TREE_OPERAND (arg0, 1));
5018 else if (TREE_CODE (arg0) == COMPLEX_CST)
5019 return TREE_REALPART (arg0);
5020 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
5021 return fold (build (TREE_CODE (arg0), type,
5022 fold (build1 (REALPART_EXPR, type,
5023 TREE_OPERAND (arg0, 0))),
5024 fold (build1 (REALPART_EXPR,
5025 type, TREE_OPERAND (arg0, 1)))));
5029 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
5030 return convert (type, integer_zero_node);
5031 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
5032 return omit_one_operand (type, TREE_OPERAND (arg0, 1),
5033 TREE_OPERAND (arg0, 0));
5034 else if (TREE_CODE (arg0) == COMPLEX_CST)
5035 return TREE_IMAGPART (arg0);
5036 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
5037 return fold (build (TREE_CODE (arg0), type,
5038 fold (build1 (IMAGPART_EXPR, type,
5039 TREE_OPERAND (arg0, 0))),
5040 fold (build1 (IMAGPART_EXPR, type,
5041 TREE_OPERAND (arg0, 1)))));
5044 /* Pull arithmetic ops out of the CLEANUP_POINT_EXPR where appropriate. */
5046 case CLEANUP_POINT_EXPR:
5047 if (! TREE_SIDE_EFFECTS (arg0))
5048 return convert (type, arg0);
5051 enum tree_code code0 = TREE_CODE (arg0);
5052 int kind0 = TREE_CODE_CLASS (code0);
5053 tree arg00 = TREE_OPERAND (arg0, 0);
5057 return fold (build1 (code0, type,
5058 fold (build1 (CLEANUP_POINT_EXPR,
5059 TREE_TYPE (arg00), arg00))));
5060 if ((kind0 == '<' || kind0 == '2')
5061 && ! TREE_SIDE_EFFECTS (arg01 = TREE_OPERAND (arg0, 1)))
5062 return fold (build (code0, type,
5063 fold (build1 (CLEANUP_POINT_EXPR,
5064 TREE_TYPE (arg00), arg00)),
5072 } /* switch (code) */