/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 88, 92, 93, 94, 1995 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */
/* The entry points in this file are fold, size_int and size_binop.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.  */
#include <stdio.h>
#include <setjmp.h>
#include "config.h"
#include "tree.h"

/* Handle floating overflow for `const_binop'.  */
static jmp_buf float_error;
static void encode      PROTO((HOST_WIDE_INT *, HOST_WIDE_INT, HOST_WIDE_INT));
static void decode      PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *, HOST_WIDE_INT *));
int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
                                HOST_WIDE_INT, HOST_WIDE_INT,
                                HOST_WIDE_INT, HOST_WIDE_INT *,
                                HOST_WIDE_INT *, HOST_WIDE_INT *,
                                HOST_WIDE_INT *));
static int split_tree   PROTO((tree, enum tree_code, tree *, tree *, int *));
static tree const_binop PROTO((enum tree_code, tree, tree, int));
static tree fold_convert PROTO((tree, tree));
static enum tree_code invert_tree_comparison PROTO((enum tree_code));
static enum tree_code swap_tree_comparison PROTO((enum tree_code));
static int truth_value_p PROTO((enum tree_code));
static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
static tree eval_subst  PROTO((tree, tree, tree, tree, tree));
static tree omit_one_operand PROTO((tree, tree, tree));
static tree pedantic_omit_one_operand PROTO((tree, tree, tree));
static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
                                              tree, tree));
static tree decode_field_reference PROTO((tree, int *, int *,
                                          enum machine_mode *, int *,
                                          int *, tree *));
static int all_ones_mask_p PROTO((tree, int));
static int simple_operand_p PROTO((tree));
static tree range_test  PROTO((enum tree_code, tree, enum tree_code,
                               enum tree_code, tree, tree, tree));
static tree unextend    PROTO((tree, int, int));
static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
static tree strip_compound_expr PROTO((tree, tree));
/* Yield nonzero if a signed left shift of A by B bits overflows.  */
#define left_shift_overflows(a, b)  ((a) != ((a) << (b)) >> (b))

/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
   Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
   Then this yields nonzero if overflow occurred during the addition.
   Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
   Use `^' to test whether signs differ, and `< 0' to isolate the sign.  */
#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
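
/* A usage sketch (the values here are purely illustrative):

     HOST_WIDE_INT a, b, sum;
     sum = a + b;
     if (overflow_sum_sign (a, b, sum))
       ...the signed addition overflowed...

   Only the sign bits participate: when A and B agree in sign,
   ~((a) ^ (b)) has its sign bit set, and the whole expression is
   negative exactly when SUM's sign disagrees with that common sign.  */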
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
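
/* For example, assuming HOST_BITS_PER_WIDE_INT is 32, BASE is 0x10000
   and the two-word integer with pieces low = 0x89abcdef, hi = 0x0123
   is held as the four half-words

     words[0] = 0xcdef,  words[1] = 0x89ab,    (from LOW)
     words[2] = 0x0123,  words[3] = 0x0000.    (from HI)

   Because each word is at most BASE-1, the product of any two words
   fits in one HOST_WIDE_INT with room left over for carries.  */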
/* Unpack a two-word integer into 4 words.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   WORDS points to the array of HOST_WIDE_INTs.  */

static void
encode (words, low, hi)
     HOST_WIDE_INT *words;
     HOST_WIDE_INT low, hi;
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}
/* Pack an array of 4 words into a two-word integer.
   WORDS points to the array of words.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (words, low, hi)
     HOST_WIDE_INT *words;
     HOST_WIDE_INT *low, *hi;
{
  *low = words[0] | words[1] * BASE;
  *hi = words[2] | words[3] * BASE;
}
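
/* encode and decode are exact inverses; a round trip such as

     HOST_WIDE_INT words[4], low, hi;
     encode (words, l, h);
     decode (words, &low, &hi);    (now low == l and hi == h)

   recovers the original pieces.  decode reads only words[0..3], which
   is why mul_double below can hand it the low half of an 8-word
   product and check the high half separately.  */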
/* Make the integer constant T valid for its type
   by setting to 0 or 1 all the bits in the constant
   that don't belong in the type.
   Yield 1 if a signed overflow occurs, 0 otherwise.
   If OVERFLOW is nonzero, a signed overflow has already occurred
   in calculating T, so propagate it.

   Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
   if it exists.  */

int
force_fit_type (t, overflow)
     tree t;
     int overflow;
{
  HOST_WIDE_INT low, high;
  register int prec;
  if (TREE_CODE (t) == REAL_CST)
    {
#ifdef CHECK_FLOAT_VALUE
      CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
                         overflow);
#endif
      return overflow;
    }

  else if (TREE_CODE (t) != INTEGER_CST)
    return overflow;

  low = TREE_INT_CST_LOW (t);
  high = TREE_INT_CST_HIGH (t);

  if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
    prec = POINTER_SIZE;
  else
    prec = TYPE_PRECISION (TREE_TYPE (t));

  /* First clear all bits that are beyond the type's precision.  */

  if (prec == 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      TREE_INT_CST_HIGH (t)
        &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
    }
  else
    {
      TREE_INT_CST_HIGH (t) = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
        TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  /* Unsigned types do not suffer sign extension or overflow.  */
  if (TREE_UNSIGNED (TREE_TYPE (t)))
    return overflow;

  /* If the value's sign bit is set, extend the sign.  */
  if (prec != 2 * HOST_BITS_PER_WIDE_INT
      && (prec > HOST_BITS_PER_WIDE_INT
          ? (TREE_INT_CST_HIGH (t)
             & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
          : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
    {
      /* Value is negative:
         set to 1 all the bits that are outside this type's precision.  */
      if (prec > HOST_BITS_PER_WIDE_INT)
        {
          TREE_INT_CST_HIGH (t)
            |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
        }
      else
        {
          TREE_INT_CST_HIGH (t) = -1;
          if (prec < HOST_BITS_PER_WIDE_INT)
            TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
        }
    }

  /* Yield nonzero if signed overflow occurred.  */
  return
    ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
     != 0);
}
/* Add two doubleword integers with doubleword result.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
add_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT l, h;

  l = l1 + l2;
  h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);
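
  /* The carry out of the low word is computed without a wider type:
     since `l = l1 + l2' wraps around, `(unsigned HOST_WIDE_INT) l < l1'
     is 1 exactly when the low-word addition overflowed.  For instance,
     with illustrative 32-bit pieces, 0xffffffff + 0x2 wraps to 0x1,
     and 0x1 < 0xffffffff, so a carry of 1 enters the high word.  */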
  *lv = l;
  *hv = h;
  return overflow_sum_sign (h1, h2, h);
}
/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
neg_double (l1, h1, lv, hv)
     HOST_WIDE_INT l1, h1;
     HOST_WIDE_INT *lv, *hv;
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = - l1;
      *hv = ~ h1;
      return 0;
    }
}
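
/* Negation can overflow only in the L1 == 0 branch, and only for the
   most negative doubleword value (sign bit of H1 set, everything else
   zero), which is its own negation; then *HV and H1 are both negative
   and `(*hv & h1) < 0' yields 1.  For any other H1 the signs of H1 and
   -H1 differ, so the AND is nonnegative.  */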
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
mul_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  register unsigned HOST_WIDE_INT carry;
  register int i, j, k;
  HOST_WIDE_INT toplow, tophigh, neglow, neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  bzero ((char *) prod, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
        {
          k = i + j;
          /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
          carry += arg1[i] * arg2[j];
          /* Since prod[k] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
          carry += prod[k];
          prod[k] = LOWPART (carry);
          carry = HIGHPART (carry);
        }
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);        /* This ignores prod[4] through prod[4*2-1] */

  /* Check for overflow by calculating the top half of the answer in full;
     it should agree with the low half's sign bit.  */
  decode (prod + 4, &toplow, &tophigh);
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
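
/* The full product of two doublewords occupies prod[0..7].  The low
   four words are the truncated result; the signed multiplication
   overflowed unless the upper four words are a pure sign extension of
   it: all zero bits when *HV is nonnegative, all one bits when it is
   negative.  That is what the final test checks, after correcting the
   unsigned top half for each negative operand.  */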
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  if (count < 0)
    {
      rshift_double (l1, h1, - count, prec, lv, hv, arith);
      return;
    }

  if (count >= prec)
    count = (unsigned HOST_WIDE_INT) count % prec;

  if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = (unsigned HOST_WIDE_INT) l1 << (count - HOST_BITS_PER_WIDE_INT);
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
             | ((unsigned HOST_WIDE_INT) l1
                >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
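      /* The right shift above is done in two steps (by
         HOST_BITS_PER_WIDE_INT - count - 1 and then by 1 more) so that
         when COUNT is 0 we never shift by the full width of the type,
         which C leaves undefined.  */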
      *lv = (unsigned HOST_WIDE_INT) l1 << count;
    }
}
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  unsigned HOST_WIDE_INT signmask;
  signmask = (arith
              ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
              : 0);

  if (count >= prec)
    count = (unsigned HOST_WIDE_INT) count % prec;

  if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = signmask;
      *lv = ((signmask << (2 * HOST_BITS_PER_WIDE_INT - count - 1) << 1)
             | ((unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT)));
    }
  else
    {
      *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
             | ((unsigned HOST_WIDE_INT) h1
                << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
      *hv = ((signmask << (HOST_BITS_PER_WIDE_INT - count))
             | ((unsigned HOST_WIDE_INT) h1 >> count));
    }
}
/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT arg1[4];
  register int i;
  register int carry;

  if (count < 0)
    {
      rrotate_double (l1, h1, - count, prec, lv, hv);
      return;
    }

  encode (arg1, l1, h1);

  if (count > prec)
    count = prec;

  while (count > 0)
    {
      carry = arg1[4 - 1] >> (HOST_BITS_PER_WIDE_INT / 2 - 1);
      carry &= 1;
      for (i = 0; i < 4; i++)
        {
          carry += arg1[i] << 1;
          arg1[i] = LOWPART (carry);
          carry = HIGHPART (carry);
        }
      count--;
    }

  decode (arg1, lv, hv);
}
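
/* Rotation proceeds one bit at a time: each pass of the while loop
   shifts all four half-words left by one and feeds the bit that falls
   off the top back in at the bottom, so COUNT passes rotate the
   doubleword left by COUNT bits.  */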
/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT arg1[4];
  register int i;
  register int carry;

  if (count > prec)
    count = prec;

  encode (arg1, l1, h1);

  while (count > 0)
    {
      carry = arg1[0] & 1;
      for (i = 4 - 1; i >= 0; i--)
        {
          carry *= BASE;
          carry += arg1[i];
          arg1[i] = LOWPART (carry >> 1);
        }
      count--;
    }

  decode (arg1, lv, hv);
}
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */

int
div_and_round_double (code, uns,
                      lnum_orig, hnum_orig, lden_orig, hden_orig,
                      lquo, hquo, lrem, hrem)
     enum tree_code code;
     int uns;
     HOST_WIDE_INT lnum_orig, hnum_orig;   /* num == numerator == dividend */
     HOST_WIDE_INT lden_orig, hden_orig;   /* den == denominator == divisor */
     HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];     /* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  register int i, j;
  unsigned HOST_WIDE_INT work;
  register int carry = 0;
  HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;
  if ((hden == 0) && (lden == 0))
    abort ();

  /* calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
        {
          quo_neg = ~ quo_neg;
          /* (minimum integer) / (-1) is the only overflow case.  */
          if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
            overflow = 1;
        }
      if (hden < 0)
        {
          quo_neg = ~ quo_neg;
          neg_double (lden, hden, &lden, &hden);
        }
    }

  if (hnum == 0 && hden == 0)
    {                           /* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
      goto finish_up;
    }

  if (hnum == 0)
    {                           /* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  bzero ((char *) quo, sizeof quo);

  bzero ((char *) num, sizeof num);     /* to zero the extra scaling element */
  bzero ((char *) den, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);
  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < BASE)
    {
      /* hnum != 0 already checked.  */
      for (i = 4 - 1; i >= 0; i--)
        {
          work = num[i] + carry * BASE;
          quo[i] = work / (unsigned HOST_WIDE_INT) lden;
          carry = work % (unsigned HOST_WIDE_INT) lden;
        }
    }
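  /* When the divisor fits in a single half-word this is ordinary short
     division: the digits of the dividend are processed top-down, and
     each step's remainder is carried into the next digit, exactly like
     decimal long division done by hand.  */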
  else
    {
      /* Full double precision division,
         with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int quo_est, scale, num_hi_sig, den_hi_sig;

      /* Find the highest non-zero divisor digit.  */
      for (i = 4 - 1; ; i--)
        if (den[i] != 0)
          {
            den_hi_sig = i;
            break;
          }

      /* Insure that the first digit of the divisor is at least BASE/2.
         This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1)
        {                       /* scale divisor and dividend */
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (num[i] * scale) + carry;
              num[i] = LOWPART (work);
              carry = HIGHPART (work);
            }
          num[4] = carry;
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (den[i] * scale) + carry;
              den[i] = LOWPART (work);
              carry = HIGHPART (work);
              if (den[i] != 0)
                den_hi_sig = i;
            }
        }

      num_hi_sig = 4;

      /* Main loop */
      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
        {
          /* guess the next quotient digit, quo_est, by dividing the first
             two remaining dividend digits by the high order divisor digit.
             quo_est is never low and is at most 2 high.  */
          unsigned HOST_WIDE_INT tmp;

          num_hi_sig = i + den_hi_sig + 1;
          work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
          if (num[num_hi_sig] != den[den_hi_sig])
            quo_est = work / den[den_hi_sig];
          else
            quo_est = BASE - 1;

          /* refine quo_est so it's usually correct, and at most one high.  */
          tmp = work - quo_est * den[den_hi_sig];
          if (tmp < BASE
              && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
            quo_est--;

          /* Try QUO_EST as the quotient digit, by multiplying the
             divisor by QUO_EST and subtracting from the remaining dividend.
             Keep in mind that QUO_EST is the I - 1st digit.  */

          carry = 0;
          for (j = 0; j <= den_hi_sig; j++)
            {
              work = quo_est * den[j] + carry;
              carry = HIGHPART (work);
              work = num[i + j] - LOWPART (work);
              num[i + j] = LOWPART (work);
              carry += HIGHPART (work) != 0;
            }

          /* if quo_est was high by one, then num[i] went negative and
             we need to correct things.  */

          if (num[num_hi_sig] < carry)
            {
              quo_est--;
              carry = 0;        /* add divisor back in */
              for (j = 0; j <= den_hi_sig; j++)
                {
                  work = num[i + j] + den[j] + carry;
                  carry = HIGHPART (work);
                  num[i + j] = LOWPART (work);
                }
              num[num_hi_sig] += carry;
            }

          /* store the quotient digit.  */
          quo[i] = quo_est;
        }
    }

  decode (quo, lquo, hquo);
 finish_up:
  /* if result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:        /* round toward zero */
    case EXACT_DIV_EXPR:        /* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:        /* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
        {
          /* quo = quo - 1;  */
          add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
                      lquo, hquo);
        }
      else return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:         /* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
        {
          add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                      lquo, hquo);
        }
      else return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:        /* round to closest integer */
      {
        HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
        HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;

        /* get absolute values */
        if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
        if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);

        /* if (2 * abs (lrem) >= abs (lden)) */
        mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
                    labs_rem, habs_rem, &ltwice, &htwice);
        if (((unsigned HOST_WIDE_INT) habs_den
             < (unsigned HOST_WIDE_INT) htwice)
            || (((unsigned HOST_WIDE_INT) habs_den
                 == (unsigned HOST_WIDE_INT) htwice)
                && ((unsigned HOST_WIDE_INT) labs_den
                    < (unsigned HOST_WIDE_INT) ltwice)))
          {
            if (quo_neg)
              /* quo = quo - 1;  */
              add_double (*lquo, *hquo,
                          (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
            else
              /* quo = quo + 1;  */
              add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                          lquo, hquo);
          }
        else return overflow;
      }
      break;

    default:
      abort ();
    }
  /* compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
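
/* The rounding cases differ only in when the truncated quotient gets
   nudged by one.  As an illustration, dividing -7 by 2 gives a trial
   quotient of -3 with remainder -1: TRUNC keeps -3, FLOOR lowers it to
   -4, CEIL keeps -3, and ROUND compares 2*|rem| = 2 against |den| = 2
   and, the halves being equal, moves away from zero to -4.  */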
#ifndef REAL_ARITHMETIC
/* Effectively truncate a real value to represent the nearest possible value
   in a narrower mode.  The result is actually represented in the same data
   type as the argument, but its value is usually different.

   A trap may occur during the FP operations and it is the responsibility
   of the calling function to have a handler established.  */

REAL_VALUE_TYPE
real_value_truncate (mode, arg)
     enum machine_mode mode;
     REAL_VALUE_TYPE arg;
{
  return REAL_VALUE_TRUNCATE (mode, arg);
}
#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT

/* Check for infinity in an IEEE double precision number.  */

int
target_isinf (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign      :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign      :  1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
              && u.big_endian.mantissa1 == 0
              && u.big_endian.mantissa2 == 0);
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
              && u.little_endian.mantissa1 == 0
              && u.little_endian.mantissa2 == 0);
    }
}
/* Check whether an IEEE double precision number is a NaN.  */

int
target_isnan (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign      :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign      :  1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
              && (u.big_endian.mantissa1 != 0
                  || u.big_endian.mantissa2 != 0));
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
              && (u.little_endian.mantissa1 != 0
                  || u.little_endian.mantissa2 != 0));
    }
}
/* Check for a negative IEEE double precision number.  */

int
target_negative (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign      :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign      :  1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return u.big_endian.sign;
    }
  else
    {
      u.d = x;
      return u.little_endian.sign;
    }
}
#else /* Target not IEEE */

/* Let's assume other float formats don't have infinity.
   (This can be overridden by redefining REAL_VALUE_ISINF.)  */

target_isinf (x)
     REAL_VALUE_TYPE x;
{
  return 0;
}

/* Let's assume other float formats don't have NaNs.
   (This can be overridden by redefining REAL_VALUE_ISNAN.)  */

target_isnan (x)
     REAL_VALUE_TYPE x;
{
  return 0;
}

/* Let's assume other float formats don't have minus zero.
   (This can be overridden by redefining REAL_VALUE_NEGATIVE.)  */

target_negative (x)
     REAL_VALUE_TYPE x;
{
  return x < 0;
}
#endif /* Target not IEEE */
#endif /* no REAL_ARITHMETIC */
/* Split a tree IN into a constant and a variable part
   that could be combined with CODE to make IN.
   CODE must be a commutative arithmetic operation.
   Store the constant part into *CONP and the variable in *VARP.
   Return 1 if this was done; zero means the tree IN did not decompose
   this way.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
   Therefore, we must tell the caller whether the variable part
   was subtracted.  We do this by storing 1 or -1 into *VARSIGNP.
   The value stored is the coefficient for the variable term.
   The constant term we return should always be added;
   we negate it if necessary.  */
static int
split_tree (in, code, varp, conp, varsignp)
     tree in;
     enum tree_code code;
     tree *varp, *conp;
     int *varsignp;
{
  register tree outtype = TREE_TYPE (in);
  *varp = 0;
  *conp = 0;

  /* Strip any conversions that don't change the machine mode.  */
  while ((TREE_CODE (in) == NOP_EXPR
          || TREE_CODE (in) == CONVERT_EXPR)
         && (TYPE_MODE (TREE_TYPE (in))
             == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
    in = TREE_OPERAND (in, 0);
  if (TREE_CODE (in) == code
      || (! FLOAT_TYPE_P (TREE_TYPE (in))
          /* We can associate addition and subtraction together
             (even though the C standard doesn't say so)
             for integers because the value is not affected.
             For reals, the value might be affected, so we can't.  */
          && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
              || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
    {
      enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
      if (code == INTEGER_CST)
        {
          *conp = TREE_OPERAND (in, 0);
          *varp = TREE_OPERAND (in, 1);
          if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
              && TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
          return 1;
        }
      if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
        {
          *conp = TREE_OPERAND (in, 1);
          *varp = TREE_OPERAND (in, 0);
          *varsignp = 1;
          if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
              && TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          if (TREE_CODE (in) == MINUS_EXPR)
            {
              /* If operation is subtraction and constant is second,
                 must negate it to get an additive constant.
                 And this cannot be done unless it is a manifest constant.
                 It could also be the address of a static variable.
                 We cannot negate that, so give up.  */
              if (TREE_CODE (*conp) == INTEGER_CST)
                /* Subtracting from integer_zero_node loses for long long.  */
                *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
              else
                return 0;
            }
          return 1;
        }
      if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
        {
          *conp = TREE_OPERAND (in, 0);
          *varp = TREE_OPERAND (in, 1);
          if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
              && TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
          return 1;
        }
    }
  return 0;
}
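
/* For example, splitting IN = `x - 5' with CODE = PLUS_EXPR takes the
   second branch above: `x' is stored in *VARP with *VARSIGNP = 1, and
   the constant is negated to -5 in *CONP, so the caller may treat IN
   uniformly as (+1)*x + (-5).  */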
/* Combine two constants ARG1 and ARG2 under operation CODE
   to produce a new constant.
   We assume ARG1 and ARG2 have the same data type,
   or at least are the same kind of constant and the same machine mode.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */
static tree
const_binop (code, arg1, arg2, notrunc)
     enum tree_code code;
     register tree arg1, arg2;
     int notrunc;
{
  if (TREE_CODE (arg1) == INTEGER_CST)
    {
      register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
      register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
      HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
      HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
      HOST_WIDE_INT low, hi;
      HOST_WIDE_INT garbagel, garbageh;
      register tree t;
      int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
      int overflow = 0;
      switch (code)
        {
        case BIT_IOR_EXPR:
          t = build_int_2 (int1l | int2l, int1h | int2h);
          break;

        case BIT_XOR_EXPR:
          t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
          break;

        case BIT_AND_EXPR:
          t = build_int_2 (int1l & int2l, int1h & int2h);
          break;

        case BIT_ANDTC_EXPR:
          t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
          break;

        case RSHIFT_EXPR:
          int2l = - int2l;
        case LSHIFT_EXPR:
          /* It's unclear from the C standard whether shifts can overflow.
             The following code ignores overflow; perhaps a C standard
             interpretation ruling is needed.  */
          lshift_double (int1l, int1h, int2l,
                         TYPE_PRECISION (TREE_TYPE (arg1)),
                         &low, &hi,
                         !uns);
          t = build_int_2 (low, hi);
          TREE_TYPE (t) = TREE_TYPE (arg1);
          if (!notrunc)
            force_fit_type (t, 0);
          TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
          return t;
        case RROTATE_EXPR:
          int2l = - int2l;
        case LROTATE_EXPR:
          lrotate_double (int1l, int1h, int2l,
                          TYPE_PRECISION (TREE_TYPE (arg1)),
                          &low, &hi);
          t = build_int_2 (low, hi);
          break;

        case PLUS_EXPR:
          if (int1h == 0)
            {
              int2l += int1l;
              if ((unsigned HOST_WIDE_INT) int2l < int1l)
                {
                  hi = int2h++;
                  overflow = int2h < hi;
                }
              t = build_int_2 (int2l, int2h);
              break;
            }
          if (int2h == 0)
            {
              int1l += int2l;
              if ((unsigned HOST_WIDE_INT) int1l < int2l)
                {
                  hi = int1h++;
                  overflow = int1h < hi;
                }
              t = build_int_2 (int1l, int1h);
              break;
            }
          overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
          t = build_int_2 (low, hi);
          break;
        case MINUS_EXPR:
          if (int2h == 0 && int2l == 0)
            {
              t = build_int_2 (int1l, int1h);
              break;
            }
          neg_double (int2l, int2h, &low, &hi);
          add_double (int1l, int1h, low, hi, &low, &hi);
          overflow = overflow_sum_sign (hi, int2h, int1h);
          t = build_int_2 (low, hi);
          break;

        case MULT_EXPR:
          overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
          t = build_int_2 (low, hi);
          break;
        case TRUNC_DIV_EXPR:
        case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
        case EXACT_DIV_EXPR:
          /* This is a shortcut for a common special case.
             It reduces the number of tree nodes generated
             and saves time.  */
          if (int2h == 0 && int2l > 0
              && TREE_TYPE (arg1) == sizetype
              && int1h == 0 && int1l >= 0)
            {
              if (code == CEIL_DIV_EXPR)
                int1l += int2l - 1;
              return size_int (int1l / int2l);
            }
        case ROUND_DIV_EXPR:
          if (int2h == 0 && int2l == 1)
            {
              t = build_int_2 (int1l, int1h);
              break;
            }
          if (int1l == int2l && int1h == int2h)
            {
              if ((int1l | int1h) == 0)
                abort ();
              t = build_int_2 (1, 0);
              break;
            }
          overflow = div_and_round_double (code, uns,
                                           int1l, int1h, int2l, int2h,
                                           &low, &hi, &garbagel, &garbageh);
          t = build_int_2 (low, hi);
          break;
        case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
        case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
          overflow = div_and_round_double (code, uns,
                                           int1l, int1h, int2l, int2h,
                                           &garbagel, &garbageh, &low, &hi);
          t = build_int_2 (low, hi);
          break;
        case MIN_EXPR:
        case MAX_EXPR:
          if (uns)
            low = (((unsigned HOST_WIDE_INT) int1h
                    < (unsigned HOST_WIDE_INT) int2h)
                   || (((unsigned HOST_WIDE_INT) int1h
                        == (unsigned HOST_WIDE_INT) int2h)
                       && ((unsigned HOST_WIDE_INT) int1l
                           < (unsigned HOST_WIDE_INT) int2l)));
          else
            low = ((int1h < int2h)
                   || ((int1h == int2h)
                       && ((unsigned HOST_WIDE_INT) int1l
                           < (unsigned HOST_WIDE_INT) int2l)));

          if (low == (code == MIN_EXPR))
            t = build_int_2 (int1l, int1h);
          else
            t = build_int_2 (int2l, int2h);
          break;

        default:
          abort ();
        }
      TREE_TYPE (t) = TREE_TYPE (arg1);
      TREE_OVERFLOW (t)
        = ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
           | TREE_OVERFLOW (arg1)
           | TREE_OVERFLOW (arg2));
      TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
                                    | TREE_CONSTANT_OVERFLOW (arg1)
                                    | TREE_CONSTANT_OVERFLOW (arg2));
      return t;
    }
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (TREE_CODE (arg1) == REAL_CST)
    {
      REAL_VALUE_TYPE d1;
      REAL_VALUE_TYPE d2;
      int overflow = 0;
      REAL_VALUE_TYPE value;
      tree t;

      d1 = TREE_REAL_CST (arg1);
      d2 = TREE_REAL_CST (arg2);

      /* If either operand is a NaN, just return it.  Otherwise, set up
         for floating-point trap; we return an overflow.  */
      if (REAL_VALUE_ISNAN (d1))
        return arg1;
      else if (REAL_VALUE_ISNAN (d2))
        return arg2;
      else if (setjmp (float_error))
        {
          t = copy_node (arg1);
          overflow = 1;
          goto got_float;
        }

      set_float_handler (float_error);

#ifdef REAL_ARITHMETIC
      REAL_ARITHMETIC (value, code, d1, d2);
#else
      switch (code)
        {
        case PLUS_EXPR:
          value = d1 + d2;
          break;

        case MINUS_EXPR:
          value = d1 - d2;
          break;

        case MULT_EXPR:
          value = d1 * d2;
          break;

        case RDIV_EXPR:
#ifndef REAL_INFINITY
          if (d2 == 0)
            abort ();
#endif

          value = d1 / d2;
          break;

        case MIN_EXPR:
          value = MIN (d1, d2);
          break;

        case MAX_EXPR:
          value = MAX (d1, d2);
          break;

        default:
          abort ();
        }
#endif /* no REAL_ARITHMETIC */
      t = build_real (TREE_TYPE (arg1),
                      real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));

      set_float_handler (NULL_PTR);

    got_float:
      TREE_OVERFLOW (t)
        = (force_fit_type (t, overflow)
           | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
      TREE_CONSTANT_OVERFLOW (t)
        = TREE_OVERFLOW (t)
        | TREE_CONSTANT_OVERFLOW (arg1)
        | TREE_CONSTANT_OVERFLOW (arg2);
      return t;
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  if (TREE_CODE (arg1) == COMPLEX_CST)
    {
      register tree r1 = TREE_REALPART (arg1);
      register tree i1 = TREE_IMAGPART (arg1);
      register tree r2 = TREE_REALPART (arg2);
      register tree i2 = TREE_IMAGPART (arg2);
      register tree t;

      switch (code)
        {
        case PLUS_EXPR:
          t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
                             const_binop (PLUS_EXPR, i1, i2, notrunc));
          break;

        case MINUS_EXPR:
          t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
                             const_binop (MINUS_EXPR, i1, i2, notrunc));
          break;

        case MULT_EXPR:
          t = build_complex (const_binop (MINUS_EXPR,
                                          const_binop (MULT_EXPR,
                                                       r1, r2, notrunc),
                                          const_binop (MULT_EXPR,
                                                       i1, i2, notrunc),
                                          notrunc),
                             const_binop (PLUS_EXPR,
                                          const_binop (MULT_EXPR,
                                                       r1, i2, notrunc),
                                          const_binop (MULT_EXPR,
                                                       i1, r2, notrunc),
                                          notrunc));
          break;

        case RDIV_EXPR:
          {
            register tree magsquared
              = const_binop (PLUS_EXPR,
                             const_binop (MULT_EXPR, r2, r2, notrunc),
                             const_binop (MULT_EXPR, i2, i2, notrunc),
                             notrunc);

            t = build_complex
              (const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
                            ? TRUNC_DIV_EXPR : RDIV_EXPR,
                            const_binop (PLUS_EXPR,
                                         const_binop (MULT_EXPR, r1, r2,
                                                      notrunc),
                                         const_binop (MULT_EXPR, i1, i2,
                                                      notrunc),
                                         notrunc),
                            magsquared, notrunc),
               const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
                            ? TRUNC_DIV_EXPR : RDIV_EXPR,
                            const_binop (MINUS_EXPR,
                                         const_binop (MULT_EXPR, i1, r2,
                                                      notrunc),
                                         const_binop (MULT_EXPR, r1, i2,
                                                      notrunc),
                                         notrunc),
                            magsquared, notrunc));
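
            /* The above is the textbook identity

                 (r1 + i1*i) / (r2 + i2*i)
                   = ((r1*r2 + i1*i2) + (i1*r2 - r1*i2)*i) / (r2^2 + i2^2)

               with both parts divided by MAGSQUARED; TRUNC_DIV_EXPR is
               used in place of RDIV_EXPR for complex integer types.  */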
          }
          break;

        default:
          abort ();
        }
      TREE_TYPE (t) = TREE_TYPE (arg1);
      return t;
    }
  return 0;
}
/* Return an INTEGER_CST with value NUMBER and type from `sizetype'.  */

tree
size_int (number)
     unsigned int number;
{
  register tree t;
  /* Type-size nodes already made for small sizes.  */
  static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];

  if (number < 2*HOST_BITS_PER_WIDE_INT + 1
      && size_table[number] != 0)
    return size_table[number];
  if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
    {
      push_obstacks_nochange ();
      /* Make this a permanent node.  */
      end_temporary_allocation ();
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
      size_table[number] = t;
      pop_obstacks ();
    }
  else
    {
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
    }
  return t;
}
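
/* For example, size_int (8) yields an INTEGER_CST of type `sizetype'
   with value 8.  Small values (up to 2*HOST_BITS_PER_WIDE_INT) get
   permanent, cached nodes, so repeated calls return the identical tree
   and such sizes can be compared by pointer.  */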
/* Combine operands ARG0 and ARG1 with arithmetic operation CODE.
   CODE is a tree code.  Data type is taken from `sizetype'.
   If the operands are constant, so is the result.  */

tree
size_binop (code, arg0, arg1)
     enum tree_code code;
     tree arg0, arg1;
{
  /* Handle the special case of two integer constants faster.  */
  if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
    {
      /* And some specific cases even faster than that.  */
      if (code == PLUS_EXPR
          && TREE_INT_CST_LOW (arg0) == 0
          && TREE_INT_CST_HIGH (arg0) == 0)
        return arg1;
      if (code == MINUS_EXPR
          && TREE_INT_CST_LOW (arg1) == 0
          && TREE_INT_CST_HIGH (arg1) == 0)
        return arg0;
      if (code == MULT_EXPR
          && TREE_INT_CST_LOW (arg0) == 1
          && TREE_INT_CST_HIGH (arg0) == 0)
        return arg1;
      /* Handle general case of two integer constants.  */
      return const_binop (code, arg0, arg1, 1);
    }

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return error_mark_node;

  return fold (build (code, sizetype, arg0, arg1));
}
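
/* A usage sketch: size_binop (PLUS_EXPR, size_int (0), size_int (9))
   returns the 9 node unchanged via the zero shortcut, while
   size_binop (PLUS_EXPR, size_int (4), size_int (8)) folds at once to
   the constant 12 via const_binop.  */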
/* Given T, a tree representing type conversion of ARG1, a constant,
   return a constant tree representing the result of conversion.  */

static tree
fold_convert (t, arg1)
     register tree t;
     register tree arg1;
{
  register tree type = TREE_TYPE (t);
  int overflow = 0;

  if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
        {
          /* If we would build a constant wider than GCC supports,
             leave the conversion unfolded.  */
          if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
            return t;

          /* Given an integer constant, make new constant with new type,
             appropriately sign-extended or truncated.  */
          t = build_int_2 (TREE_INT_CST_LOW (arg1),
                           TREE_INT_CST_HIGH (arg1));
          TREE_TYPE (t) = type;
          /* Indicate an overflow if (1) ARG1 already overflowed,
             or (2) force_fit_type indicates an overflow.
             Tell force_fit_type that an overflow has already occurred
             if ARG1 is a too-large unsigned value and T is signed.  */
          TREE_OVERFLOW (t)
            = (TREE_OVERFLOW (arg1)
               | force_fit_type (t,
                                 (TREE_INT_CST_HIGH (arg1) < 0
                                  & (TREE_UNSIGNED (type)
                                     < TREE_UNSIGNED (TREE_TYPE (arg1))))));
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
        }
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      else if (TREE_CODE (arg1) == REAL_CST)
        {
          /* Don't initialize these, use assignments.
             Initialized local aggregates don't work on old compilers.  */
          REAL_VALUE_TYPE x;
          REAL_VALUE_TYPE l;
          REAL_VALUE_TYPE u;

          x = TREE_REAL_CST (arg1);
          l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
          u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
          /* See if X will be in range after truncation towards 0.
             To compensate for truncation, move the bounds away from 0,
             but reject if X exactly equals the adjusted bounds.  */
#ifdef REAL_ARITHMETIC
          REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
          REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
#else
          l--;
          u++;
#endif
          /* If X is a NaN, use zero instead and show we have an overflow.
             Otherwise, range check.  */
          if (REAL_VALUE_ISNAN (x))
            overflow = 1, x = dconst0;
          else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
            overflow = 1;

#ifndef REAL_ARITHMETIC
          {
            HOST_WIDE_INT low, high;
            HOST_WIDE_INT half_word
              = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);

            if (x < 0)
              x = -x;

            high = (HOST_WIDE_INT) (x / half_word / half_word);
            x -= (REAL_VALUE_TYPE) high * half_word * half_word;
            if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
              {
                low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
                low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
              }
            else
              low = (HOST_WIDE_INT) x;
            if (TREE_REAL_CST (arg1) < 0)
              neg_double (low, high, &low, &high);
            t = build_int_2 (low, high);
          }
#else
          {
            HOST_WIDE_INT low, high;
            REAL_VALUE_TO_INT (&low, &high, x);
            t = build_int_2 (low, high);
          }
#endif
          TREE_TYPE (t) = type;
          TREE_OVERFLOW (t)
            = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
        }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      TREE_TYPE (t) = type;
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      if (TREE_CODE (arg1) == INTEGER_CST)
        return build_real_from_int_cst (type, arg1);
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      if (TREE_CODE (arg1) == REAL_CST)
        {
          if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
            return arg1;
          else if (setjmp (float_error))
            {
              overflow = 1;
              t = copy_node (arg1);
              goto got_it;
            }
          set_float_handler (float_error);

          t = build_real (type, real_value_truncate (TYPE_MODE (type),
                                                     TREE_REAL_CST (arg1)));
          set_float_handler (NULL_PTR);

        got_it:
          TREE_OVERFLOW (t)
            = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
          return t;
        }
    }
  TREE_CONSTANT (t) = 1;
  return t;
}
/* Return an expr equal to X but certainly not valid as an lvalue.
   Also make sure it is not valid as a null pointer constant.  */

tree
non_lvalue (x)
     tree x;
{
  tree result;

  /* These things are certainly not lvalues.  */
  if (TREE_CODE (x) == NON_LVALUE_EXPR
      || TREE_CODE (x) == INTEGER_CST
      || TREE_CODE (x) == REAL_CST
      || TREE_CODE (x) == STRING_CST
      || TREE_CODE (x) == ADDR_EXPR)
    {
      if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
        {
          /* Use NOP_EXPR instead of NON_LVALUE_EXPR
             so convert_for_assignment won't strip it.
             This is so this 0 won't be treated as a null pointer constant.  */
          result = build1 (NOP_EXPR, TREE_TYPE (x), x);
          TREE_CONSTANT (result) = TREE_CONSTANT (x);
          return result;
        }
      return x;
    }

  result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
  TREE_CONSTANT (result) = TREE_CONSTANT (x);
  return result;
}
/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
   Zero means allow extended lvalues.  */

int pedantic_lvalues;

/* When pedantic, return an expr equal to X but certainly not valid as a
   pedantic lvalue.  Otherwise, return X.  */

tree
pedantic_non_lvalue (x)
     tree x;
{
  if (pedantic_lvalues)
    return non_lvalue (x);
  else
    return x;
}
/* Given a tree comparison code, return the code that is the logical inverse
   of the given code.  It is not safe to do this for floating-point
   comparisons, except for NE_EXPR and EQ_EXPR.  */

static enum tree_code
invert_tree_comparison (code)
     enum tree_code code;
{
  switch (code)
    {
    case EQ_EXPR:
      return NE_EXPR;
    case NE_EXPR:
      return EQ_EXPR;
    case GT_EXPR:
      return LE_EXPR;
    case GE_EXPR:
      return LT_EXPR;
    case LT_EXPR:
      return GE_EXPR;
    case LE_EXPR:
      return GT_EXPR;
    default:
      abort ();
    }
}

/* Similar, but return the comparison that results if the operands are
   swapped.  This is safe for floating-point.  */

static enum tree_code
swap_tree_comparison (code)
     enum tree_code code;
{
  switch (code)
    {
    case EQ_EXPR:
    case NE_EXPR:
      return code;
    case GT_EXPR:
      return LT_EXPR;
    case GE_EXPR:
      return LE_EXPR;
    case LT_EXPR:
      return GT_EXPR;
    case LE_EXPR:
      return GE_EXPR;
    default:
      abort ();
    }
}
/* Return nonzero if CODE is a tree code that represents a truth value.  */

static int
truth_value_p (code)
     enum tree_code code;
{
  return (TREE_CODE_CLASS (code) == '<'
          || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
          || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
          || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
}
/* Return nonzero if two operands are necessarily equal.
   If ONLY_CONST is non-zero, only return non-zero for constants.
   This function tests whether the operands are indistinguishable;
   it does not test whether they are equal using C's == operation.
   The distinction is important for IEEE floating point, because
   (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
   (2) two NaNs may be indistinguishable, but NaN!=NaN.  */

int
operand_equal_p (arg0, arg1, only_const)
     tree arg0, arg1;
     int only_const;
{
  /* If both types don't have the same signedness, then we can't consider
     them equal.  We must check this before the STRIP_NOPS calls
     because they may change the signedness of the arguments.  */
  if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
    return 0;

  STRIP_NOPS (arg0);
  STRIP_NOPS (arg1);

  /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
     We don't care about side effects in that case because the SAVE_EXPR
     takes care of that for us.  */
  if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
    return ! only_const;

  if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
    return 0;

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == ADDR_EXPR
      && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
    return 1;

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == INTEGER_CST
      && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
      && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
    return 1;

  /* Detect when real constants are equal.  */
  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == REAL_CST)
    return !bcmp ((char *) &TREE_REAL_CST (arg0),
                  (char *) &TREE_REAL_CST (arg1),
                  sizeof (REAL_VALUE_TYPE));
  if (only_const)
    return 0;

  if (TREE_CODE (arg0) != TREE_CODE (arg1))
    return 0;
  /* This is needed for conversions and for COMPONENT_REF.
     Might as well play it safe and always test this.  */
  if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
    return 0;

  switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
    {
    case '1':
      /* Two conversions are equal only if signedness and modes match.  */
      if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
          && (TREE_UNSIGNED (TREE_TYPE (arg0))
              != TREE_UNSIGNED (TREE_TYPE (arg1))))
        return 0;

      return operand_equal_p (TREE_OPERAND (arg0, 0),
                              TREE_OPERAND (arg1, 0), 0);

    case '<':
    case '2':
      return (operand_equal_p (TREE_OPERAND (arg0, 0),
                               TREE_OPERAND (arg1, 0), 0)
              && operand_equal_p (TREE_OPERAND (arg0, 1),
                                  TREE_OPERAND (arg1, 1), 0));

    case 'r':
      switch (TREE_CODE (arg0))
        {
        case INDIRECT_REF:
          return operand_equal_p (TREE_OPERAND (arg0, 0),
                                  TREE_OPERAND (arg1, 0), 0);

        case COMPONENT_REF:
        case ARRAY_REF:
          return (operand_equal_p (TREE_OPERAND (arg0, 0),
                                   TREE_OPERAND (arg1, 0), 0)
                  && operand_equal_p (TREE_OPERAND (arg0, 1),
                                      TREE_OPERAND (arg1, 1), 0));

        case BIT_FIELD_REF:
          return (operand_equal_p (TREE_OPERAND (arg0, 0),
                                   TREE_OPERAND (arg1, 0), 0)
                  && operand_equal_p (TREE_OPERAND (arg0, 1),
                                      TREE_OPERAND (arg1, 1), 0)
                  && operand_equal_p (TREE_OPERAND (arg0, 2),
                                      TREE_OPERAND (arg1, 2), 0));
        }
    }

  return 0;
}
/* Similar to operand_equal_p, but see if ARG0 might have been made by
   shorten_compare from ARG1 when ARG1 was being compared with OTHER.

   When in doubt, return 0.  */

static int
operand_equal_for_comparison_p (arg0, arg1, other)
     tree arg0, arg1;
     tree other;
{
  int unsignedp1, unsignedpo;
  tree primarg1, primother;
  unsigned correct_width;

  if (operand_equal_p (arg0, arg1, 0))
    return 1;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
    return 0;

  /* Duplicate what shorten_compare does to ARG1 and see if that gives the
     actual comparison operand, ARG0.

     First throw away any conversions to wider types
     already present in the operands.  */

  primarg1 = get_narrower (arg1, &unsignedp1);
  primother = get_narrower (other, &unsignedpo);

  correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
  if (unsignedp1 == unsignedpo
      && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
      && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
    {
      tree type = TREE_TYPE (arg0);

      /* Make sure shorter operand is extended the right way
         to match the longer operand.  */
      primarg1 = convert (signed_or_unsigned_type (unsignedp1,
                                                   TREE_TYPE (primarg1)),
                          primarg1);

      if (operand_equal_p (arg0, convert (type, primarg1), 0))
        return 1;
    }

  return 0;
}
/* See if ARG is an expression that is either a comparison or is performing
   arithmetic on comparisons.  The comparisons must only be comparing
   two different values, which will be stored in *CVAL1 and *CVAL2; if
   they are non-zero it means that some operands have already been found.
   No variables may be used anywhere else in the expression except in the
   comparisons.  If SAVE_P is true it means we removed a SAVE_EXPR around
   the expression and save_expr needs to be called with CVAL1 and CVAL2.

   If this is true, return 1.  Otherwise, return zero.  */

static int
twoval_comparison_p (arg, cval1, cval2, save_p)
     tree arg;
     tree *cval1, *cval2;
     int *save_p;
{
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
           && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
               || code == COMPOUND_EXPR))
    class = '2';

  /* ??? Disable this since the SAVE_EXPR might already be in use outside
     the expression.  There may be no way to make this work, but it needs
     to be looked at again for 2.6.  */
#if 0
  else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
    {
      /* If we've already found a CVAL1 or CVAL2, this expression is
         too complex to handle.  */
      if (*cval1 || *cval2)
        return 0;

      class = '1';
      *save_p = 1;
    }
#endif

  switch (class)
    {
    case '1':
      return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);

    case '2':
      return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
              && twoval_comparison_p (TREE_OPERAND (arg, 1),
                                      cval1, cval2, save_p));

    case 'c':
      return 1;

    case 'e':
      if (code == COND_EXPR)
        return (twoval_comparison_p (TREE_OPERAND (arg, 0),
                                     cval1, cval2, save_p)
                && twoval_comparison_p (TREE_OPERAND (arg, 1),
                                        cval1, cval2, save_p)
                && twoval_comparison_p (TREE_OPERAND (arg, 2),
                                        cval1, cval2, save_p));
      return 0;

    case '<':
      /* First see if we can handle the first operand, then the second.  For
         the second operand, we know *CVAL1 can't be zero.  It must be that
         one side of the comparison is each of the values; test for the
         case where this isn't true by failing if the two operands
         are the same.  */

      if (operand_equal_p (TREE_OPERAND (arg, 0),
                           TREE_OPERAND (arg, 1), 0))
        return 0;

      if (*cval1 == 0)
        *cval1 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
        ;
      else if (*cval2 == 0)
        *cval2 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
        ;
      else
        return 0;

      if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
        ;
      else if (*cval2 == 0)
        *cval2 = TREE_OPERAND (arg, 1);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
        ;
      else
        return 0;

      return 1;
    }

  return 0;
}
/* ARG is a tree that is known to contain just arithmetic operations and
   comparisons.  Evaluate the operations in the tree substituting NEW0 for
   any occurrence of OLD0 as an operand of a comparison and likewise for
   NEW1 and OLD1.  */

static tree
eval_subst (arg, old0, new0, old1, new1)
     tree arg;
     tree old0, new0, old1, new1;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
           && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
    class = '2';

  switch (class)
    {
    case '1':
      return fold (build1 (code, type,
                           eval_subst (TREE_OPERAND (arg, 0),
                                       old0, new0, old1, new1)));

    case '2':
      return fold (build (code, type,
                          eval_subst (TREE_OPERAND (arg, 0),
                                      old0, new0, old1, new1),
                          eval_subst (TREE_OPERAND (arg, 1),
                                      old0, new0, old1, new1)));

    case 'e':
      switch (code)
        {
        case SAVE_EXPR:
          return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);

        case COMPOUND_EXPR:
          return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);

        case COND_EXPR:
          return fold (build (code, type,
                              eval_subst (TREE_OPERAND (arg, 0),
                                          old0, new0, old1, new1),
                              eval_subst (TREE_OPERAND (arg, 1),
                                          old0, new0, old1, new1),
                              eval_subst (TREE_OPERAND (arg, 2),
                                          old0, new0, old1, new1)));
        }

    case '<':
      {
        tree arg0 = TREE_OPERAND (arg, 0);
        tree arg1 = TREE_OPERAND (arg, 1);

        /* We need to check both for exact equality and tree equality.  The
           former will be true if the operand has a side-effect.  In that
           case, we know the operand occurred exactly once.  */

        if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
          arg0 = new0;
        else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
          arg0 = new1;

        if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
          arg1 = new0;
        else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
          arg1 = new1;

        return fold (build (code, type, arg0, arg1));
      }
    }

  return arg;
}
/* Return a tree for the case when the result of an expression is RESULT
   converted to TYPE and OMITTED was previously an operand of the expression
   but is now not needed (e.g., we folded OMITTED * 0).

   If OMITTED has side effects, we must evaluate it.  Otherwise, just do
   the conversion of RESULT to TYPE.  */

static tree
omit_one_operand (type, result, omitted)
     tree type, result, omitted;
{
  tree t = convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build (COMPOUND_EXPR, type, omitted, t);

  return non_lvalue (t);
}
/* Similar, but call pedantic_non_lvalue instead of non_lvalue.  */

static tree
pedantic_omit_one_operand (type, result, omitted)
     tree type, result, omitted;
{
  tree t = convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build (COMPOUND_EXPR, type, omitted, t);

  return pedantic_non_lvalue (t);
}
/* Return a simplified tree node for the truth-negation of ARG.  This
   never alters ARG itself.  We assume that ARG is an operation that
   returns a truth value (0 or 1).  */

tree
invert_truthvalue (arg)
     tree arg;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);

  if (code == ERROR_MARK)
    return arg;

  /* If this is a comparison, we can simply invert it, except for
     floating-point non-equality comparisons, in which case we just
     enclose a TRUTH_NOT_EXPR around what we have.  */

  if (TREE_CODE_CLASS (code) == '<')
    {
      if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
          && code != NE_EXPR && code != EQ_EXPR)
        return build1 (TRUTH_NOT_EXPR, type, arg);
      else
        return build (invert_tree_comparison (code), type,
                      TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
    }

  switch (code)
    {
    case INTEGER_CST:
      return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
                                         && TREE_INT_CST_HIGH (arg) == 0, 0));

    case TRUTH_AND_EXPR:
      return build (TRUTH_OR_EXPR, type,
                    invert_truthvalue (TREE_OPERAND (arg, 0)),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_OR_EXPR:
      return build (TRUTH_AND_EXPR, type,
                    invert_truthvalue (TREE_OPERAND (arg, 0)),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_XOR_EXPR:
      /* Here we can invert either operand.  We invert the first operand
         unless the second operand is a TRUTH_NOT_EXPR in which case our
         result is the XOR of the first operand with the inside of the
         negation of the second operand.  */

      if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
        return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
                      TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
      else
        return build (TRUTH_XOR_EXPR, type,
                      invert_truthvalue (TREE_OPERAND (arg, 0)),
                      TREE_OPERAND (arg, 1));

    case TRUTH_ANDIF_EXPR:
      return build (TRUTH_ORIF_EXPR, type,
                    invert_truthvalue (TREE_OPERAND (arg, 0)),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_ORIF_EXPR:
      return build (TRUTH_ANDIF_EXPR, type,
                    invert_truthvalue (TREE_OPERAND (arg, 0)),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_NOT_EXPR:
      return TREE_OPERAND (arg, 0);

    case COND_EXPR:
      return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
                    invert_truthvalue (TREE_OPERAND (arg, 1)),
                    invert_truthvalue (TREE_OPERAND (arg, 2)));

    case COMPOUND_EXPR:
      return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case NON_LVALUE_EXPR:
      return invert_truthvalue (TREE_OPERAND (arg, 0));

    case NOP_EXPR:
    case CONVERT_EXPR:
    case FLOAT_EXPR:
      return build1 (TREE_CODE (arg), type,
                     invert_truthvalue (TREE_OPERAND (arg, 0)));

    case BIT_AND_EXPR:
      if (!integer_onep (TREE_OPERAND (arg, 1)))
        break;
      return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));

    case SAVE_EXPR:
      return build1 (TRUTH_NOT_EXPR, type, arg);
    }
  if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
    abort ();
  return build1 (TRUTH_NOT_EXPR, type, arg);
}
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
        (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.

   If this optimization cannot be done, 0 will be returned.  */

static tree
distribute_bit_expr (code, type, arg0, arg1)
     enum tree_code code;
     tree type;
     tree arg0, arg1;
{
  tree common;
  tree left, right;

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      || TREE_CODE (arg0) == code
      || (TREE_CODE (arg0) != BIT_AND_EXPR
          && TREE_CODE (arg0) != BIT_IOR_EXPR))
    return 0;

  if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 0);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 0);
    }
  else
    return 0;

  return fold (build (TREE_CODE (arg0), type, common,
                      fold (build (code, type, left, right))));
}
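
/* For instance, (x | 4) & (x | 1) becomes x | (4 & 1); the inner fold
   reduces 4 & 1 to 0, leaving x | 0, which later folding reduces to
   plain x.  */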
/* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
   starting at BITPOS.  The field is unsigned if UNSIGNEDP is non-zero.  */

static tree
make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
     tree inner;
     tree type;
     int bitsize, bitpos;
     int unsignedp;
{
  tree result = build (BIT_FIELD_REF, type, inner,
                       size_int (bitsize), size_int (bitpos));

  TREE_UNSIGNED (result) = unsignedp;

  return result;
}
/* Optimize a bit-field compare.

   There are two cases:  First is a compare against a constant and the
   second is a comparison of two items where the fields are at the same
   bit position relative to the start of a chunk (byte, halfword, word)
   large enough to contain it.  In these cases we can avoid the shift
   implicit in bitfield extractions.

   For constants, we emit a compare of the shifted constant with the
   BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
   compared.  For two fields at the same position, we do the ANDs with the
   similar mask and compare the result of the ANDs.

   CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
   COMPARE_TYPE is the type of the comparison, and LHS and RHS
   are the left and right operands of the comparison, respectively.

   If the optimization described above can be done, we return the resulting
   tree.  Otherwise we return zero.  */
static tree
optimize_bit_field_compare (code, compare_type, lhs, rhs)
     enum tree_code code;
     tree compare_type;
     tree lhs, rhs;
{
  int lbitpos, lbitsize, rbitpos, rbitsize;
  int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
  tree type = TREE_TYPE (lhs);
  tree signed_type, unsigned_type;
  int const_p = TREE_CODE (rhs) == INTEGER_CST;
  enum machine_mode lmode, rmode, lnmode, rnmode;
  int lunsignedp, runsignedp;
  int lvolatilep = 0, rvolatilep = 0;
  tree linner, rinner;
  tree mask;
  tree offset;

  /* Get all the information about the extractions being done.  If the bit size
     is the same as the size of the underlying object, we aren't doing an
     extraction at all and so can do nothing.  */
  linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
                                &lunsignedp, &lvolatilep);
  if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
      || offset != 0)
    return 0;

  if (!const_p)
    {
      /* If this is not a constant, we can only do something if bit positions,
         sizes, and signedness are the same.  */
      rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
                                    &rmode, &runsignedp, &rvolatilep);

      if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
          || lunsignedp != runsignedp || offset != 0)
        return 0;
    }
  /* See if we can find a mode to refer to this field.  We should be able to,
     but fail if we can't.  */
  lnmode = get_best_mode (lbitsize, lbitpos,
                          TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
                          lvolatilep);
  if (lnmode == VOIDmode)
    return 0;

  /* Set signed and unsigned types of the precision of this mode for the
     shifts below.  */
  signed_type = type_for_mode (lnmode, 0);
  unsigned_type = type_for_mode (lnmode, 1);

  if (! const_p)
    {
      rnmode = get_best_mode (rbitsize, rbitpos,
                              TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
                              rvolatilep);
      if (rnmode == VOIDmode)
        return 0;
    }
  /* Compute the bit position and size for the new reference and our offset
     within it.  If the new reference is the same size as the original, we
     won't optimize anything, so return zero.  */
  lnbitsize = GET_MODE_BITSIZE (lnmode);
  lnbitpos = lbitpos & ~ (lnbitsize - 1);
  lbitpos -= lnbitpos;
  if (lnbitsize == lbitsize)
    return 0;

  if (! const_p)
    {
      rnbitsize = GET_MODE_BITSIZE (rnmode);
      rnbitpos = rbitpos & ~ (rnbitsize - 1);
      rbitpos -= rnbitpos;
      if (rnbitsize == rbitsize)
        return 0;
    }

  if (BYTES_BIG_ENDIAN)
    lbitpos = lnbitsize - lbitsize - lbitpos;

  /* Make the mask to be used against the extracted field.  */
  mask = build_int_2 (~0, ~0);
  TREE_TYPE (mask) = unsigned_type;
  force_fit_type (mask, 0);
  mask = convert (unsigned_type, mask);
  mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
  mask = const_binop (RSHIFT_EXPR, mask,
                      size_int (lnbitsize - lbitsize - lbitpos), 0);
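
  /* As a worked illustration: a 6-bit field at bit 3 of a 32-bit word
     gives lnbitsize = 32, lbitsize = 6, lbitpos = 3; the all-ones mask
     shifted left by 26 and back right by 23 leaves 0x1f8, i.e. ones
     exactly in bits 3..8.  */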
  if (! const_p)
    /* If not comparing with constant, just rework the comparison
       and return.  */
    return build (code, compare_type,
                  build (BIT_AND_EXPR, unsigned_type,
                         make_bit_field_ref (linner, unsigned_type,
                                             lnbitsize, lnbitpos, 1),
                         mask),
                  build (BIT_AND_EXPR, unsigned_type,
                         make_bit_field_ref (rinner, unsigned_type,
                                             rnbitsize, rnbitpos, 1),
                         mask));
  /* Otherwise, we are handling the constant case.  See if the constant is too
     big for the field.  Warn and return a tree for 0 (false) if so.  We do
     this not only for its own sake, but to avoid having to test for this
     error case below.  If we didn't, we might generate wrong code.

     For unsigned fields, the constant shifted right by the field length should
     be all zero.  For signed fields, the high-order bits should agree with
     the sign bit.  */

  if (lunsignedp)
    {
      if (! integer_zerop (const_binop (RSHIFT_EXPR,
                                        convert (unsigned_type, rhs),
                                        size_int (lbitsize), 0)))
        {
          warning ("comparison is always %s due to width of bitfield",
                   code == NE_EXPR ? "one" : "zero");
          return convert (compare_type,
                          (code == NE_EXPR
                           ? integer_one_node : integer_zero_node));
        }
    }
  else
    {
      tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
                              size_int (lbitsize - 1), 0);
      if (! integer_zerop (tem) && ! integer_all_onesp (tem))
        {
          warning ("comparison is always %s due to width of bitfield",
                   code == NE_EXPR ? "one" : "zero");
          return convert (compare_type,
                          (code == NE_EXPR
                           ? integer_one_node : integer_zero_node));
        }
    }
2359 /* Single-bit compares should always be against zero. */
2360 if (lbitsize == 1 && ! integer_zerop (rhs))
2362 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2363 rhs = convert (type, integer_zero_node);
2366 /* Make a new bitfield reference, shift the constant over the
2367 appropriate number of bits and mask it with the computed mask
2368 (in case this was a signed field). If we changed it, make a new one. */
2369 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2372 TREE_SIDE_EFFECTS (lhs) = 1;
2373 TREE_THIS_VOLATILE (lhs) = 1;
2376 rhs = fold (const_binop (BIT_AND_EXPR,
2377 const_binop (LSHIFT_EXPR,
2378 convert (unsigned_type, rhs),
2379 size_int (lbitpos), 0),
2382 return build (code, compare_type,
2383 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
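/* Illustrative sketch, not part of GNU CC: in source terms the transformation
   above turns a bitfield comparison into a masked compare of the containing
   word.  The layout below (little-endian, 32-bit int, low-to-high bit
   allocation) is an assumption.  */
#if 0
#include <stdio.h>

union demo
{
  struct { unsigned int a : 3; unsigned int b : 4; unsigned int c : 9; } f;
  unsigned int w;
};

int
main ()
{
  union demo x;

  x.w = 0;
  x.f.a = 5, x.f.b = 3, x.f.c = 17;

  /* "x.f.b == 3" becomes "(x.w & (0xf << 3)) == (3 << 3)": fetch the
     word once, mask bits 3..6, compare against the shifted constant.  */
  printf ("%d %d\n", x.f.b == 3,
	  (x.w & (0xfu << 3)) == (3u << 3));
  return 0;
}
#endif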
2387 /* Subroutine for fold_truthop: decode a field reference.
2389 If EXP is a comparison reference, we return the innermost reference.
2391 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2392 set to the starting bit number.
2394 If the innermost field can be completely contained in a mode-sized
2395 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2397 *PVOLATILEP is set to 1 if any expression encountered is volatile;
2398 otherwise it is not changed.
2400 *PUNSIGNEDP is set to the signedness of the field.
2402 *PMASK is set to the mask used. This is either contained in a
2403 BIT_AND_EXPR or derived from the width of the field.
2405 Return 0 if this is not a component reference or is one that we can't
2406 do anything with. */
2409 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2412 int *pbitsize, *pbitpos;
2413 enum machine_mode *pmode;
2414 int *punsignedp, *pvolatilep;
2418 tree mask, inner, offset;
2422 /* All the optimizations using this function assume integer fields.
2423 There are problems with FP fields since the type_for_size call
2424 below can fail for, e.g., XFmode. */
2425 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
2430 if (TREE_CODE (exp) == BIT_AND_EXPR)
2432 and_mask = TREE_OPERAND (exp, 1);
2433 exp = TREE_OPERAND (exp, 0);
2434 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
2435 if (TREE_CODE (and_mask) != INTEGER_CST)
2440 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2441 punsignedp, pvolatilep);
2442 if ((inner == exp && and_mask == 0)
2443 || *pbitsize < 0 || offset != 0)
2446 /* Compute the mask to access the bitfield. */
2447 unsigned_type = type_for_size (*pbitsize, 1);
2448 precision = TYPE_PRECISION (unsigned_type);
2450 mask = build_int_2 (~0, ~0);
2451 TREE_TYPE (mask) = unsigned_type;
2452 force_fit_type (mask, 0);
2453 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2454 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2456 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
2458 mask = fold (build (BIT_AND_EXPR, unsigned_type,
2459 convert (unsigned_type, and_mask), mask));
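/* Sketch, not from GNU CC: the LSHIFT/RSHIFT pair above is the usual portable
   way to build a field of *PBITSIZE low-order ones, since it still works when
   the field fills the whole type (where a single shift by the full width
   would be undefined).  Plain C equivalent, assuming a 32-bit unsigned int: */
#if 0
unsigned int
low_mask (nbits)
     int nbits;			/* 1 <= nbits <= 32 */
{
  unsigned int mask = ~0u;

  mask <<= 32 - nbits;		/* push the excess ones off the top */
  mask >>= 32 - nbits;		/* pull them back, zero-filled */
  return mask;
}
#endif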
2465 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order bits. */
2469 all_ones_mask_p (mask, size)
2473 tree type = TREE_TYPE (mask);
2474 int precision = TYPE_PRECISION (type);
2477 tmask = build_int_2 (~0, ~0);
2478 TREE_TYPE (tmask) = signed_type (type);
2479 force_fit_type (tmask, 0);
2481 tree_int_cst_equal (mask,
2482 const_binop (RSHIFT_EXPR,
2483 const_binop (LSHIFT_EXPR, tmask,
2484 size_int (precision - size),
2486 size_int (precision - size), 0));
2489 /* Subroutine for fold_truthop: determine if an operand is simple enough
2490 to be evaluated unconditionally. */
2493 simple_operand_p (exp)
2496 /* Strip any conversions that don't change the machine mode. */
2497 while ((TREE_CODE (exp) == NOP_EXPR
2498 || TREE_CODE (exp) == CONVERT_EXPR)
2499 && (TYPE_MODE (TREE_TYPE (exp))
2500 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2501 exp = TREE_OPERAND (exp, 0);
2503 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2504 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2505 && ! TREE_ADDRESSABLE (exp)
2506 && ! TREE_THIS_VOLATILE (exp)
2507 && ! DECL_NONLOCAL (exp)
2508 /* Don't regard global variables as simple. They may be
2509 allocated in ways unknown to the compiler (shared memory,
2510 #pragma weak, etc). */
2511 && ! TREE_PUBLIC (exp)
2512 && ! DECL_EXTERNAL (exp)
2513 /* Loading a static variable is unduly expensive, but global
2514 registers aren't expensive. */
2515 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2518 /* Subroutine for fold_truthop: try to optimize a range test.
2520 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2522 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2523 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2524 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of the result.
2527 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2528 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2529 larger than HI_CST (they may be equal).
2531 We return the simplified tree or 0 if no optimization is possible. */
2534 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2535 enum tree_code jcode, lo_code, hi_code;
2536 tree type, var, lo_cst, hi_cst;
2539 enum tree_code rcode;
2541 /* See if this is a range test and normalize the constant terms. */
2543 if (jcode == TRUTH_AND_EXPR)
2548 /* See if we have VAR != CST && VAR != CST+1. */
2549 if (! (hi_code == NE_EXPR
2550 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2551 && tree_int_cst_equal (integer_one_node,
2552 const_binop (MINUS_EXPR,
2553 hi_cst, lo_cst, 0))))
2561 if (hi_code == LT_EXPR)
2562 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2563 else if (hi_code != LE_EXPR)
2566 if (lo_code == GT_EXPR)
2567 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2569 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
2582 /* See if we have VAR == CST || VAR == CST+1. */
2583 if (! (hi_code == EQ_EXPR
2584 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2585 && tree_int_cst_equal (integer_one_node,
2586 const_binop (MINUS_EXPR,
2587 hi_cst, lo_cst, 0))))
2595 if (hi_code == GE_EXPR)
2596 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2597 else if (hi_code != GT_EXPR)
2600 if (lo_code == LE_EXPR)
2601 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2603 /* We now have VAR < LO_CST || VAR > HI_CST. */
2612 /* When normalizing, it is possible to both increment the smaller constant
2613 and decrement the larger constant. See if they are still ordered. */
2614 if (tree_int_cst_lt (hi_cst, lo_cst))
2617 /* Fail if VAR isn't an integer. */
2618 utype = TREE_TYPE (var);
2619 if (! INTEGRAL_TYPE_P (utype))
2622 /* The range test is invalid if subtracting the two constants results
2623 in overflow. This can happen in traditional mode. */
2624 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2625 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2628 if (! TREE_UNSIGNED (utype))
2630 utype = unsigned_type (utype);
2631 var = convert (utype, var);
2632 lo_cst = convert (utype, lo_cst);
2633 hi_cst = convert (utype, hi_cst);
2636 return fold (convert (type,
2637 build (rcode, utype,
2638 build (MINUS_EXPR, utype, var, lo_cst),
2639 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
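/* Sketch, not from GNU CC: the rewrite performed by range_test depends on
   unsigned wraparound; values below LO_CST wrap to very large unsigned
   numbers and so fail the single comparison.  For example:  */
#if 0
#include <stdio.h>

int
main ()
{
  int i;

  for (i = -5; i <= 15; i++)
    if ((i >= 2 && i <= 9) != ((unsigned) (i - 2) <= 7))
      printf ("mismatch at %d\n", i);

  printf ("done\n");
  return 0;
}
#endif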
2642 /* Subroutine for fold_truthop: C is an INTEGER_CST interpreted as a P
2643 bit value. Arrange things so the extra bits will be set to zero if and
2644 only if C is sign-extended to its full width. */
2647 unextend (c, p, unsignedp)
2652 tree type = TREE_TYPE (c);
2653 int modesize = GET_MODE_BITSIZE (TYPE_MODE (type));
2656 if (p == modesize || unsignedp)
2659 if (TREE_UNSIGNED (type))
2660 c = convert (signed_type (type), c);
2662 /* We work by getting just the sign bit into the low-order bit, then
2663 into the high-order bit, then sign-extended. We then XOR that value with C. */
2665 temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1), 0);
2666 temp = const_binop (BIT_AND_EXPR, temp, size_int (1), 0);
2667 temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1), 0);
2668 temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1), 0);
2669 return convert (type, const_binop (BIT_XOR_EXPR, c, temp, 0));
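/* Sketch, not from GNU CC: for reference, the ordinary C idiom for
   sign-extending a P-bit value, which is what the shift/XOR sequence above is
   checking against (assumes 32-bit int and an arithmetic right shift of
   negative values):  */
#if 0
int
sign_extend (x, p)
     int x, p;			/* 1 <= p < 32 */
{
  return (x << (32 - p)) >> (32 - p);	/* replicate bit P-1 upward */
}
#endif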
2672 /* Find ways of folding logical expressions of LHS and RHS:
2673 Try to merge two comparisons to the same innermost item.
2674 Look for range tests like "ch >= '0' && ch <= '9'".
2675 Look for combinations of simple terms on machines with expensive branches
2676 and evaluate the RHS unconditionally.
2678 For example, if we have p->a == 2 && p->b == 4 and we can make an
2679 object large enough to span both A and B, we can do this with a comparison
2680 against the object ANDed with a mask.
2682 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2683 operations to do this with one comparison.
2685 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2686 function and the one above.
2688 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2689 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2691 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its two operands.
2694 We return the simplified tree or 0 if no optimization is possible. */
2697 fold_truthop (code, truth_type, lhs, rhs)
2698 enum tree_code code;
2699 tree truth_type, lhs, rhs;
2701 /* If this is the "or" of two comparisons, we can do something if
2702 the comparisons are NE_EXPR. If this is the "and", we can do something
2703 if the comparisons are EQ_EXPR. I.e.,
2704 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2706 WANTED_CODE is this operation code. For single bit fields, we can
2707 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2708 comparison for one-bit fields. */
2710 enum tree_code wanted_code;
2711 enum tree_code lcode, rcode;
2712 tree ll_arg, lr_arg, rl_arg, rr_arg;
2713 tree ll_inner, lr_inner, rl_inner, rr_inner;
2714 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2715 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2716 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2717 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2718 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2719 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2720 enum machine_mode lnmode, rnmode;
2721 tree ll_mask, lr_mask, rl_mask, rr_mask;
2722 tree l_const, r_const;
2724 int first_bit, end_bit;
2727 /* Start by getting the comparison codes and seeing if this looks like
2728 a range test. Fail if anything is volatile. If one operand is a
2729 BIT_AND_EXPR with the constant one, treat it as if it were surrounded by a NE_EXPR. */
2732 if (TREE_SIDE_EFFECTS (lhs)
2733 || TREE_SIDE_EFFECTS (rhs))
2736 lcode = TREE_CODE (lhs);
2737 rcode = TREE_CODE (rhs);
2739 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2740 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2742 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2743 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
2745 if (TREE_CODE_CLASS (lcode) != '<'
2746 || TREE_CODE_CLASS (rcode) != '<')
2749 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2750 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2752 ll_arg = TREE_OPERAND (lhs, 0);
2753 lr_arg = TREE_OPERAND (lhs, 1);
2754 rl_arg = TREE_OPERAND (rhs, 0);
2755 rr_arg = TREE_OPERAND (rhs, 1);
2757 if (TREE_CODE (lr_arg) == INTEGER_CST
2758 && TREE_CODE (rr_arg) == INTEGER_CST
2759 && operand_equal_p (ll_arg, rl_arg, 0))
2761 if (tree_int_cst_lt (lr_arg, rr_arg))
2762 result = range_test (code, truth_type, lcode, rcode,
2763 ll_arg, lr_arg, rr_arg);
2765 result = range_test (code, truth_type, rcode, lcode,
2766 ll_arg, rr_arg, lr_arg);
2768 /* If this isn't a range test, it also isn't a comparison that
2769 can be merged. However, it wins to evaluate the RHS unconditionally
2770 on machines with expensive branches. */
2772 if (result == 0 && BRANCH_COST >= 2)
2774 if (TREE_CODE (ll_arg) != VAR_DECL
2775 && TREE_CODE (ll_arg) != PARM_DECL)
2777 /* Avoid evaluating the variable part twice. */
2778 ll_arg = save_expr (ll_arg);
2779 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2780 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2782 return build (code, truth_type, lhs, rhs);
2787 /* If the RHS can be evaluated unconditionally and its operands are
2788 simple, it wins to evaluate the RHS unconditionally on machines
2789 with expensive branches. In this case, this isn't a comparison
2790 that can be merged. */
2792 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2793 are with zero (tmw). */
2795 if (BRANCH_COST >= 2
2796 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2797 && simple_operand_p (rl_arg)
2798 && simple_operand_p (rr_arg))
2799 return build (code, truth_type, lhs, rhs);
2801 /* See if the comparisons can be merged. Then get all the parameters for each side. */
2804 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2805 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2809 ll_inner = decode_field_reference (ll_arg,
2810 &ll_bitsize, &ll_bitpos, &ll_mode,
2811 &ll_unsignedp, &volatilep, &ll_mask);
2812 lr_inner = decode_field_reference (lr_arg,
2813 &lr_bitsize, &lr_bitpos, &lr_mode,
2814 &lr_unsignedp, &volatilep, &lr_mask);
2815 rl_inner = decode_field_reference (rl_arg,
2816 &rl_bitsize, &rl_bitpos, &rl_mode,
2817 &rl_unsignedp, &volatilep, &rl_mask);
2818 rr_inner = decode_field_reference (rr_arg,
2819 &rr_bitsize, &rr_bitpos, &rr_mode,
2820 &rr_unsignedp, &volatilep, &rr_mask);
2822 /* It must be true that the inner operation on the lhs of each
2823 comparison must be the same if we are to be able to do anything.
2824 Then see if we have constants. If not, the same must be true for the rhs's. */
2826 if (volatilep || ll_inner == 0 || rl_inner == 0
2827 || ! operand_equal_p (ll_inner, rl_inner, 0))
2830 if (TREE_CODE (lr_arg) == INTEGER_CST
2831 && TREE_CODE (rr_arg) == INTEGER_CST)
2832 l_const = lr_arg, r_const = rr_arg;
2833 else if (lr_inner == 0 || rr_inner == 0
2834 || ! operand_equal_p (lr_inner, rr_inner, 0))
2837 l_const = r_const = 0;
2839 /* If either comparison code is not correct for our logical operation,
2840 fail. However, we can convert a one-bit comparison against zero into
2841 the opposite comparison against that bit being set in the field. */
2843 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2844 if (lcode != wanted_code)
2846 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2852 if (rcode != wanted_code)
2854 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2860 /* See if we can find a mode that contains both fields being compared on
2861 the left. If we can't, fail. Otherwise, update all constants and masks
2862 to be relative to a field of that size. */
2863 first_bit = MIN (ll_bitpos, rl_bitpos);
2864 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2865 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2866 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2868 if (lnmode == VOIDmode)
2871 lnbitsize = GET_MODE_BITSIZE (lnmode);
2872 lnbitpos = first_bit & ~ (lnbitsize - 1);
2873 type = type_for_size (lnbitsize, 1);
2874 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2876 if (BYTES_BIG_ENDIAN)
2878 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2879 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2882 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2883 size_int (xll_bitpos), 0);
2884 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2885 size_int (xrl_bitpos), 0);
2889 l_const = convert (type, unextend (l_const, ll_bitsize, ll_unsignedp));
2890 l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
2891 if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
2892 fold (build1 (BIT_NOT_EXPR,
2896 warning ("comparison is always %s",
2897 wanted_code == NE_EXPR ? "one" : "zero");
2899 return convert (truth_type,
2900 wanted_code == NE_EXPR
2901 ? integer_one_node : integer_zero_node);
2906 r_const = convert (type, unextend (r_const, rl_bitsize, rl_unsignedp));
2907 r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos), 0);
2908 if (! integer_zerop (const_binop (BIT_AND_EXPR, r_const,
2909 fold (build1 (BIT_NOT_EXPR,
2913 warning ("comparison is always %s",
2914 wanted_code == NE_EXPR ? "one" : "zero");
2916 return convert (truth_type,
2917 wanted_code == NE_EXPR
2918 ? integer_one_node : integer_zero_node);
2922 /* If the right sides are not constant, do the same for them. Also,
2923 disallow this optimization if a size or signedness mismatch occurs
2924 between the left and right sides. */
2927 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2928 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2929 /* Make sure the two fields on the right
2930 correspond to the left without being swapped. */
2931 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2934 first_bit = MIN (lr_bitpos, rr_bitpos);
2935 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2936 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2937 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2939 if (rnmode == VOIDmode)
2942 rnbitsize = GET_MODE_BITSIZE (rnmode);
2943 rnbitpos = first_bit & ~ (rnbitsize - 1);
2944 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2946 if (BYTES_BIG_ENDIAN)
2948 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2949 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2952 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2953 size_int (xlr_bitpos), 0);
2954 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2955 size_int (xrr_bitpos), 0);
2957 /* Make a mask that corresponds to both fields being compared.
2958 Do this for both items being compared. If the masks agree,
2959 we can do this by masking both and comparing the masked results. */
2961 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2962 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2963 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2965 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2966 ll_unsignedp || rl_unsignedp);
2967 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2968 lr_unsignedp || rr_unsignedp);
2969 if (! all_ones_mask_p (ll_mask, lnbitsize))
2971 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2972 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2974 return build (wanted_code, truth_type, lhs, rhs);
2977 /* There is still another way we can do something: If both pairs of
2978 fields being compared are adjacent, we may be able to make a wider
2979 field containing them both. */
2980 if ((ll_bitsize + ll_bitpos == rl_bitpos
2981 && lr_bitsize + lr_bitpos == rr_bitpos)
2982 || (ll_bitpos == rl_bitpos + rl_bitsize
2983 && lr_bitpos == rr_bitpos + rr_bitsize))
2984 return build (wanted_code, truth_type,
2985 make_bit_field_ref (ll_inner, type,
2986 ll_bitsize + rl_bitsize,
2987 MIN (ll_bitpos, rl_bitpos),
2989 make_bit_field_ref (lr_inner, type,
2990 lr_bitsize + rr_bitsize,
2991 MIN (lr_bitpos, rr_bitpos),
2997 /* Handle the case of comparisons with constants. If there is something in
2998 common between the masks, those bits of the constants must be the same.
2999 If not, the condition is always false. Test for this to avoid generating
3000 incorrect code below. */
3001 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
3002 if (! integer_zerop (result)
3003 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
3004 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
3006 if (wanted_code == NE_EXPR)
3008 warning ("`or' of unmatched not-equal tests is always 1");
3009 return convert (truth_type, integer_one_node);
3013 warning ("`and' of mutually exclusive equal-tests is always zero");
3014 return convert (truth_type, integer_zero_node);
3018 /* Construct the expression we will return. First get the component
3019 reference we will make. Unless the mask is all ones the width of
3020 that field, perform the mask operation. Then compare with the merged constant. */
3022 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
3023 ll_unsignedp || rl_unsignedp);
3025 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
3026 if (! all_ones_mask_p (ll_mask, lnbitsize))
3027 result = build (BIT_AND_EXPR, type, result, ll_mask);
3029 return build (wanted_code, truth_type, result,
3030 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
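/* Sketch, not from GNU CC: in source terms, the merging done by fold_truthop
   turns two adjacent bitfield tests into one masked compare of their
   containing word.  Layout assumptions as before (little-endian, low-to-high
   allocation, 32-bit int).  */
#if 0
#include <stdio.h>

union demo2
{
  struct { unsigned int a : 4; unsigned int b : 4; } f;
  unsigned int w;
};

int
main ()
{
  union demo2 x;

  x.w = 0;
  x.f.a = 2, x.f.b = 4;

  /* "x.f.a == 2 && x.f.b == 4" becomes one compare: the mask covers both
     fields, the constants are shifted into place and IORed together.  */
  printf ("%d %d\n", x.f.a == 2 && x.f.b == 4,
	  (x.w & 0xffu) == (2u | (4u << 4)));
  return 0;
}
#endif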
3033 /* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
3034 S, a SAVE_EXPR, return the expression actually being evaluated. Note
3035 that we may sometimes modify the tree. */
3038 strip_compound_expr (t, s)
3042 tree type = TREE_TYPE (t);
3043 enum tree_code code = TREE_CODE (t);
3045 /* See if this is the COMPOUND_EXPR we want to eliminate. */
3046 if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
3047 && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
3048 return TREE_OPERAND (t, 1);
3050 /* See if this is a COND_EXPR or a simple arithmetic operator. We
3051 don't bother handling any other types. */
3052 else if (code == COND_EXPR)
3054 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3055 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3056 TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
3058 else if (TREE_CODE_CLASS (code) == '1')
3059 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3060 else if (TREE_CODE_CLASS (code) == '<'
3061 || TREE_CODE_CLASS (code) == '2')
3063 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3064 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3070 /* Perform constant folding and related simplification of EXPR.
3071 The related simplifications include x*1 => x, x*0 => 0, etc.,
3072 and application of the associative law.
3073 NOP_EXPR conversions may be removed freely (as long as we
3074 are careful not to change the C type of the overall expression).
3075 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3076 but we can constant-fold them if they have constant operands. */
3082 register tree t = expr;
3083 tree t1 = NULL_TREE;
3085 tree type = TREE_TYPE (expr);
3086 register tree arg0, arg1;
3087 register enum tree_code code = TREE_CODE (t);
3091 /* WINS will be nonzero when the switch is done
3092 if all operands are constant. */
3096 /* Don't try to process an RTL_EXPR since its operands aren't trees. */
3097 if (code == RTL_EXPR)
3100 /* Return right away if already constant. */
3101 if (TREE_CONSTANT (t))
3103 if (code == CONST_DECL)
3104 return DECL_INITIAL (t);
3108 kind = TREE_CODE_CLASS (code);
3109 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3113 /* Special case for conversion ops that can have fixed point args. */
3114 arg0 = TREE_OPERAND (t, 0);
3116 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3118 STRIP_TYPE_NOPS (arg0);
3120 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3121 subop = TREE_REALPART (arg0);
3125 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3126 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3127 && TREE_CODE (subop) != REAL_CST
3128 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3130 /* Note that TREE_CONSTANT isn't enough:
3131 static var addresses are constant but we can't
3132 do arithmetic on them. */
3135 else if (kind == 'e' || kind == '<'
3136 || kind == '1' || kind == '2' || kind == 'r')
3138 register int len = tree_code_length[(int) code];
3140 for (i = 0; i < len; i++)
3142 tree op = TREE_OPERAND (t, i);
3146 continue; /* Valid for CALL_EXPR, at least. */
3148 if (kind == '<' || code == RSHIFT_EXPR)
3150 /* Signedness matters here. Perhaps we can refine this later. */
3152 STRIP_TYPE_NOPS (op);
3156 /* Strip any conversions that don't change the mode. */
3160 if (TREE_CODE (op) == COMPLEX_CST)
3161 subop = TREE_REALPART (op);
3165 if (TREE_CODE (subop) != INTEGER_CST
3166 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3167 && TREE_CODE (subop) != REAL_CST
3168 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3170 /* Note that TREE_CONSTANT isn't enough:
3171 static var addresses are constant but we can't
3172 do arithmetic on them. */
3182 /* If this is a commutative operation, and ARG0 is a constant, move it
3183 to ARG1 to reduce the number of tests below. */
3184 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3185 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3186 || code == BIT_AND_EXPR)
3187 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3189 tem = arg0; arg0 = arg1; arg1 = tem;
3191 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3192 TREE_OPERAND (t, 1) = tem;
3195 /* Now WINS is set as described above,
3196 ARG0 is the first operand of EXPR,
3197 and ARG1 is the second operand (if it has more than one operand).
3199 First check for cases where an arithmetic operation is applied to a
3200 compound, conditional, or comparison operation. Push the arithmetic
3201 operation inside the compound or conditional to see if any folding
3202 can then be done. Convert comparison to conditional for this purpose.
3203 This also optimizes non-constant cases that used to be done in expand_expr.
3206 Before we do that, see if this is a BIT_AND_EXPR or a BIT_IOR_EXPR,
3207 one of the operands is a comparison and the other is a comparison, a
3208 BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
3209 code below would make the expression more complex. Change it to a
3210 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
3211 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
3213 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
3214 || code == EQ_EXPR || code == NE_EXPR)
3215 && ((truth_value_p (TREE_CODE (arg0))
3216 && (truth_value_p (TREE_CODE (arg1))
3217 || (TREE_CODE (arg1) == BIT_AND_EXPR
3218 && integer_onep (TREE_OPERAND (arg1, 1)))))
3219 || (truth_value_p (TREE_CODE (arg1))
3220 && (truth_value_p (TREE_CODE (arg0))
3221 || (TREE_CODE (arg0) == BIT_AND_EXPR
3222 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3224 t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
3225 : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
3229 if (code == EQ_EXPR)
3230 t = invert_truthvalue (t);
3235 if (TREE_CODE_CLASS (code) == '1')
3237 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3238 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3239 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3240 else if (TREE_CODE (arg0) == COND_EXPR)
3242 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3243 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3244 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3246 /* If this was a conversion, and all we did was to move it
3247 inside the COND_EXPR, bring it back out. But leave it if
3248 it is a conversion from integer to integer and the
3249 result precision is no wider than a word since such a
3250 conversion is cheap and may be optimized away by combine,
3251 while it couldn't if it were outside the COND_EXPR. Then return
3252 so we don't get into an infinite recursion loop taking the
3253 conversion out and then back in. */
3255 if ((code == NOP_EXPR || code == CONVERT_EXPR
3256 || code == NON_LVALUE_EXPR)
3257 && TREE_CODE (t) == COND_EXPR
3258 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3259 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3260 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3261 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0)))
3262 && ! (INTEGRAL_TYPE_P (TREE_TYPE (t))
3263 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)))
3264 && TYPE_PRECISION (TREE_TYPE (t)) <= BITS_PER_WORD))
3265 t = build1 (code, type,
3267 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3268 TREE_OPERAND (t, 0),
3269 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3270 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3273 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3274 return fold (build (COND_EXPR, type, arg0,
3275 fold (build1 (code, type, integer_one_node)),
3276 fold (build1 (code, type, integer_zero_node))));
3278 else if (TREE_CODE_CLASS (code) == '2'
3279 || TREE_CODE_CLASS (code) == '<')
3281 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3282 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3283 fold (build (code, type,
3284 arg0, TREE_OPERAND (arg1, 1))));
3285 else if (TREE_CODE (arg1) == COND_EXPR
3286 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3288 tree test, true_value, false_value;
3290 if (TREE_CODE (arg1) == COND_EXPR)
3292 test = TREE_OPERAND (arg1, 0);
3293 true_value = TREE_OPERAND (arg1, 1);
3294 false_value = TREE_OPERAND (arg1, 2);
3299 true_value = integer_one_node;
3300 false_value = integer_zero_node;
3303 /* If ARG0 is complex we want to make sure we only evaluate
3304 it once. Though this is only required if it is volatile, it
3305 might be more efficient even if it is not. However, if we
3306 succeed in folding one part to a constant, we do not need
3307 to make this SAVE_EXPR. Since we do this optimization
3308 primarily to see if we do end up with a constant and this
3309 SAVE_EXPR interferes with later optimizations, suppressing
3310 it when we can is important. */
3312 if (TREE_CODE (arg0) != SAVE_EXPR
3313 && ((TREE_CODE (arg0) != VAR_DECL
3314 && TREE_CODE (arg0) != PARM_DECL)
3315 || TREE_SIDE_EFFECTS (arg0)))
3317 tree lhs = fold (build (code, type, arg0, true_value));
3318 tree rhs = fold (build (code, type, arg0, false_value));
3320 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3321 return fold (build (COND_EXPR, type, test, lhs, rhs));
3323 arg0 = save_expr (arg0);
3326 test = fold (build (COND_EXPR, type, test,
3327 fold (build (code, type, arg0, true_value)),
3328 fold (build (code, type, arg0, false_value))));
3329 if (TREE_CODE (arg0) == SAVE_EXPR)
3330 return build (COMPOUND_EXPR, type,
3331 convert (void_type_node, arg0),
3332 strip_compound_expr (test, arg0));
3334 return convert (type, test);
3337 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3338 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3339 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3340 else if (TREE_CODE (arg0) == COND_EXPR
3341 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3343 tree test, true_value, false_value;
3345 if (TREE_CODE (arg0) == COND_EXPR)
3347 test = TREE_OPERAND (arg0, 0);
3348 true_value = TREE_OPERAND (arg0, 1);
3349 false_value = TREE_OPERAND (arg0, 2);
3354 true_value = integer_one_node;
3355 false_value = integer_zero_node;
3358 if (TREE_CODE (arg1) != SAVE_EXPR
3359 && ((TREE_CODE (arg1) != VAR_DECL
3360 && TREE_CODE (arg1) != PARM_DECL)
3361 || TREE_SIDE_EFFECTS (arg1)))
3363 tree lhs = fold (build (code, type, true_value, arg1));
3364 tree rhs = fold (build (code, type, false_value, arg1));
3366 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs)
3367 || TREE_CONSTANT (arg1))
3368 return fold (build (COND_EXPR, type, test, lhs, rhs));
3370 arg1 = save_expr (arg1);
3373 test = fold (build (COND_EXPR, type, test,
3374 fold (build (code, type, true_value, arg1)),
3375 fold (build (code, type, false_value, arg1))));
3376 if (TREE_CODE (arg1) == SAVE_EXPR)
3377 return build (COMPOUND_EXPR, type,
3378 convert (void_type_node, arg1),
3379 strip_compound_expr (test, arg1));
3381 return convert (type, test);
3384 else if (TREE_CODE_CLASS (code) == '<'
3385 && TREE_CODE (arg0) == COMPOUND_EXPR)
3386 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3387 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3388 else if (TREE_CODE_CLASS (code) == '<'
3389 && TREE_CODE (arg1) == COMPOUND_EXPR)
3390 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3391 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3403 return fold (DECL_INITIAL (t));
3408 case FIX_TRUNC_EXPR:
3409 /* Other kinds of FIX are not handled properly by fold_convert. */
3411 if (TREE_TYPE (TREE_OPERAND (t, 0)) == TREE_TYPE (t))
3412 return TREE_OPERAND (t, 0);
3414 /* Handle cases of two conversions in a row. */
3415 if (TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3416 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3418 tree inside_type = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3419 tree inter_type = TREE_TYPE (TREE_OPERAND (t, 0));
3420 tree final_type = TREE_TYPE (t);
3421 int inside_int = INTEGRAL_TYPE_P (inside_type);
3422 int inside_ptr = POINTER_TYPE_P (inside_type);
3423 int inside_float = FLOAT_TYPE_P (inside_type);
3424 int inside_prec = TYPE_PRECISION (inside_type);
3425 int inside_unsignedp = TREE_UNSIGNED (inside_type);
3426 int inter_int = INTEGRAL_TYPE_P (inter_type);
3427 int inter_ptr = POINTER_TYPE_P (inter_type);
3428 int inter_float = FLOAT_TYPE_P (inter_type);
3429 int inter_prec = TYPE_PRECISION (inter_type);
3430 int inter_unsignedp = TREE_UNSIGNED (inter_type);
3431 int final_int = INTEGRAL_TYPE_P (final_type);
3432 int final_ptr = POINTER_TYPE_P (final_type);
3433 int final_float = FLOAT_TYPE_P (final_type);
3434 int final_prec = TYPE_PRECISION (final_type);
3435 int final_unsignedp = TREE_UNSIGNED (final_type);
3437 /* In addition to the cases of two conversions in a row
3438 handled below, if we are converting something to its own
3439 type via an object of identical or wider precision, neither
3440 conversion is needed. */
3441 if (inside_type == final_type
3442 && ((inter_int && final_int) || (inter_float && final_float))
3443 && inter_prec >= final_prec)
3444 return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
3446 /* Likewise, if the intermediate and final types are either both
3447 float or both integer, we don't need the middle conversion if
3448 it is wider than the final type and doesn't change the signedness
3449 (for integers). Avoid this if the final type is a pointer
3450 since then we sometimes need the inner conversion. */
3451 if ((((inter_int || inter_ptr) && (inside_int || inside_ptr))
3452 || (inter_float && inside_float))
3453 && inter_prec >= inside_prec
3454 && (inter_float || inter_unsignedp == inside_unsignedp)
3456 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3458 /* Two conversions in a row are not needed unless:
3459 - some conversion is floating-point (overstrict for now), or
3460 - the intermediate type is narrower than both initial and final, or
3462 - the intermediate type and innermost type differ in signedness,
3463 and the outermost type is wider than the intermediate, or
3464 - the initial type is a pointer type and the precisions of the
3465 intermediate and final types differ, or
3466 - the final type is a pointer type and the precisions of the
3467 initial and intermediate types differ. */
3468 if (! inside_float && ! inter_float && ! final_float
3469 && (inter_prec > inside_prec || inter_prec > final_prec)
3470 && ! (inside_int && inter_int
3471 && inter_unsignedp != inside_unsignedp
3472 && inter_prec < final_prec)
3473 && ((inter_unsignedp && inter_prec > inside_prec)
3474 == (final_unsignedp && final_prec > inter_prec))
3475 && ! (inside_ptr && inter_prec != final_prec)
3476 && ! (final_ptr && inside_prec != inter_prec))
3477 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
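/* Sketch, not from GNU CC: the rules above in source terms, assuming 16-bit
   short and 32-bit int.  "(int) (long) i" collapses to a single conversion
   because no bits are lost in the middle, while in "(int) (short) i" the
   intermediate truncation is observable and must be kept.  */
#if 0
#include <stdio.h>

int
main ()
{
  int i = 0x12345;

  printf ("%d\n", (int) (long) i == i);		/* 1: middle conversion dropped */
  printf ("%d\n", (int) (short) i == i);	/* 0: truncation is visible */
  return 0;
}
#endif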
3480 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3481 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3482 /* Detect assigning a bitfield. */
3483 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3484 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3486 /* Don't leave an assignment inside a conversion
3487 unless assigning a bitfield. */
3488 tree prev = TREE_OPERAND (t, 0);
3489 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3490 /* First do the assignment, then return converted constant. */
3491 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3497 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3500 return fold_convert (t, arg0);
3502 #if 0 /* This loses on &"foo"[0]. */
3507 /* Fold an expression like: "foo"[2] */
3508 if (TREE_CODE (arg0) == STRING_CST
3509 && TREE_CODE (arg1) == INTEGER_CST
3510 && !TREE_INT_CST_HIGH (arg1)
3511 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3513 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3514 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3515 force_fit_type (t, 0);
3522 if (TREE_CODE (arg0) == CONSTRUCTOR)
3524 tree m = purpose_member (arg1, CONSTRUCTOR_ELTS (arg0));
3531 TREE_CONSTANT (t) = wins;
3537 if (TREE_CODE (arg0) == INTEGER_CST)
3539 HOST_WIDE_INT low, high;
3540 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3541 TREE_INT_CST_HIGH (arg0),
3543 t = build_int_2 (low, high);
3544 TREE_TYPE (t) = type;
3546 = (TREE_OVERFLOW (arg0)
3547 | force_fit_type (t, overflow));
3548 TREE_CONSTANT_OVERFLOW (t)
3549 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3551 else if (TREE_CODE (arg0) == REAL_CST)
3552 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3553 TREE_TYPE (t) = type;
3555 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3556 return TREE_OPERAND (arg0, 0);
3558 /* Convert - (a - b) to (b - a) for non-floating-point. */
3559 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3560 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3561 TREE_OPERAND (arg0, 0));
3568 if (TREE_CODE (arg0) == INTEGER_CST)
3570 if (! TREE_UNSIGNED (type)
3571 && TREE_INT_CST_HIGH (arg0) < 0)
3573 HOST_WIDE_INT low, high;
3574 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3575 TREE_INT_CST_HIGH (arg0),
3577 t = build_int_2 (low, high);
3578 TREE_TYPE (t) = type;
3580 = (TREE_OVERFLOW (arg0)
3581 | force_fit_type (t, overflow));
3582 TREE_CONSTANT_OVERFLOW (t)
3583 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3586 else if (TREE_CODE (arg0) == REAL_CST)
3588 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3589 t = build_real (type,
3590 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3592 TREE_TYPE (t) = type;
3594 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3595 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3599 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
3601 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
3602 return build (COMPLEX_EXPR, TREE_TYPE (arg0),
3603 TREE_OPERAND (arg0, 0),
3604 fold (build1 (NEGATE_EXPR,
3605 TREE_TYPE (TREE_TYPE (arg0)),
3606 TREE_OPERAND (arg0, 1))));
3607 else if (TREE_CODE (arg0) == COMPLEX_CST)
3608 return build_complex (TREE_OPERAND (arg0, 0),
3609 fold (build1 (NEGATE_EXPR,
3610 TREE_TYPE (TREE_TYPE (arg0)),
3611 TREE_OPERAND (arg0, 1))));
3612 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
3613 return fold (build (TREE_CODE (arg0), type,
3614 fold (build1 (CONJ_EXPR, type,
3615 TREE_OPERAND (arg0, 0))),
3616 fold (build1 (CONJ_EXPR,
3617 type, TREE_OPERAND (arg0, 1)))));
3618 else if (TREE_CODE (arg0) == CONJ_EXPR)
3619 return TREE_OPERAND (arg0, 0);
3625 if (TREE_CODE (arg0) == INTEGER_CST)
3626 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3627 ~ TREE_INT_CST_HIGH (arg0));
3628 TREE_TYPE (t) = type;
3629 force_fit_type (t, 0);
3630 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
3631 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3633 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3634 return TREE_OPERAND (arg0, 0);
3638 /* A + (-B) -> A - B */
3639 if (TREE_CODE (arg1) == NEGATE_EXPR)
3640 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3641 else if (! FLOAT_TYPE_P (type))
3643 if (integer_zerop (arg1))
3644 return non_lvalue (convert (type, arg0));
3646 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3647 with a constant, and the two constants have no bits in common,
3648 we should treat this as a BIT_IOR_EXPR since this may produce more simplifications. */
3650 if (TREE_CODE (arg0) == BIT_AND_EXPR
3651 && TREE_CODE (arg1) == BIT_AND_EXPR
3652 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3653 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3654 && integer_zerop (const_binop (BIT_AND_EXPR,
3655 TREE_OPERAND (arg0, 1),
3656 TREE_OPERAND (arg1, 1), 0)))
3658 code = BIT_IOR_EXPR;
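/* Sketch, not from GNU CC: with disjoint masks no carries can cross between
   the two terms, so the sum and the inclusive-or coincide:  */
#if 0
#include <stdio.h>

int
main ()
{
  unsigned int x = 0xabu, y = 0xcdu;

  printf ("%d\n",
	  ((x & 0xf0u) + (y & 0x0fu)) == ((x & 0xf0u) | (y & 0x0fu)));
  return 0;
}
#endif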
3662 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3663 about the case where C is a constant, just try one of the
3664 four possibilities. */
3666 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3667 && operand_equal_p (TREE_OPERAND (arg0, 1),
3668 TREE_OPERAND (arg1, 1), 0))
3669 return fold (build (MULT_EXPR, type,
3670 fold (build (PLUS_EXPR, type,
3671 TREE_OPERAND (arg0, 0),
3672 TREE_OPERAND (arg1, 0))),
3673 TREE_OPERAND (arg0, 1)));
3675 /* In IEEE floating point, x+0 may not equal x. */
3676 else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3678 && real_zerop (arg1))
3679 return non_lvalue (convert (type, arg0));
3681 /* In most languages, can't associate operations on floats
3682 through parentheses. Rather than remember where the parentheses
3683 were, we don't associate floats at all. It shouldn't matter much.
3684 However, associating multiplications is only very slightly
3685 inaccurate, so do that if -ffast-math is specified. */
3686 if (FLOAT_TYPE_P (type)
3687 && ! (flag_fast_math && code == MULT_EXPR))
3690 /* The varsign == -1 cases happen only for addition and subtraction.
3691 It says that the arg that was split was really CON minus VAR.
3692 The rest of the code applies to all associative operations. */
3698 if (split_tree (arg0, code, &var, &con, &varsign))
3702 /* EXPR is (CON-VAR) +- ARG1. */
3703 /* If it is + and VAR==ARG1, return just CONST. */
3704 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3705 return convert (TREE_TYPE (t), con);
3707 /* If ARG0 is a constant, don't change things around;
3708 instead keep all the constant computations together. */
3710 if (TREE_CONSTANT (arg0))
3713 /* Otherwise return (CON +- ARG1) - VAR. */
3714 t = build (MINUS_EXPR, type,
3715 fold (build (code, type, con, arg1)), var);
3719 /* EXPR is (VAR+CON) +- ARG1. */
3720 /* If it is - and VAR==ARG1, return just CONST. */
3721 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3722 return convert (TREE_TYPE (t), con);
3724 /* If ARG0 is a constant, don't change things around;
3725 instead keep all the constant computations together. */
3727 if (TREE_CONSTANT (arg0))
3730 /* Otherwise return VAR +- (ARG1 +- CON). */
3731 tem = fold (build (code, type, arg1, con));
3732 t = build (code, type, var, tem);
3734 if (integer_zerop (tem)
3735 && (code == PLUS_EXPR || code == MINUS_EXPR))
3736 return convert (type, var);
3737 /* If we have x +/- (c - d) [c an explicit integer]
3738 change it to x -/+ (d - c) since if d is relocatable
3739 then the latter can be a single immediate insn
3740 and the former cannot. */
3741 if (TREE_CODE (tem) == MINUS_EXPR
3742 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3744 tree tem1 = TREE_OPERAND (tem, 1);
3745 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3746 TREE_OPERAND (tem, 0) = tem1;
3748 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3754 if (split_tree (arg1, code, &var, &con, &varsign))
3756 if (TREE_CONSTANT (arg1))
3761 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3763 /* EXPR is ARG0 +- (CON +- VAR). */
3764 if (TREE_CODE (t) == MINUS_EXPR
3765 && operand_equal_p (var, arg0, 0))
3767 /* If VAR and ARG0 cancel, return just CON or -CON. */
3768 if (code == PLUS_EXPR)
3769 return convert (TREE_TYPE (t), con);
3770 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3771 convert (TREE_TYPE (t), con)));
3774 t = build (TREE_CODE (t), type,
3775 fold (build (code, TREE_TYPE (t), arg0, con)), var);
3777 if (integer_zerop (TREE_OPERAND (t, 0))
3778 && TREE_CODE (t) == PLUS_EXPR)
3779 return convert (TREE_TYPE (t), var);
3784 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3785 if (TREE_CODE (arg1) == REAL_CST)
3787 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3789 t1 = const_binop (code, arg0, arg1, 0);
3790 if (t1 != NULL_TREE)
3792 /* The return value should always have
3793 the same type as the original expression. */
3794 TREE_TYPE (t1) = TREE_TYPE (t);
3800 if (! FLOAT_TYPE_P (type))
3802 if (! wins && integer_zerop (arg0))
3803 return build1 (NEGATE_EXPR, type, arg1);
3804 if (integer_zerop (arg1))
3805 return non_lvalue (convert (type, arg0));
3807 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3808 about the case where C is a constant, just try one of the
3809 four possibilities. */
3811 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3812 && operand_equal_p (TREE_OPERAND (arg0, 1),
3813 TREE_OPERAND (arg1, 1), 0))
3814 return fold (build (MULT_EXPR, type,
3815 fold (build (MINUS_EXPR, type,
3816 TREE_OPERAND (arg0, 0),
3817 TREE_OPERAND (arg1, 0))),
3818 TREE_OPERAND (arg0, 1)));
3820 /* Convert A - (-B) to A + B. */
3821 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3822 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3824 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3827 /* Except with IEEE floating point, 0-x equals -x. */
3828 if (! wins && real_zerop (arg0))
3829 return build1 (NEGATE_EXPR, type, arg1);
3830 /* Except with IEEE floating point, x-0 equals x. */
3831 if (real_zerop (arg1))
3832 return non_lvalue (convert (type, arg0));
3835 /* Fold &x - &x. This can happen from &x.foo - &x.
3836 This is unsafe for certain floats even in non-IEEE formats.
3837 In IEEE, it is unsafe because it does wrong for NaNs.
3838 Also note that operand_equal_p is always false if an operand
3841 if ((! FLOAT_TYPE_P (type) || flag_fast_math)
3842 && operand_equal_p (arg0, arg1, 0))
3843 return convert (type, integer_zero_node);
3848 if (! FLOAT_TYPE_P (type))
3850 if (integer_zerop (arg1))
3851 return omit_one_operand (type, arg1, arg0);
3852 if (integer_onep (arg1))
3853 return non_lvalue (convert (type, arg0));
3855 /* ((A / C) * C) is A if the division is an
3856 EXACT_DIV_EXPR. Since C is normally a constant,
3857 just check for one of the four possibilities. */
3859 if (TREE_CODE (arg0) == EXACT_DIV_EXPR
3860 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3861 return TREE_OPERAND (arg0, 0);
3863 /* (a * (1 << b)) is (a << b) */
3864 if (TREE_CODE (arg1) == LSHIFT_EXPR
3865 && integer_onep (TREE_OPERAND (arg1, 0)))
3866 return fold (build (LSHIFT_EXPR, type, arg0,
3867 TREE_OPERAND (arg1, 1)));
3868 if (TREE_CODE (arg0) == LSHIFT_EXPR
3869 && integer_onep (TREE_OPERAND (arg0, 0)))
3870 return fold (build (LSHIFT_EXPR, type, arg1,
3871 TREE_OPERAND (arg0, 1)));
3875 /* x*0 is 0, except for IEEE floating point. */
3876 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3878 && real_zerop (arg1))
3879 return omit_one_operand (type, arg1, arg0);
3880 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3881 However, ANSI says we can drop signals,
3882 so we can do this anyway. */
3883 if (real_onep (arg1))
3884 return non_lvalue (convert (type, arg0));
3886 if (! wins && real_twop (arg1))
3888 tree arg = save_expr (arg0);
3889 return build (PLUS_EXPR, type, arg, arg);
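/* Sketch, not from GNU CC: "x * 2.0" and "x + x" are the same value in binary
   floating point, so the rewrite above is always exact; the SAVE_EXPR only
   guards against evaluating X twice.  */
#if 0
#include <stdio.h>

int
main ()
{
  double x = 1.3;

  printf ("%d\n", x * 2.0 == x + x);	/* prints 1 */
  return 0;
}
#endif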
3896 if (integer_all_onesp (arg1))
3897 return omit_one_operand (type, arg1, arg0);
3898 if (integer_zerop (arg1))
3899 return non_lvalue (convert (type, arg0));
3900 t1 = distribute_bit_expr (code, type, arg0, arg1);
3901 if (t1 != NULL_TREE)
3904 /* (a << C1) | (a >> C2) if A is unsigned and C1+C2 is the size of A
3905 is a rotate of A by C1 bits. */
3907 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3908 || TREE_CODE (arg0) == LSHIFT_EXPR)
3909 && (TREE_CODE (arg1) == RSHIFT_EXPR
3910 || TREE_CODE (arg1) == LSHIFT_EXPR)
3911 && TREE_CODE (arg0) != TREE_CODE (arg1)
3912 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3913 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3914 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3915 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3916 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3917 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3918 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3919 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3920 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3921 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3922 TREE_CODE (arg0) == LSHIFT_EXPR
3923 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
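/* Sketch, not from GNU CC: the pattern above matches the portable C idiom for
   rotation, e.g. for 32-bit unsigned values (N assumed in 1..31 so neither
   shift is the full width):  */
#if 0
unsigned int
rotl32 (x, n)
     unsigned int x;
     int n;
{
  return (x << n) | (x >> (32 - n));	/* recognized as LROTATE_EXPR */
}
#endif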
3928 if (integer_zerop (arg1))
3929 return non_lvalue (convert (type, arg0));
3930 if (integer_all_onesp (arg1))
3931 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3936 if (integer_all_onesp (arg1))
3937 return non_lvalue (convert (type, arg0));
3938 if (integer_zerop (arg1))
3939 return omit_one_operand (type, arg1, arg0);
3940 t1 = distribute_bit_expr (code, type, arg0, arg1);
3941 if (t1 != NULL_TREE)
3943 /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char. */
3944 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3945 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3947 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3948 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3949 && (~TREE_INT_CST_LOW (arg0)
3950 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3951 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3953 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3954 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3956 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3957 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3958 && (~TREE_INT_CST_LOW (arg1)
3959 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3960 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3964 case BIT_ANDTC_EXPR:
3965 if (integer_all_onesp (arg0))
3966 return non_lvalue (convert (type, arg1));
3967 if (integer_zerop (arg0))
3968 return omit_one_operand (type, arg0, arg1);
3969 if (TREE_CODE (arg1) == INTEGER_CST)
3971 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3972 code = BIT_AND_EXPR;
3978 /* In most cases, do nothing with a divide by zero. */
3979 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3980 #ifndef REAL_INFINITY
3981 if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
3984 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3986 /* In IEEE floating point, x/1 is not equivalent to x for snans.
3987 However, ANSI says we can drop signals, so we can do this anyway. */
3988 if (real_onep (arg1))
3989 return non_lvalue (convert (type, arg0));
3991 /* If ARG1 is a constant, we can convert this to a multiply by the
3992 reciprocal. This does not have the same rounding properties,
3993 so only do this if -ffast-math. We can actually always safely
3994 do it if ARG1 is a power of two, but it's hard to tell if it is
3995 or not in a portable manner. */
3996 if (TREE_CODE (arg1) == REAL_CST && flag_fast_math
3997 && 0 != (tem = const_binop (code, build_real (type, dconst1),
3999 return fold (build (MULT_EXPR, type, arg0, tem));
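/* Sketch, not from GNU CC: under -ffast-math, "x / 4.0" becomes "x * 0.25".
   For a power-of-two divisor the reciprocal is exact and nothing changes; for
   10.0 the reciprocal rounds, so results can differ in the last bit -- hence
   the flag_fast_math guard above.  */
#if 0
#include <stdio.h>

int
main ()
{
  double x = 3.0;

  printf ("%d\n", x / 4.0 == x * 0.25);		/* 1: exact reciprocal */
  printf ("%d\n", x / 10.0 == x * 0.1);		/* may print 0 */
  return 0;
}
#endif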
4003 case TRUNC_DIV_EXPR:
4004 case ROUND_DIV_EXPR:
4005 case FLOOR_DIV_EXPR:
4007 case EXACT_DIV_EXPR:
4008 if (integer_onep (arg1))
4009 return non_lvalue (convert (type, arg0));
4010 if (integer_zerop (arg1))
4013 /* If we have ((a / C1) / C2) where both divisions are the same type, try
4014 to simplify. First see if C1 * C2 overflows or not. */
4015 if (TREE_CODE (arg0) == code && TREE_CODE (arg1) == INTEGER_CST
4016 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
4020 new_divisor = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 1), arg1, 0);
4021 tem = const_binop (FLOOR_DIV_EXPR, new_divisor, arg1, 0);
4023 if (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_LOW (tem)
4024 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_HIGH (tem))
4026 /* If no overflow, divide by C1*C2. */
4027 return fold (build (code, type, TREE_OPERAND (arg0, 0), new_divisor));
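/* Sketch, not from GNU CC: for truncating division with positive divisors,
   (a / c1) / c2 equals a / (c1 * c2) for every A, so the fold above only has
   to rule out overflow in the product:  */
#if 0
#include <stdio.h>

int
main ()
{
  int a;

  for (a = -100; a <= 100; a++)
    if ((a / 6) / 7 != a / 42)
      printf ("mismatch at %d\n", a);

  printf ("done\n");
  return 0;
}
#endif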
4031 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
4032 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
4033 expressions, which often appear in the offsets or sizes of
4034 objects with a varying size. Only deal with positive divisors
4035 and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
4037 Look for NOPs and SAVE_EXPRs inside. */
4039 if (TREE_CODE (arg1) == INTEGER_CST
4040 && tree_int_cst_sgn (arg1) >= 0)
4042 int have_save_expr = 0;
4043 tree c2 = integer_zero_node;
4046 if (TREE_CODE (xarg0) == SAVE_EXPR)
4047 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4051 if (TREE_CODE (xarg0) == PLUS_EXPR
4052 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4053 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4054 else if (TREE_CODE (xarg0) == MINUS_EXPR
4055 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4056 /* If we are doing this computation unsigned, the negate is incorrect. */
4058 && ! TREE_UNSIGNED (type))
4060 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4061 xarg0 = TREE_OPERAND (xarg0, 0);
4064 if (TREE_CODE (xarg0) == SAVE_EXPR)
4065 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4069 if (TREE_CODE (xarg0) == MULT_EXPR
4070 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4071 && tree_int_cst_sgn (TREE_OPERAND (xarg0, 1)) >= 0
4072 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
4073 TREE_OPERAND (xarg0, 1), arg1, 1))
4074 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
4075 TREE_OPERAND (xarg0, 1), 1)))
4076 && (tree_int_cst_sgn (c2) >= 0
4077 || integer_zerop (const_binop (TRUNC_MOD_EXPR, c2,
4080 tree outer_div = integer_one_node;
4081 tree c1 = TREE_OPERAND (xarg0, 1);
4084 /* If C3 > C1, set them equal and do a divide by
4085 C3/C1 at the end of the operation. */
4086 if (tree_int_cst_lt (c1, c3))
4087 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
4089 /* The result is A * (C1/C3) + (C2/C3). */
4090 t = fold (build (PLUS_EXPR, type,
4091 fold (build (MULT_EXPR, type,
4092 TREE_OPERAND (xarg0, 0),
4093 const_binop (code, c1, c3, 1))),
4094 const_binop (code, c2, c3, 1)));
4096 if (! integer_onep (outer_div))
4097 t = fold (build (code, type, t, convert (type, outer_div)));
4109 case FLOOR_MOD_EXPR:
4110 case ROUND_MOD_EXPR:
4111 case TRUNC_MOD_EXPR:
4112 if (integer_onep (arg1))
4113 return omit_one_operand (type, integer_zero_node, arg0);
4114 if (integer_zerop (arg1))
4117 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
4118 where C1 % C3 == 0. Handle similarly to the division case,
4119 but don't bother with SAVE_EXPRs. */
4121 if (TREE_CODE (arg1) == INTEGER_CST
4122 && ! integer_zerop (arg1))
4124 tree c2 = integer_zero_node;
4127 if (TREE_CODE (xarg0) == PLUS_EXPR
4128 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4129 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4130 else if (TREE_CODE (xarg0) == MINUS_EXPR
4131 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4132 && ! TREE_UNSIGNED (type))
4134 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4135 xarg0 = TREE_OPERAND (xarg0, 0);
4140 if (TREE_CODE (xarg0) == MULT_EXPR
4141 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4142 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
4143 TREE_OPERAND (xarg0, 1),
4145 && tree_int_cst_sgn (c2) >= 0)
4146 /* The result is (C2%C3). */
4147 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
4148 TREE_OPERAND (xarg0, 0));
4157 if (integer_zerop (arg1))
4158 return non_lvalue (convert (type, arg0));
4159 /* Since negative shift count is not well-defined,
4160 don't try to compute it in the compiler. */
4161 if (tree_int_cst_sgn (arg1) < 0)
4166 if (operand_equal_p (arg0, arg1, 0))
4168 if (INTEGRAL_TYPE_P (type)
4169 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
4170 return omit_one_operand (type, arg1, arg0);
4174 if (operand_equal_p (arg0, arg1, 0))
4176 if (INTEGRAL_TYPE_P (type)
4177 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
4178 return omit_one_operand (type, arg1, arg0);
4181 case TRUTH_NOT_EXPR:
4182 /* Note that the operand of this must be an int
4183 and its values must be 0 or 1.
4184 ("true" is a fixed value perhaps depending on the language,
4185 but we don't handle values other than 1 correctly yet.) */
4186 return invert_truthvalue (arg0);
4188 case TRUTH_ANDIF_EXPR:
4189 /* Note that the operands of this must be ints
4190 and their values must be 0 or 1.
4191 ("true" is a fixed value perhaps depending on the language.) */
4192 /* If first arg is constant zero, return it. */
4193 if (integer_zerop (arg0))
4195 case TRUTH_AND_EXPR:
4196 /* If either arg is constant true, drop it. */
4197 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4198 return non_lvalue (arg1);
4199 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4200 return non_lvalue (arg0);
4201 /* If second arg is constant zero, result is zero, but first arg
4202 must be evaluated. */
4203 if (integer_zerop (arg1))
4204 return omit_one_operand (type, arg1, arg0);
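/* For example, f () && 0 folds to (f (), 0): the result is known to
   be zero, but f () must still be called for its side effects.  */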
4207 /* We only do these simplifications if we are optimizing. */
4211 /* Check for things like (A || B) && (A || C). We can convert this
4212 to A || (B && C). Note that either operator can be any of the four
4213 truth and/or operations and the transformation will still be
4214 valid. Also note that we only care about order for the
4215 ANDIF and ORIF operators. */
4216 if (TREE_CODE (arg0) == TREE_CODE (arg1)
4217 && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
4218 || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
4219 || TREE_CODE (arg0) == TRUTH_AND_EXPR
4220 || TREE_CODE (arg0) == TRUTH_OR_EXPR))
4222 tree a00 = TREE_OPERAND (arg0, 0);
4223 tree a01 = TREE_OPERAND (arg0, 1);
4224 tree a10 = TREE_OPERAND (arg1, 0);
4225 tree a11 = TREE_OPERAND (arg1, 1);
4226 int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
4227 || TREE_CODE (arg0) == TRUTH_AND_EXPR)
4228 && (code == TRUTH_AND_EXPR
4229 || code == TRUTH_OR_EXPR));
4231 if (operand_equal_p (a00, a10, 0))
4232 return fold (build (TREE_CODE (arg0), type, a00,
4233 fold (build (code, type, a01, a11))));
4234 else if (commutative && operand_equal_p (a00, a11, 0))
4235 return fold (build (TREE_CODE (arg0), type, a00,
4236 fold (build (code, type, a01, a10))));
4237 else if (commutative && operand_equal_p (a01, a10, 0))
4238 return fold (build (TREE_CODE (arg0), type, a01,
4239 fold (build (code, type, a00, a11))));
4241 /* This case is tricky because we must either have commutative
4242 operators or else A10 must not have side-effects. */
4244 else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
4245 && operand_equal_p (a01, a11, 0))
4246 return fold (build (TREE_CODE (arg0), type,
4247 fold (build (code, type, a00, a10)),
4251 /* Check for the possibility of merging component references. If our
4252 lhs is another similar operation, try to merge its rhs with our
4253 rhs. Then try to merge our lhs and rhs. */
4254 if (TREE_CODE (arg0) == code
4255 && 0 != (tem = fold_truthop (code, type,
4256 TREE_OPERAND (arg0, 1), arg1)))
4257 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
4259 if ((tem = fold_truthop (code, type, arg0, arg1)) != 0)
4264 case TRUTH_ORIF_EXPR:
4265 /* Note that the operands of this must be ints
4266 and their values must be 0 or true.
4267 ("true" is a fixed value perhaps depending on the language.) */
4268 /* If first arg is constant true, return it. */
4269 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4272 /* If either arg is constant zero, drop it. */
4273 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
4274 return non_lvalue (arg1);
4275 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
4276 return non_lvalue (arg0);
4277 /* If second arg is constant true, result is true, but we must
4278 evaluate first arg. */
4279 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4280 return omit_one_operand (type, arg1, arg0);
4283 case TRUTH_XOR_EXPR:
4284 /* If either arg is constant zero, drop it. */
4285 if (integer_zerop (arg0))
4286 return non_lvalue (arg1);
4287 if (integer_zerop (arg1))
4288 return non_lvalue (arg0);
4289 /* If either arg is constant true, this is a logical inversion. */
4290 if (integer_onep (arg0))
4291 return non_lvalue (invert_truthvalue (arg1));
4292 if (integer_onep (arg1))
4293 return non_lvalue (invert_truthvalue (arg0));
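/* For example, x ^ 1 folds to !x and 1 ^ y folds to !y, where the
   operands are truth values.  */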
4302 /* If one arg is a constant integer, put it last. */
4303 if (TREE_CODE (arg0) == INTEGER_CST
4304 && TREE_CODE (arg1) != INTEGER_CST)
4306 TREE_OPERAND (t, 0) = arg1;
4307 TREE_OPERAND (t, 1) = arg0;
4308 arg0 = TREE_OPERAND (t, 0);
4309 arg1 = TREE_OPERAND (t, 1);
4310 code = swap_tree_comparison (code);
4311 TREE_SET_CODE (t, code);
4314 /* Convert foo++ == CONST into ++foo == CONST + INCR.
4315 First, see if one arg is constant; find the constant arg
4316 and the other one. */
4318 tree constop = 0, varop;
4319 int constopnum = -1;
4321 if (TREE_CONSTANT (arg1))
4322 constopnum = 1, constop = arg1, varop = arg0;
4323 if (TREE_CONSTANT (arg0))
4324 constopnum = 0, constop = arg0, varop = arg1;
4326 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
4328 /* This optimization is invalid for ordered comparisons
4329 if CONST+INCR overflows or if foo+incr might overflow.
4330 It is likewise invalid for floating point due to rounding.
4331 For pointer types we assume overflow doesn't happen. */
4332 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4333 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4334 && (code == EQ_EXPR || code == NE_EXPR)))
4337 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
4338 constop, TREE_OPERAND (varop, 1)));
4339 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
4341 t = build (code, type, TREE_OPERAND (t, 0),
4342 TREE_OPERAND (t, 1));
4343 TREE_OPERAND (t, constopnum) = newconst;
4347 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
4349 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4350 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4351 && (code == EQ_EXPR || code == NE_EXPR)))
4354 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
4355 constop, TREE_OPERAND (varop, 1)));
4356 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
4357 t = build (code, type, TREE_OPERAND (t, 0),
4358 TREE_OPERAND (t, 1));
4359 TREE_OPERAND (t, constopnum) = newconst;
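/* For example, i++ == 5 folds to ++i == 6 and i-- == 5 to --i == 4;
   both sides of the comparison change by the same increment, and the
   tests above restrict this to == and != (or to pointer types) so
   that overflow cannot change the result.  */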
4365 /* Change X >= CST to X > (CST - 1) if CST is positive. */
4366 if (TREE_CODE (arg1) == INTEGER_CST
4367 && TREE_CODE (arg0) != INTEGER_CST
4368 && tree_int_cst_sgn (arg1) > 0)
4370 switch (TREE_CODE (t))
4374 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4375 t = build (code, type, TREE_OPERAND (t, 0), arg1);
4380 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4381 t = build (code, type, TREE_OPERAND (t, 0), arg1);
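/* Thus x >= 5 becomes x > 4 and, in the other arm of the switch,
   x < 5 becomes x <= 4; this is safe because CST is a positive
   integer constant, so CST - 1 cannot wrap.  */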
4386 /* If this is an EQ or NE comparison with zero and ARG0 is
4387 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
4388 two operations, but the latter can be done in one less insn
4389 on machines that have only two-operand insns or on which a
4390 constant cannot be the first operand. */
4391 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
4392 && TREE_CODE (arg0) == BIT_AND_EXPR)
4394 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
4395 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
4397 fold (build (code, type,
4398 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4400 TREE_TYPE (TREE_OPERAND (arg0, 0)),
4401 TREE_OPERAND (arg0, 1),
4402 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
4403 convert (TREE_TYPE (arg0),
4406 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
4407 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
4409 fold (build (code, type,
4410 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4412 TREE_TYPE (TREE_OPERAND (arg0, 1)),
4413 TREE_OPERAND (arg0, 0),
4414 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
4415 convert (TREE_TYPE (arg0),
4420 /* If this is an NE or EQ comparison of zero against the result of a
4421 signed MOD operation whose second operand is a power of 2, make
4422 the MOD operation unsigned since it is simpler and equivalent. */
4423 if ((code == NE_EXPR || code == EQ_EXPR)
4424 && integer_zerop (arg1)
4425 && ! TREE_UNSIGNED (TREE_TYPE (arg0))
4426 && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
4427 || TREE_CODE (arg0) == CEIL_MOD_EXPR
4428 || TREE_CODE (arg0) == FLOOR_MOD_EXPR
4429 || TREE_CODE (arg0) == ROUND_MOD_EXPR)
4430 && integer_pow2p (TREE_OPERAND (arg0, 1)))
4432 tree newtype = unsigned_type (TREE_TYPE (arg0));
4433 tree newmod = build (TREE_CODE (arg0), newtype,
4434 convert (newtype, TREE_OPERAND (arg0, 0)),
4435 convert (newtype, TREE_OPERAND (arg0, 1)));
4437 return build (code, type, newmod, convert (newtype, arg1));
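/* For example, with int x, (x % 8) == 0 folds to a comparison of
   (unsigned) x % 8 against zero; equality with zero does not depend
   on the sign of the remainder, and the unsigned MOD by a power of 2
   can be done as a simple mask.  */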
4440 /* If this is an NE comparison of zero with an AND of one, remove the
4441 comparison since the AND will give the correct value. */
4442 if (code == NE_EXPR && integer_zerop (arg1)
4443 && TREE_CODE (arg0) == BIT_AND_EXPR
4444 && integer_onep (TREE_OPERAND (arg0, 1)))
4445 return convert (type, arg0);
4447 /* If we have (A & C) == C where C is a power of 2, convert this into
4448 (A & C) != 0. Similarly for NE_EXPR. */
4449 if ((code == EQ_EXPR || code == NE_EXPR)
4450 && TREE_CODE (arg0) == BIT_AND_EXPR
4451 && integer_pow2p (TREE_OPERAND (arg0, 1))
4452 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
4453 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
4454 arg0, integer_zero_node);
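/* For example, (x & 4) == 4 folds to (x & 4) != 0, since x & 4 can
   only yield 0 or 4.  */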
4456 /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
4457 and similarly for >= into !=. */
4458 if ((code == LT_EXPR || code == GE_EXPR)
4459 && TREE_UNSIGNED (TREE_TYPE (arg0))
4460 && TREE_CODE (arg1) == LSHIFT_EXPR
4461 && integer_onep (TREE_OPERAND (arg1, 0)))
4462 return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4463 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4464 TREE_OPERAND (arg1, 1)),
4465 convert (TREE_TYPE (arg0), integer_zero_node));
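/* For example, with unsigned x, x < (1 << y) folds to
   (x >> y) == 0 and x >= (1 << y) folds to (x >> y) != 0; x is
   below 1 << y exactly when no bits at or above position y are
   set.  */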
4467 else if ((code == LT_EXPR || code == GE_EXPR)
4468 && TREE_UNSIGNED (TREE_TYPE (arg0))
4469 && (TREE_CODE (arg1) == NOP_EXPR
4470 || TREE_CODE (arg1) == CONVERT_EXPR)
4471 && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
4472 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
4474 build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
4475 convert (TREE_TYPE (arg0),
4476 build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
4477 TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
4478 convert (TREE_TYPE (arg0), integer_zero_node));
4480 /* Simplify comparison of something with itself. (For IEEE
4481 floating-point, we can only do some of these simplifications.) */
4482 if (operand_equal_p (arg0, arg1, 0))
4489 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4491 t = build_int_2 (1, 0);
4492 TREE_TYPE (t) = type;
4496 TREE_SET_CODE (t, code);
4500 /* For NE, we can only do this simplification if the operand type is integral.  */
4501 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4503 /* ... fall through ... */
4506 t = build_int_2 (0, 0);
4507 TREE_TYPE (t) = type;
4512 /* An unsigned comparison against 0 can be simplified. */
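/* Specifically, for unsigned x: x > 0 becomes x != 0, x <= 0
   becomes x == 0, x >= 0 is always 1, and x < 0 is always 0.  */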
4513 if (integer_zerop (arg1)
4514 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
4515 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
4516 && TREE_UNSIGNED (TREE_TYPE (arg1)))
4518 switch (TREE_CODE (t))
4522 TREE_SET_CODE (t, NE_EXPR);
4526 TREE_SET_CODE (t, EQ_EXPR);
4529 return omit_one_operand (type,
4530 convert (type, integer_one_node),
4533 return omit_one_operand (type,
4534 convert (type, integer_zero_node),
4539 /* If we are comparing an expression that just has comparisons
4540 of two integer values, arithmetic expressions of those comparisons,
4541 and constants, we can simplify it. There are only three cases
4542 to check: the two values can either be equal, the first can be
4543 greater, or the second can be greater. Fold the expression for
4544 those three values. Since each value must be 0 or 1, we have
4545 eight possibilities, each of which corresponds to the constant 0
4546 or 1 or one of the six possible comparisons.
4548 This handles common cases like (a > b) == 0 but also handles
4549 expressions like ((x > y) - (y > x)) > 0, which supposedly
4550 occur in macroized code. */
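/* For example, for (a > b) == 0 the three foldings below yield
   low == 1, equal == 1 and high == 0, giving the mask value 3,
   which corresponds to `<='; the whole expression therefore folds
   to a <= b.  */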
4552 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
4554 tree cval1 = 0, cval2 = 0;
4557 if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
4558 /* Don't handle degenerate cases here; they should already
4559 have been handled anyway. */
4560 && cval1 != 0 && cval2 != 0
4561 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
4562 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
4563 && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
4564 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
4565 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
4567 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
4568 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
4570 /* We can't just pass T to eval_subst in case cval1 or cval2
4571 was the same as ARG1. */
4574 = fold (build (code, type,
4575 eval_subst (arg0, cval1, maxval, cval2, minval),
4578 = fold (build (code, type,
4579 eval_subst (arg0, cval1, maxval, cval2, maxval),
4582 = fold (build (code, type,
4583 eval_subst (arg0, cval1, minval, cval2, maxval),
4586 /* All three of these results should be 0 or 1. Confirm they
4587 are.  Then use those values to select the proper code to return.  */
4590 if ((integer_zerop (high_result)
4591 || integer_onep (high_result))
4592 && (integer_zerop (equal_result)
4593 || integer_onep (equal_result))
4594 && (integer_zerop (low_result)
4595 || integer_onep (low_result)))
4597 /* Make a 3-bit mask with the high-order bit being the
4598 value for `>', the next for `=', and the low for `<'.  */
4599 switch ((integer_onep (high_result) * 4)
4600 + (integer_onep (equal_result) * 2)
4601 + integer_onep (low_result))
4605 return omit_one_operand (type, integer_zero_node, arg0);
4626 return omit_one_operand (type, integer_one_node, arg0);
4629 t = build (code, type, cval1, cval2);
4631 return save_expr (t);
4638 /* If this is a comparison of a field, we may be able to simplify it. */
4639 if ((TREE_CODE (arg0) == COMPONENT_REF
4640 || TREE_CODE (arg0) == BIT_FIELD_REF)
4641 && (code == EQ_EXPR || code == NE_EXPR)
4642 /* Handle the constant case even without -O
4643 to make sure the warnings are given. */
4644 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
4646 t1 = optimize_bit_field_compare (code, type, arg0, arg1);
4650 /* If this is a comparison of complex values and either or both
4651 sides are a COMPLEX_EXPR, it is best to split up the comparisons
4652 and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR. This
4653 may prevent needless evaluations. */
4654 if ((code == EQ_EXPR || code == NE_EXPR)
4655 && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
4656 && (TREE_CODE (arg0) == COMPLEX_EXPR
4657 || TREE_CODE (arg1) == COMPLEX_EXPR))
4659 tree subtype = TREE_TYPE (TREE_TYPE (arg0));
4660 tree real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
4661 tree imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
4662 tree real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
4663 tree imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));
4665 return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
4668 fold (build (code, type, real0, real1)),
4669 fold (build (code, type, imag0, imag1))));
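/* For example, x == y where x was built by COMPLEX_EXPR becomes
   realpart == realpart && imagpart == imagpart, so a mismatch in
   the real parts skips the comparison of the imaginary parts.  */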
4672 /* From here on, the only cases we handle are when the result is
4673 known to be a constant.
4675 To compute GT, swap the arguments and do LT.
4676 To compute GE, do LT and invert the result.
4677 To compute LE, swap the arguments, do LT and invert the result.
4678 To compute NE, do EQ and invert the result.
4680 Therefore, the code below must handle only EQ and LT. */
4682 if (code == LE_EXPR || code == GT_EXPR)
4684 tem = arg0, arg0 = arg1, arg1 = tem;
4685 code = swap_tree_comparison (code);
4688 /* Note that it is safe to invert for real values here because we
4689 will check below in the one case that it matters. */
4692 if (code == NE_EXPR || code == GE_EXPR)
4695 code = invert_tree_comparison (code);
4698 /* Compute a result for LT or EQ if args permit;
4699 otherwise return T. */
4700 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
4702 if (code == EQ_EXPR)
4703 t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
4704 == TREE_INT_CST_LOW (arg1))
4705 && (TREE_INT_CST_HIGH (arg0)
4706 == TREE_INT_CST_HIGH (arg1)),
4709 t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
4710 ? INT_CST_LT_UNSIGNED (arg0, arg1)
4711 : INT_CST_LT (arg0, arg1)),
4715 /* Assume a nonexplicit constant cannot equal an explicit one,
4716 since such code would be undefined anyway.
4717 Exception: on sysvr4, using #pragma weak,
4718 a label can come out as 0. */
4719 else if (TREE_CODE (arg1) == INTEGER_CST
4720 && !integer_zerop (arg1)
4721 && TREE_CONSTANT (arg0)
4722 && TREE_CODE (arg0) == ADDR_EXPR
4724 t1 = build_int_2 (0, 0);
4726 /* Two real constants can be compared explicitly. */
4727 else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
4729 /* If either operand is a NaN, the result is false with two
4730 exceptions: First, an NE_EXPR is true on NaNs, but that case
4731 is already handled correctly since we will be inverting the
4732 result for NE_EXPR. Second, if we had inverted a LE_EXPR
4733 or a GE_EXPR into a LT_EXPR, we must return true so that it
4734 will be inverted into false. */
4736 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
4737 || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
4738 t1 = build_int_2 (invert && code == LT_EXPR, 0);
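/* For example, when folding 1.0 >= NAN, GE has been turned into LT
   with `invert' set, so we build 1 here and the final inversion
   below produces the correct false result.  */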
4740 else if (code == EQ_EXPR)
4741 t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
4742 TREE_REAL_CST (arg1)),
4745 t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
4746 TREE_REAL_CST (arg1)),
4750 if (t1 == NULL_TREE)
4754 TREE_INT_CST_LOW (t1) ^= 1;
4756 TREE_TYPE (t1) = type;
4760 /* Pedantic ANSI C says that a conditional expression is never an lvalue,
4761 so all simple results must be passed through pedantic_non_lvalue. */
4762 if (TREE_CODE (arg0) == INTEGER_CST)
4763 return pedantic_non_lvalue
4764 (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
4765 else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
4766 return pedantic_omit_one_operand (type, arg1, arg0);
4768 /* If the second operand is zero, invert the comparison and swap
4769 the second and third operands. Likewise if the second operand
4770 is constant and the third is not, or if the third operand is
4771 equivalent to the first operand of the comparison. */
4773 if (integer_zerop (arg1)
4774 || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
4775 || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4776 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4777 TREE_OPERAND (t, 2),
4778 TREE_OPERAND (arg0, 1))))
4780 /* See if this can be inverted. If it can't, possibly because
4781 it was a floating-point inequality comparison, don't do anything.  */
4783 tem = invert_truthvalue (arg0);
4785 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4787 t = build (code, type, tem,
4788 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
4790 arg1 = TREE_OPERAND (t, 2);
4795 /* If we have A op B ? A : C, we may be able to convert this to a
4796 simpler expression, depending on the operation and the values
4797 of B and C. IEEE floating point prevents this though,
4798 because A or B might be -0.0 or a NaN. */
4800 if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4801 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4802 || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
4804 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4805 arg1, TREE_OPERAND (arg0, 1)))
4807 tree arg2 = TREE_OPERAND (t, 2);
4808 enum tree_code comp_code = TREE_CODE (arg0);
4812 /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
4813 depending on the comparison operation. */
4814 if ((FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 1)))
4815 ? real_zerop (TREE_OPERAND (arg0, 1))
4816 : integer_zerop (TREE_OPERAND (arg0, 1)))
4817 && TREE_CODE (arg2) == NEGATE_EXPR
4818 && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
4822 return pedantic_non_lvalue
4823 (fold (build1 (NEGATE_EXPR, type, arg1)));
4825 return pedantic_non_lvalue (convert (type, arg1));
4828 return pedantic_non_lvalue
4829 (fold (build1 (ABS_EXPR, type, arg1)));
4832 return pedantic_non_lvalue
4833 (fold (build1 (NEGATE_EXPR, type,
4834 fold (build1 (ABS_EXPR, type, arg1)))));
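/* For example, x >= 0 ? x : -x folds to abs (x) and
   x <= 0 ? x : -x folds to -abs (x); the IEEE test above excludes
   these foldings when -0.0 or NaNs could change the result.  */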
4837 /* If this is A != 0 ? A : 0, this is simply A.  For ==, it is zero.  */
4840 if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
4842 if (comp_code == NE_EXPR)
4843 return pedantic_non_lvalue (convert (type, arg1));
4844 else if (comp_code == EQ_EXPR)
4845 return pedantic_non_lvalue (convert (type, integer_zero_node));
4848 /* If this is A op B ? A : B, this is either A, B, min (A, B),
4849 or max (A, B), depending on the operation. */
4851 if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
4852 arg2, TREE_OPERAND (arg0, 0)))
4854 tree comp_op0 = TREE_OPERAND (arg0, 0);
4855 tree comp_op1 = TREE_OPERAND (arg0, 1);
4856 tree comp_type = TREE_TYPE (comp_op0);
4861 return pedantic_non_lvalue (convert (type, arg2));
4863 return pedantic_non_lvalue (convert (type, arg1));
4866 return pedantic_non_lvalue
4867 (convert (type, (fold (build (MIN_EXPR, comp_type,
4868 comp_op0, comp_op1)))));
4871 return pedantic_non_lvalue
4872 (convert (type, fold (build (MAX_EXPR, comp_type,
4873 comp_op0, comp_op1))));
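/* For example, a < b ? a : b folds to min (a, b) and
   a > b ? a : b folds to max (a, b), computed in the comparison
   type and then converted.  */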
4877 /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
4878 we might still be able to simplify this. For example,
4879 if C1 is one less or one more than C2, this might have started
4880 out as a MIN or MAX and been transformed by this function.
4881 Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE. */
4883 if (INTEGRAL_TYPE_P (type)
4884 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4885 && TREE_CODE (arg2) == INTEGER_CST)
4889 /* We can replace A with C1 in this case. */
4890 arg1 = convert (type, TREE_OPERAND (arg0, 1));
4891 t = build (code, type, TREE_OPERAND (t, 0), arg1,
4892 TREE_OPERAND (t, 2));
4896 /* If C1 is C2 + 1, this is min(A, C2). */
4897 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4898 && operand_equal_p (TREE_OPERAND (arg0, 1),
4899 const_binop (PLUS_EXPR, arg2,
4900 integer_one_node, 0), 1))
4901 return pedantic_non_lvalue
4902 (fold (build (MIN_EXPR, type, arg1, arg2)));
4906 /* If C1 is C2 - 1, this is min(A, C2). */
4907 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4908 && operand_equal_p (TREE_OPERAND (arg0, 1),
4909 const_binop (MINUS_EXPR, arg2,
4910 integer_one_node, 0), 1))
4911 return pedantic_non_lvalue
4912 (fold (build (MIN_EXPR, type, arg1, arg2)));
4916 /* If C1 is C2 - 1, this is max(A, C2). */
4917 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4918 && operand_equal_p (TREE_OPERAND (arg0, 1),
4919 const_binop (MINUS_EXPR, arg2,
4920 integer_one_node, 0), 1))
4921 return pedantic_non_lvalue
4922 (fold (build (MAX_EXPR, type, arg1, arg2)));
4926 /* If C1 is C2 + 1, this is max(A, C2). */
4927 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4928 && operand_equal_p (TREE_OPERAND (arg0, 1),
4929 const_binop (PLUS_EXPR, arg2,
4930 integer_one_node, 0), 1))
4931 return pedantic_non_lvalue
4932 (fold (build (MAX_EXPR, type, arg1, arg2)));
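/* For example, x < 5 ? x : 4 folds to min (x, 4) by the
   C1 == C2 + 1 rule, and x > 4 ? x : 5 folds to max (x, 5) by the
   C1 == C2 - 1 rule.  */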
4937 /* If the second operand is simpler than the third, swap them
4938 since that produces better jump optimization results. */
4939 if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
4940 || TREE_CODE (arg1) == SAVE_EXPR)
4941 && ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
4942 || TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
4943 || TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
4945 /* See if this can be inverted. If it can't, possibly because
4946 it was a floating-point inequality comparison, don't do anything.  */
4948 tem = invert_truthvalue (arg0);
4950 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4952 t = build (code, type, tem,
4953 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
4955 arg1 = TREE_OPERAND (t, 2);
4960 /* Convert A ? 1 : 0 to simply A. */
4961 if (integer_onep (TREE_OPERAND (t, 1))
4962 && integer_zerop (TREE_OPERAND (t, 2))
4963 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
4964 call to fold will try to move the conversion inside
4965 a COND, which will recurse. In that case, the COND_EXPR
4966 is probably the best choice, so leave it alone. */
4967 && type == TREE_TYPE (arg0))
4968 return pedantic_non_lvalue (arg0);
4970 /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
4971 operation is simply A & 2. */
4973 if (integer_zerop (TREE_OPERAND (t, 2))
4974 && TREE_CODE (arg0) == NE_EXPR
4975 && integer_zerop (TREE_OPERAND (arg0, 1))
4976 && integer_pow2p (arg1)
4977 && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
4978 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
4980 return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
4985 /* When pedantic, a compound expression can be neither an lvalue
4986 nor an integer constant expression. */
4987 if (TREE_SIDE_EFFECTS (arg0) || pedantic)
4989 /* Don't let (0, 0) be a null pointer constant.  */
4990 if (integer_zerop (arg1))
4991 return non_lvalue (arg1);
4996 return build_complex (arg0, arg1);
5000 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
5002 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
5003 return omit_one_operand (type, TREE_OPERAND (arg0, 0),
5004 TREE_OPERAND (arg0, 1));
5005 else if (TREE_CODE (arg0) == COMPLEX_CST)
5006 return TREE_REALPART (arg0);
5007 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
5008 return fold (build (TREE_CODE (arg0), type,
5009 fold (build1 (REALPART_EXPR, type,
5010 TREE_OPERAND (arg0, 0))),
5011 fold (build1 (REALPART_EXPR,
5012 type, TREE_OPERAND (arg0, 1)))));
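/* For example, realpart (z + w) folds to
   realpart (z) + realpart (w), since addition and subtraction act
   componentwise on complex values; the IMAGPART_EXPR case below is
   analogous.  */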
5016 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
5017 return convert (type, integer_zero_node);
5018 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
5019 return omit_one_operand (type, TREE_OPERAND (arg0, 1),
5020 TREE_OPERAND (arg0, 0));
5021 else if (TREE_CODE (arg0) == COMPLEX_CST)
5022 return TREE_IMAGPART (arg0);
5023 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
5024 return fold (build (TREE_CODE (arg0), type,
5025 fold (build1 (IMAGPART_EXPR, type,
5026 TREE_OPERAND (arg0, 0))),
5027 fold (build1 (IMAGPART_EXPR, type,
5028 TREE_OPERAND (arg0, 1)))));
5031 /* Pull arithmetic ops out of the CLEANUP_POINT_EXPR where appropriate.  */
5033 case CLEANUP_POINT_EXPR:
5034 if (! TREE_SIDE_EFFECTS (arg0))
5038 enum tree_code code0 = TREE_CODE (arg0);
5039 int kind0 = TREE_CODE_CLASS (code0);
5040 tree arg00 = TREE_OPERAND (arg0, 0);
5044 return fold (build1 (code0, type,
5045 fold (build1 (CLEANUP_POINT_EXPR,
5046 TREE_TYPE (arg00), arg00))));
5047 if ((kind0 == '<' || kind0 == '2')
5048 && ! TREE_SIDE_EFFECTS (arg01 = TREE_OPERAND (arg0, 1)))
5049 return fold (build (code0, type,
5050 fold (build1 (CLEANUP_POINT_EXPR,
5051 TREE_TYPE (arg00), arg00)),
5059 } /* switch (code) */