/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 1988, 1992, 1993 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
/*@@ Fix lossage on folding division of big integers.  */

/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */
/* The entry points in this file are fold, size_int and size_binop.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.  */

/* Handle floating overflow for `const_binop'.  */
static jmp_buf float_error;
static void encode PROTO((short *, HOST_WIDE_INT, HOST_WIDE_INT));
static void decode PROTO((short *, HOST_WIDE_INT *, HOST_WIDE_INT *));
static int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
				       HOST_WIDE_INT, HOST_WIDE_INT,
				       HOST_WIDE_INT, HOST_WIDE_INT *,
				       HOST_WIDE_INT *, HOST_WIDE_INT *,
				       HOST_WIDE_INT *));
static int split_tree PROTO((tree, enum tree_code, tree *, tree *, int *));
static tree const_binop PROTO((enum tree_code, tree, tree, int));
static tree fold_convert PROTO((tree, tree));
static enum tree_code invert_tree_comparison PROTO((enum tree_code));
static enum tree_code swap_tree_comparison PROTO((enum tree_code));
static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
static tree eval_subst PROTO((tree, tree, tree, tree, tree));
static tree omit_one_operand PROTO((tree, tree, tree));
static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
					      tree, tree));
static tree decode_field_reference PROTO((tree, int *, int *,
					  enum machine_mode *, int *,
					  int *, tree *));
static int all_ones_mask_p PROTO((tree, int));
static int simple_operand_p PROTO((tree));
static tree range_test PROTO((enum tree_code, tree, enum tree_code,
			      enum tree_code, tree, tree, tree));
static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
/* Yield nonzero if a signed left shift of A by B bits overflows.  */
#define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))

/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
   Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
   Then this yields nonzero if overflow occurred during the addition.
   Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
   Use `^' to test whether signs differ, and `< 0' to isolate the sign.  */
#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
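#if 0
/* Illustrative sketch, not in the original sources: how the two macros
   above behave on plain `int' values, assuming a 32-bit two's complement
   host that wraps on signed overflow.  `example_*' names are hypothetical.  */
static void
example_overflow_macros ()
{
  int a = 0x7fffffff, b = 1;
  int sum = a + b;		/* wraps to the most negative int */

  /* A and B have the same sign but SUM's sign differs: overflow.  */
  if (overflow_sum_sign (a, b, sum))
    ;				/* taken */

  /* Operands of opposite signs can never overflow on addition.  */
  if (! overflow_sum_sign (a, -1, a - 1))
    ;				/* taken */

  /* Shifting the only set bit into the sign position is an overflow:
     (1 << 31) >> 31 is -1, not 1.  */
  if (left_shift_overflows (1, 31))
    ;				/* taken */
}
#endif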
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer as MAX_SHORTS shorts,
   with only 8 bits stored in each short, as a positive number.  */

/* Unpack a two-word integer into MAX_SHORTS shorts.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   SHORTS points to the array of shorts.  */

encode (shorts, low, hi)
     HOST_WIDE_INT low, hi;

  for (i = 0; i < MAX_SHORTS / 2; i++)

      shorts[i] = (low >> (i * 8)) & 0xff;
      shorts[i + MAX_SHORTS / 2] = (hi >> (i * 8)) & 0xff;

/* Pack an array of MAX_SHORTS shorts into a two-word integer.
   SHORTS points to the array of shorts.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

decode (shorts, low, hi)
     HOST_WIDE_INT *low, *hi;

  HOST_WIDE_INT lv = 0, hv = 0;

  for (i = 0; i < MAX_SHORTS / 2; i++)

      lv |= (HOST_WIDE_INT) shorts[i] << (i * 8);
      hv |= (HOST_WIDE_INT) shorts[i + MAX_SHORTS / 2] << (i * 8);
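#if 0
/* Illustrative sketch, not in the original sources: a doubleword value
   survives a round trip through the 8-shorts representation.  Assumes
   MAX_SHORTS == 8 and a 32-bit HOST_WIDE_INT.  */
static void
example_encode_decode ()
{
  short s[MAX_SHORTS];
  HOST_WIDE_INT lo, hi;

  encode (s, (HOST_WIDE_INT) 0x12345678, (HOST_WIDE_INT) -1);
  /* s[0..3] hold 0x78, 0x56, 0x34, 0x12; s[4..7] each hold 0xff.  */
  decode (s, &lo, &hi);
  /* lo == 0x12345678 and hi == -1 again.  */
}
#endif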
/* Make the integer constant T valid for its type
   by setting to 0 or 1 all the bits in the constant
   that don't belong in the type.
   Yield 1 if a signed overflow occurs, 0 otherwise.
   If OVERFLOW is nonzero, a signed overflow has already occurred
   in calculating T, so propagate it.  */

force_fit_type (t, overflow)

  HOST_WIDE_INT low, high;

  if (TREE_CODE (t) != INTEGER_CST)

  low = TREE_INT_CST_LOW (t);
  high = TREE_INT_CST_HIGH (t);

  if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)

    prec = TYPE_PRECISION (TREE_TYPE (t));

  /* First clear all bits that are beyond the type's precision.  */

  if (prec == 2 * HOST_BITS_PER_WIDE_INT)
  else if (prec > HOST_BITS_PER_WIDE_INT)

      TREE_INT_CST_HIGH (t)
	&= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));

      TREE_INT_CST_HIGH (t) = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
	TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);

  /* Unsigned types do not suffer sign extension or overflow.  */
  if (TREE_UNSIGNED (TREE_TYPE (t)))

  /* If the value's sign bit is set, extend the sign.  */
  if (prec != 2 * HOST_BITS_PER_WIDE_INT
      && (prec > HOST_BITS_PER_WIDE_INT
	  ? (TREE_INT_CST_HIGH (t)
	     & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
	  : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))

      /* Value is negative:
	 set to 1 all the bits that are outside this type's precision.  */
      if (prec > HOST_BITS_PER_WIDE_INT)

	  TREE_INT_CST_HIGH (t)
	    |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));

	  TREE_INT_CST_HIGH (t) = -1;
	  if (prec < HOST_BITS_PER_WIDE_INT)
	    TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);

  /* Yield nonzero if signed overflow occurred.  */
  return
    ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
     != 0);
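#if 0
/* Illustrative sketch, not in the original sources: the bit manipulation
   force_fit_type performs, shown on a plain HOST_WIDE_INT instead of a
   tree.  Fitting 0xff to a signed 8-bit precision sign-extends to -1;
   `example_fit' is a hypothetical name.  */
static HOST_WIDE_INT
example_fit (low, prec)
     HOST_WIDE_INT low;
     int prec;
{
  /* First clear all bits beyond PREC.  */
  low &= ~((HOST_WIDE_INT) (-1) << prec);
  /* If the value's sign bit is set, extend the sign.  */
  if (low & ((HOST_WIDE_INT) 1 << (prec - 1)))
    low |= ((HOST_WIDE_INT) (-1) << prec);
  return low;			/* example_fit (0xff, 8) == -1 */
}
#endif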
/* Add two doubleword integers with doubleword result.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

add_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;

  short arg1[MAX_SHORTS];
  short arg2[MAX_SHORTS];
  register int carry = 0;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  for (i = 0; i < MAX_SHORTS; i++)

      carry += arg1[i] + arg2[i];
      arg1[i] = carry & 0xff;

  decode (arg1, lv, hv);
  return overflow_sum_sign (h1, h2, *hv);
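#if 0
/* Illustrative sketch, not in the original sources: add_double returns
   nonzero exactly when the signed doubleword sum wraps.  Assumes a 32-bit
   HOST_WIDE_INT, so the doubleword maximum is 2**63 - 1.  */
static void
example_add_double ()
{
  HOST_WIDE_INT lv, hv;
  int overflow;

  /* (2**63 - 1) + 1 wraps to the doubleword minimum.  */
  overflow = add_double ((HOST_WIDE_INT) -1, 0x7fffffff, 1, 0, &lv, &hv);
  /* overflow != 0; hv has only its sign bit set and lv == 0.  */
}
#endif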
/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

neg_double (l1, h1, lv, hv)
     HOST_WIDE_INT l1, h1;
     HOST_WIDE_INT *lv, *hv;

      return (*hv & h1) < 0;
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

mul_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;

  short arg1[MAX_SHORTS];
  short arg2[MAX_SHORTS];
  short prod[MAX_SHORTS * 2];
  register int carry = 0;
  register int i, j, k;
  HOST_WIDE_INT toplow, tophigh, neglow, neghigh;

  /* These cases are used extensively, arising from pointer combinations.  */

	int overflow = left_shift_overflows (h1, 1);
	unsigned HOST_WIDE_INT temp = l1 + l1;
	*hv = (h1 << 1) + (temp < l1);

	int overflow = left_shift_overflows (h1, 2);
	unsigned HOST_WIDE_INT temp = l1 + l1;
	h1 = (h1 << 2) + ((temp < l1) << 1);

	int overflow = left_shift_overflows (h1, 3);
	unsigned HOST_WIDE_INT temp = l1 + l1;
	h1 = (h1 << 3) + ((temp < l1) << 2);

	h1 += (temp < l1) << 1;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  bzero (prod, sizeof prod);

  for (i = 0; i < MAX_SHORTS; i++)
    for (j = 0; j < MAX_SHORTS; j++)

	carry = arg1[i] * arg2[j];

	    prod[k] = carry & 0xff;

  decode (prod, lv, hv);	/* This ignores
				   prod[MAX_SHORTS] -> prod[MAX_SHORTS*2-1] */

  /* Check for overflow by calculating the top half of the answer in full;
     it should agree with the low half's sign bit.  */
  decode (prod + MAX_SHORTS, &toplow, &tophigh);

      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);

      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);

  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
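#if 0
/* Illustrative sketch, not in the original sources: mul_double's overflow
   test.  The top half of the 4-word product must be just the sign
   extension of the low half.  Assumes a 32-bit HOST_WIDE_INT.  */
static void
example_mul_double ()
{
  HOST_WIDE_INT lv, hv;

  if (mul_double (0, 1, 0, 2, &lv, &hv))	/* 2**32 * 2**33 == 2**65 */
    ;						/* overflows: nonzero return */

  mul_double (100000, 0, 100000, 0, &lv, &hv);
  /* 10**10 fits: hv == 2, lv == 0x540be400.  */
}
#endif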
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

lshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     HOST_WIDE_INT *lv, *hv;

  short arg1[MAX_SHORTS];

      rshift_double (l1, h1, - count, prec, lv, hv, arith);

  encode (arg1, l1, h1);

      for (i = 0; i < MAX_SHORTS; i++)

	  carry += arg1[i] << 1;
	  arg1[i] = carry & 0xff;

  decode (arg1, lv, hv);
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

rshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     HOST_WIDE_INT *lv, *hv;

  short arg1[MAX_SHORTS];

  encode (arg1, l1, h1);

      carry = arith && arg1[7] >> 7;
      for (i = MAX_SHORTS - 1; i >= 0; i--)

	  arg1[i] = (carry >> 1) & 0xff;

  decode (arg1, lv, hv);
/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

lrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     HOST_WIDE_INT *lv, *hv;

  short arg1[MAX_SHORTS];

      rrotate_double (l1, h1, - count, prec, lv, hv);

  encode (arg1, l1, h1);

      carry = arg1[MAX_SHORTS - 1] >> 7;

      for (i = 0; i < MAX_SHORTS; i++)

	  carry += arg1[i] << 1;
	  arg1[i] = carry & 0xff;

  decode (arg1, lv, hv);
/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

rrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     HOST_WIDE_INT *lv, *hv;

  short arg1[MAX_SHORTS];

  encode (arg1, l1, h1);

      for (i = MAX_SHORTS - 1; i >= 0; i--)

	  arg1[i] = (carry >> 1) & 0xff;

  decode (arg1, lv, hv);
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */

div_and_round_double (code, uns,
		      lnum_orig, hnum_orig, lden_orig, hden_orig,
		      lquo, hquo, lrem, hrem)
     HOST_WIDE_INT lnum_orig, hnum_orig;  /* num == numerator == dividend */
     HOST_WIDE_INT lden_orig, hden_orig;  /* den == denominator == divisor */
     HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;

  short num[MAX_SHORTS + 1];	/* extra element for scaling.  */
  short den[MAX_SHORTS], quo[MAX_SHORTS];
  register int i, j, work;
  register int carry = 0;
  HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;

  if ((hden == 0) && (lden == 0))
  /* calculate quotient sign and convert operands to unsigned.  */

      /* (minimum integer) / (-1) is the only overflow case.  */
      if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)

	  neg_double (lden, hden, &lden, &hden);

  if (hnum == 0 && hden == 0)
    {				/* single precision */
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / (unsigned HOST_WIDE_INT) lden;

    {				/* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */

  bzero (quo, sizeof quo);

  bzero (num, sizeof num);	/* to zero 9th element */
  bzero (den, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* This code requires more than just hden == 0.
     We also have to require that we don't need more than three bytes
     to hold CARRY.  If we ever did need four bytes to hold it, we
     would lose part of it when computing WORK on the next round.  */
  if (hden == 0 && (((unsigned HOST_WIDE_INT) lden << 8) >> 8) == lden)
    {				/* simpler algorithm */
      /* hnum != 0 already checked.  */
      for (i = MAX_SHORTS - 1; i >= 0; i--)

	  work = num[i] + (carry << 8);
	  quo[i] = work / (unsigned HOST_WIDE_INT) lden;
	  carry = work % (unsigned HOST_WIDE_INT) lden;

  else {			/* full double precision,
				   with thanks to Don Knuth's
				   "Seminumerical Algorithms".  */

    int quo_est, scale, num_hi_sig, den_hi_sig, quo_hi_sig;

    /* Find the highest non-zero divisor digit.  */
    for (i = MAX_SHORTS - 1; ; i--)

    for (i = MAX_SHORTS - 1; ; i--)

    quo_hi_sig = num_hi_sig - den_hi_sig + 1;
    /* Insure that the first digit of the divisor is at least BASE/2.
       This is required by the quotient digit estimation algorithm.  */

    scale = BASE / (den[den_hi_sig] + 1);
    if (scale > 1) {		/* scale divisor and dividend */

      for (i = 0; i <= MAX_SHORTS - 1; i++) {
	work = (num[i] * scale) + carry;
	num[i] = work & 0xff;

	if (num[i] != 0) num_hi_sig = i;

      for (i = 0; i <= MAX_SHORTS - 1; i++) {
	work = (den[i] * scale) + carry;
	den[i] = work & 0xff;

	if (den[i] != 0) den_hi_sig = i;
    for (i = quo_hi_sig; i > 0; i--) {
      /* guess the next quotient digit, quo_est, by dividing the first
	 two remaining dividend digits by the high order quotient digit.
	 quo_est is never low and is at most 2 high.  */

      int num_hi;		/* index of highest remaining dividend digit */

      num_hi = i + den_hi_sig;

      work = (num[num_hi] * BASE) + (num_hi > 0 ? num[num_hi - 1] : 0);
      if (num[num_hi] != den[den_hi_sig]) {
	quo_est = work / den[den_hi_sig];

      /* refine quo_est so it's usually correct, and at most one high.  */
      while ((den[den_hi_sig - 1] * quo_est)
	     > (((work - (quo_est * den[den_hi_sig])) * BASE)
		+ ((num_hi - 1) > 0 ? num[num_hi - 2] : 0)))

      /* Try QUO_EST as the quotient digit, by multiplying the
	 divisor by QUO_EST and subtracting from the remaining dividend.
	 Keep in mind that QUO_EST is the I - 1st digit.  */

      for (j = 0; j <= den_hi_sig; j++)

	  work = num[i + j - 1] - (quo_est * den[j]) + carry;

	  num[i + j - 1] = digit;

      /* if quo_est was high by one, then num[i] went negative and
	 we need to correct things.  */

	  carry = 0;		/* add divisor back in */
	  for (j = 0; j <= den_hi_sig; j++)

	      work = num[i + j - 1] + den[j] + carry;

	      num[i + j - 1] = work;

	  num[num_hi] += carry;

      /* store the quotient digit.  */
      quo[i - 1] = quo_est;

    decode (quo, lquo, hquo);
  /* if result is negative, make it so.  */

    neg_double (*lquo, *hquo, lquo, hquo);

  /* compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

    case TRUNC_MOD_EXPR:	/* round toward zero */
    case EXACT_DIV_EXPR:	/* for this one, it shouldn't matter */

    case FLOOR_MOD_EXPR:	/* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio < 0 && rem != 0 */

	  add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,

      else return overflow;

    case CEIL_MOD_EXPR:		/* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */

	  add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,

      else return overflow;

    case ROUND_MOD_EXPR:	/* round to closest integer */

	HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
	HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;

	/* get absolute values */
	if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
	if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);

	/* if (2 * abs (lrem) >= abs (lden)) */
	mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
		    labs_rem, habs_rem, &ltwice, &htwice);
	if (((unsigned HOST_WIDE_INT) habs_den
	     < (unsigned HOST_WIDE_INT) htwice)
	    || (((unsigned HOST_WIDE_INT) habs_den
		 == (unsigned HOST_WIDE_INT) htwice)
		&& ((unsigned HOST_WIDE_INT) labs_den
		    < (unsigned HOST_WIDE_INT) ltwice)))

	    add_double (*lquo, *hquo,
			(HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);

	    add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,

      else return overflow;

  /* compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
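#if 0
/* Illustrative sketch, not in the original sources: how the rounding
   codes differ for -7 divided by 2 (signed, so UNS == 0).  A doubleword
   -7 is low == -7, high == -1.  */
static void
example_div_rounding ()
{
  HOST_WIDE_INT lq, hq, lr, hr;

  div_and_round_double (TRUNC_DIV_EXPR, 0, -7, -1, 2, 0, &lq, &hq, &lr, &hr);
  /* toward zero:       quotient -3, remainder -1 */
  div_and_round_double (FLOOR_DIV_EXPR, 0, -7, -1, 2, 0, &lq, &hq, &lr, &hr);
  /* toward -infinity:  quotient -4, remainder  1 */
  div_and_round_double (CEIL_DIV_EXPR, 0, -7, -1, 2, 0, &lq, &hq, &lr, &hr);
  /* toward +infinity:  quotient -3, remainder -1 */
}
#endif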
#ifndef REAL_ARITHMETIC
/* Effectively truncate a real value to represent
   the nearest possible value in a narrower mode.
   The result is actually represented in the same data type as the argument,
   but its value is usually different.  */

real_value_truncate (mode, arg)
     enum machine_mode mode;

  /* Make sure the value is actually stored in memory before we turn off

  REAL_VALUE_TYPE value;
  jmp_buf handler, old_handler;

  if (setjmp (handler))

      error ("floating overflow");

  handled = push_float_handler (handler, old_handler);
  value = REAL_VALUE_TRUNCATE (mode, arg);
  pop_float_handler (handled, old_handler);
#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT

/* Check for infinity in an IEEE double precision number.  */

  /* The IEEE 64-bit double format.  */

	unsigned exponent : 11;
	unsigned mantissa1 : 20;

	unsigned mantissa1 : 20;
	unsigned exponent : 11;

  if (u.big_endian.sign == 1)

      return (u.big_endian.exponent == 2047
	      && u.big_endian.mantissa1 == 0
	      && u.big_endian.mantissa2 == 0);

      return (u.little_endian.exponent == 2047
	      && u.little_endian.mantissa1 == 0
	      && u.little_endian.mantissa2 == 0);
/* Check whether an IEEE double precision number is a NaN.  */

  /* The IEEE 64-bit double format.  */

	unsigned exponent : 11;
	unsigned mantissa1 : 20;

	unsigned mantissa1 : 20;
	unsigned exponent : 11;

  if (u.big_endian.sign == 1)

      return (u.big_endian.exponent == 2047
	      && (u.big_endian.mantissa1 != 0
		  || u.big_endian.mantissa2 != 0));

      return (u.little_endian.exponent == 2047
	      && (u.little_endian.mantissa1 != 0
		  || u.little_endian.mantissa2 != 0));
/* Check for a negative IEEE double precision number.  */

  /* The IEEE 64-bit double format.  */

	unsigned exponent : 11;
	unsigned mantissa1 : 20;

	unsigned mantissa1 : 20;
	unsigned exponent : 11;

  if (u.big_endian.sign == 1)

      return u.big_endian.sign;

      return u.little_endian.sign;
#else /* Target not IEEE */

/* Let's assume other float formats don't have infinity.
   (This can be overridden by redefining REAL_VALUE_ISINF.)  */

/* Let's assume other float formats don't have NaNs.
   (This can be overridden by redefining REAL_VALUE_ISNAN.)  */

/* Let's assume other float formats don't have minus zero.
   (This can be overridden by redefining REAL_VALUE_NEGATIVE.)  */

#endif /* Target not IEEE */
#endif /* no REAL_ARITHMETIC */
/* Split a tree IN into a constant and a variable part
   that could be combined with CODE to make IN.
   CODE must be a commutative arithmetic operation.
   Store the constant part into *CONP and the variable in *VARP.
   Return 1 if this was done; zero means the tree IN did not decompose
   this way.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
   Therefore, we must tell the caller whether the variable part
   was subtracted.  We do this by storing 1 or -1 into *VARSIGNP.
   The value stored is the coefficient for the variable term.
   The constant term we return should always be added;
   we negate it if necessary.  */
split_tree (in, code, varp, conp, varsignp)
     enum tree_code code;

  register tree outtype = TREE_TYPE (in);

  /* Strip any conversions that don't change the machine mode.  */
  while ((TREE_CODE (in) == NOP_EXPR
	  || TREE_CODE (in) == CONVERT_EXPR)
	 && (TYPE_MODE (TREE_TYPE (in))
	     == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
    in = TREE_OPERAND (in, 0);

  if (TREE_CODE (in) == code
      || (! FLOAT_TYPE_P (TREE_TYPE (in))
	  /* We can associate addition and subtraction together
	     (even though the C standard doesn't say so)
	     for integers because the value is not affected.
	     For reals, the value might be affected, so we can't.  */
	  && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
	      || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))

      enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
      if (code == INTEGER_CST)

	  *conp = TREE_OPERAND (in, 0);
	  *varp = TREE_OPERAND (in, 1);
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;

      if (TREE_CONSTANT (TREE_OPERAND (in, 1)))

	  *conp = TREE_OPERAND (in, 1);
	  *varp = TREE_OPERAND (in, 0);

	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  if (TREE_CODE (in) == MINUS_EXPR)

	      /* If operation is subtraction and constant is second,
		 must negate it to get an additive constant.
		 And this cannot be done unless it is a manifest constant.
		 It could also be the address of a static variable.
		 We cannot negate that, so give up.  */
	      if (TREE_CODE (*conp) == INTEGER_CST)
		/* Subtracting from integer_zero_node loses for long long.  */
		*conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));

      if (TREE_CONSTANT (TREE_OPERAND (in, 0)))

	  *conp = TREE_OPERAND (in, 0);
	  *varp = TREE_OPERAND (in, 1);
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
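#if 0
/* Illustrative sketch, not in the original sources (types simplified):
   splitting the tree for `x - 8' with CODE == PLUS_EXPR.  The variable
   part X comes back with sign 1, and the constant part comes back
   already negated to -8, since the constant returned must always be
   added.  */
static void
example_split_tree (x)
     tree x;			/* some integer-typed expression */
{
  tree t = build (MINUS_EXPR, TREE_TYPE (x), x, size_int (8));
  tree var, con;
  int varsign;

  if (split_tree (t, PLUS_EXPR, &var, &con, &varsign))
    ;				/* var == x, varsign == 1, con == -8 */
}
#endif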
/* Combine two constants ARG1 and ARG2 under operation CODE
   to produce a new constant.
   We assume ARG1 and ARG2 have the same data type,
   or at least are the same kind of constant and the same machine mode.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */

const_binop (code, arg1, arg2, notrunc)
     enum tree_code code;
     register tree arg1, arg2;

  if (TREE_CODE (arg1) == INTEGER_CST)

      register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
      register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
      HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
      HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
      HOST_WIDE_INT low, hi;
      HOST_WIDE_INT garbagel, garbageh;

      int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
	  t = build_int_2 (int1l | int2l, int1h | int2h);

	  t = build_int_2 (int1l ^ int2l, int1h ^ int2h);

	  t = build_int_2 (int1l & int2l, int1h & int2h);

	case BIT_ANDTC_EXPR:
	  t = build_int_2 (int1l & ~int2l, int1h & ~int2h);

	  /* It's unclear from the C standard whether shifts can overflow.
	     The following code ignores overflow; perhaps a C standard
	     interpretation ruling is needed.  */
	  lshift_double (int1l, int1h, int2l,
			 TYPE_PRECISION (TREE_TYPE (arg1)),

	  t = build_int_2 (low, hi);
	  TREE_TYPE (t) = TREE_TYPE (arg1);

	  force_fit_type (t, 0);
	  TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
	  lrotate_double (int1l, int1h, int2l,
			  TYPE_PRECISION (TREE_TYPE (arg1)),

	  t = build_int_2 (low, hi);

	      if ((unsigned HOST_WIDE_INT) int2l < int1l)

		  overflow = int2h < hi;

	  t = build_int_2 (int2l, int2h);

	      if ((unsigned HOST_WIDE_INT) int1l < int2l)

		  overflow = int1h < hi;

	  t = build_int_2 (int1l, int1h);

	  overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
	  t = build_int_2 (low, hi);

	  if (int2h == 0 && int2l == 0)

	      t = build_int_2 (int1l, int1h);

	  neg_double (int2l, int2h, &low, &hi);
	  add_double (int1l, int1h, low, hi, &low, &hi);
	  overflow = overflow_sum_sign (hi, int2h, int1h);
	  t = build_int_2 (low, hi);
	  /* Optimize simple cases.  */

	      unsigned HOST_WIDE_INT temp;

	      t = build_int_2 (0, 0);

	      t = build_int_2 (int2l, int2h);

	      overflow = left_shift_overflows (int2h, 1);
	      temp = int2l + int2l;
	      int2h = (int2h << 1) + (temp < int2l);
	      t = build_int_2 (temp, int2h);

#if 0 /* This code can lose carries.  */
	      temp = int2l + int2l + int2l;
	      int2h = int2h * 3 + (temp < int2l);
	      t = build_int_2 (temp, int2h);
#endif

	      overflow = left_shift_overflows (int2h, 2);
	      temp = int2l + int2l;
	      int2h = (int2h << 2) + ((temp < int2l) << 1);

	      int2h += (temp < int2l);
	      t = build_int_2 (temp, int2h);

	      overflow = left_shift_overflows (int2h, 3);
	      temp = int2l + int2l;
	      int2h = (int2h << 3) + ((temp < int2l) << 2);

	      int2h += (temp < int2l) << 1;

	      int2h += (temp < int2l);
	      t = build_int_2 (temp, int2h);

	      t = build_int_2 (0, 0);

	      t = build_int_2 (int1l, int1h);

	  overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
	  t = build_int_2 (low, hi);
	case TRUNC_DIV_EXPR:
	case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
	case EXACT_DIV_EXPR:
	  /* This is a shortcut for a common special case.
	     It reduces the number of tree nodes generated

	  if (int2h == 0 && int2l > 0
	      && TREE_TYPE (arg1) == sizetype
	      && int1h == 0 && int1l >= 0)

	      if (code == CEIL_DIV_EXPR)

	      return size_int (int1l / int2l);

	case ROUND_DIV_EXPR:
	  if (int2h == 0 && int2l == 1)

	      t = build_int_2 (int1l, int1h);

	  if (int1l == int2l && int1h == int2h)

	      if ((int1l | int1h) == 0)

	      t = build_int_2 (1, 0);

	  overflow = div_and_round_double (code, uns,
					   int1l, int1h, int2l, int2h,
					   &low, &hi, &garbagel, &garbageh);
	  t = build_int_2 (low, hi);

	case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
	case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
	  overflow = div_and_round_double (code, uns,
					   int1l, int1h, int2l, int2h,
					   &garbagel, &garbageh, &low, &hi);
	  t = build_int_2 (low, hi);
	      low = (((unsigned HOST_WIDE_INT) int1h
		      < (unsigned HOST_WIDE_INT) int2h)
		     || (((unsigned HOST_WIDE_INT) int1h
			  == (unsigned HOST_WIDE_INT) int2h)
			 && ((unsigned HOST_WIDE_INT) int1l
			     < (unsigned HOST_WIDE_INT) int2l)));

	      low = ((int1h < int2h)
		     || ((int1h == int2h)
			 && ((unsigned HOST_WIDE_INT) int1l
			     < (unsigned HOST_WIDE_INT) int2l)));

	  if (low == (code == MIN_EXPR))
	    t = build_int_2 (int1l, int1h);

	    t = build_int_2 (int2l, int2h);

      TREE_TYPE (t) = TREE_TYPE (arg1);

	= ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
	   | TREE_OVERFLOW (arg1)
	   | TREE_OVERFLOW (arg2));
      TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
				    | TREE_CONSTANT_OVERFLOW (arg1)
				    | TREE_CONSTANT_OVERFLOW (arg2));
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (TREE_CODE (arg1) == REAL_CST)

      REAL_VALUE_TYPE value;

      d1 = TREE_REAL_CST (arg1);
      d2 = TREE_REAL_CST (arg2);
      if (setjmp (float_error))

	  pedwarn ("floating overflow in constant expression");
	  return build (code, TREE_TYPE (arg1), arg1, arg2);

      set_float_handler (float_error);

#ifdef REAL_ARITHMETIC
      REAL_ARITHMETIC (value, code, d1, d2);

#ifndef REAL_INFINITY

	  value = MIN (d1, d2);

	  value = MAX (d1, d2);

#endif /* no REAL_ARITHMETIC */
      t = build_real (TREE_TYPE (arg1),
		      real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)),
					   value));
      set_float_handler (NULL_PTR);

#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  if (TREE_CODE (arg1) == COMPLEX_CST)

      register tree r1 = TREE_REALPART (arg1);
      register tree i1 = TREE_IMAGPART (arg1);
      register tree r2 = TREE_REALPART (arg2);
      register tree i2 = TREE_IMAGPART (arg2);
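      /* For reference (standard complex arithmetic; annotation not in the
	 original sources): with x = r1 + i1*i and y = r2 + i2*i,
	   x * y = (r1*r2 - i1*i2) + (r1*i2 + i1*r2) * i
	   x / y = ((r1*r2 + i1*i2) + (i1*r2 - r1*i2) * i) / (r2*r2 + i2*i2),
	 which is what the cases below compute term by term.  */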
	  t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
			     const_binop (PLUS_EXPR, i1, i2, notrunc));

	  t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
			     const_binop (MINUS_EXPR, i1, i2, notrunc));

	  t = build_complex (const_binop (MINUS_EXPR,
					  const_binop (MULT_EXPR,
					  const_binop (MULT_EXPR,
			     const_binop (PLUS_EXPR,
					  const_binop (MULT_EXPR,
					  const_binop (MULT_EXPR,

	      register tree magsquared
		= const_binop (PLUS_EXPR,
			       const_binop (MULT_EXPR, r2, r2, notrunc),
			       const_binop (MULT_EXPR, i2, i2, notrunc),

	      t = build_complex (const_binop (RDIV_EXPR,
				    const_binop (PLUS_EXPR,
					const_binop (MULT_EXPR, r1, r2, notrunc),
					const_binop (MULT_EXPR, i1, i2, notrunc),
				    magsquared, notrunc),
				 const_binop (RDIV_EXPR,
				    const_binop (MINUS_EXPR,
					const_binop (MULT_EXPR, i1, r2, notrunc),
					const_binop (MULT_EXPR, r1, i2, notrunc),
				    magsquared, notrunc));

      TREE_TYPE (t) = TREE_TYPE (arg1);
/* Return an INTEGER_CST with value NUMBER and type from `sizetype'.  */

size_int (number)
     unsigned int number;

  /* Type-size nodes already made for small sizes.  */
  static tree size_table[2 * HOST_BITS_PER_WIDE_INT + 1];

  if (number < 2 * HOST_BITS_PER_WIDE_INT + 1
      && size_table[number] != 0)
    return size_table[number];
  if (number < 2 * HOST_BITS_PER_WIDE_INT + 1)

      push_obstacks_nochange ();
      /* Make this a permanent node.  */
      end_temporary_allocation ();
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
      size_table[number] = t;

      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
/* Combine operands ARG0 and ARG1 with arithmetic operation CODE.
   CODE is a tree code.  Data type is taken from `sizetype';
   if the operands are constant, so is the result.  */

size_binop (code, arg0, arg1)
     enum tree_code code;

  /* Handle the special case of two integer constants faster.  */
  if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)

      /* And some specific cases even faster than that.  */
      if (code == PLUS_EXPR
	  && TREE_INT_CST_LOW (arg0) == 0
	  && TREE_INT_CST_HIGH (arg0) == 0)

      if (code == MINUS_EXPR
	  && TREE_INT_CST_LOW (arg1) == 0
	  && TREE_INT_CST_HIGH (arg1) == 0)

      if (code == MULT_EXPR
	  && TREE_INT_CST_LOW (arg0) == 1
	  && TREE_INT_CST_HIGH (arg0) == 0)

      /* Handle general case of two integer constants.  */
      return const_binop (code, arg0, arg1, 1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return error_mark_node;

  return fold (build (code, sizetype, arg0, arg1));
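#if 0
/* Illustrative sketch, not in the original sources: size_binop folds two
   constant sizes without allocating an expression node.  */
static void
example_size_binop ()
{
  tree sz = size_binop (PLUS_EXPR, size_int (4), size_int (8));
  /* sz is the sizetype constant 12, via the const_binop fast path;
     a non-constant operand would instead yield a folded PLUS_EXPR.  */
}
#endif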
/* Given T, a tree representing type conversion of ARG1, a constant,
   return a constant tree representing the result of conversion.  */

fold_convert (t, arg1)

  register tree type = TREE_TYPE (t);

  if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))

      if (TREE_CODE (arg1) == INTEGER_CST)

	  /* Given an integer constant, make new constant with new type,
	     appropriately sign-extended or truncated.  */
	  t = build_int_2 (TREE_INT_CST_LOW (arg1),
			   TREE_INT_CST_HIGH (arg1));
	  TREE_TYPE (t) = type;
	  /* Indicate an overflow if (1) ARG1 already overflowed,
	     or (2) force_fit_type indicates an overflow.
	     Tell force_fit_type that an overflow has already occurred
	     if ARG1 is a too-large unsigned value and T is signed.  */
	  TREE_OVERFLOW (t)
	    = (TREE_OVERFLOW (arg1)
	       | force_fit_type (t,
				 (TREE_INT_CST_HIGH (arg1) < 0
				  & (TREE_UNSIGNED (type)
				     < TREE_UNSIGNED (TREE_TYPE (arg1))))));
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      else if (TREE_CODE (arg1) == REAL_CST)

	  REAL_VALUE_TYPE l, x, u;

	  l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
	  x = TREE_REAL_CST (arg1);
	  u = real_value_from_int_cst (TYPE_MAX_VALUE (type));

	  /* See if X will be in range after truncation towards 0.
	     To compensate for truncation, move the bounds away from 0,
	     but reject if X exactly equals the adjusted bounds.  */
#ifdef REAL_ARITHMETIC
	  REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
	  REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);

	  if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))

	      pedwarn ("real constant out of range for integer conversion");
#ifndef REAL_ARITHMETIC

	      HOST_WIDE_INT low, high;
	      HOST_WIDE_INT half_word
		= (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);

	      d = TREE_REAL_CST (arg1);

	      high = (HOST_WIDE_INT) (d / half_word / half_word);
	      d -= (REAL_VALUE_TYPE) high * half_word * half_word;
	      if (d >= (REAL_VALUE_TYPE) half_word * half_word / 2)

		  low = d - (REAL_VALUE_TYPE) half_word * half_word / 2;
		  low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);

		low = (HOST_WIDE_INT) d;
	      if (TREE_REAL_CST (arg1) < 0)
		neg_double (low, high, &low, &high);
	      t = build_int_2 (low, high);

	      HOST_WIDE_INT low, high;
	      REAL_VALUE_TO_INT (&low, &high, (TREE_REAL_CST (arg1)));
	      t = build_int_2 (low, high);

	  TREE_TYPE (t) = type;
	  force_fit_type (t, 0);

#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      TREE_TYPE (t) = type;
  else if (TREE_CODE (type) == REAL_TYPE)

#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      if (TREE_CODE (arg1) == INTEGER_CST)
	return build_real_from_int_cst (type, arg1);
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      if (TREE_CODE (arg1) == REAL_CST)

	  if (setjmp (float_error))

	      pedwarn ("floating overflow in constant expression");

	  set_float_handler (float_error);

	  t = build_real (type, real_value_truncate (TYPE_MODE (type),
						     TREE_REAL_CST (arg1)));
	  set_float_handler (NULL_PTR);

  TREE_CONSTANT (t) = 1;
/* Return an expr equal to X but certainly not valid as an lvalue.
   Also make sure it is not valid as a null pointer constant.  */

  /* These things are certainly not lvalues.  */
  if (TREE_CODE (x) == NON_LVALUE_EXPR
      || TREE_CODE (x) == INTEGER_CST
      || TREE_CODE (x) == REAL_CST
      || TREE_CODE (x) == STRING_CST
      || TREE_CODE (x) == ADDR_EXPR)

      if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))

	  /* Use NOP_EXPR instead of NON_LVALUE_EXPR
	     so convert_for_assignment won't strip it.
	     This is so this 0 won't be treated as a null pointer constant.  */
	  result = build1 (NOP_EXPR, TREE_TYPE (x), x);
	  TREE_CONSTANT (result) = TREE_CONSTANT (x);

  result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
  TREE_CONSTANT (result) = TREE_CONSTANT (x);
/* When pedantic, return an expr equal to X but certainly not valid as a
   pedantic lvalue.  Otherwise, return X.  */

pedantic_non_lvalue (x)

    return non_lvalue (x);
/* Given a tree comparison code, return the code that is the logical inverse
   of the given code.  It is not safe to do this for floating-point
   comparisons, except for NE_EXPR and EQ_EXPR.  */

static enum tree_code
invert_tree_comparison (code)
     enum tree_code code;

/* Similar, but return the comparison that results if the operands are
   swapped.  This is safe for floating-point.  */

static enum tree_code
swap_tree_comparison (code)
     enum tree_code code;
/* Return nonzero if two operands are necessarily equal.
   If ONLY_CONST is non-zero, only return non-zero for constants.
   This function tests whether the operands are indistinguishable;
   it does not test whether they are equal using C's == operation.
   The distinction is important for IEEE floating point, because
   (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
   (2) two NaNs may be indistinguishable, but NaN!=NaN.  */

operand_equal_p (arg0, arg1, only_const)

  /* If both types don't have the same signedness, then we can't consider
     them equal.  We must check this before the STRIP_NOPS calls
     because they may change the signedness of the arguments.  */
  if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))

  /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
     We don't care about side effects in that case because the SAVE_EXPR
     takes care of that for us.  */
  if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
    return ! only_const;

  if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == ADDR_EXPR
      && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == INTEGER_CST
      && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
      && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))

  /* Detect when real constants are equal.  */
  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == REAL_CST)
    return !bcmp (&TREE_REAL_CST (arg0), &TREE_REAL_CST (arg1),
		  sizeof (REAL_VALUE_TYPE));
  if (TREE_CODE (arg0) != TREE_CODE (arg1))

  /* This is needed for conversions and for COMPONENT_REF.
     Might as well play it safe and always test this.  */
  if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))

  switch (TREE_CODE_CLASS (TREE_CODE (arg0)))

      /* Two conversions are equal only if signedness and modes match.  */
      if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
	  && (TREE_UNSIGNED (TREE_TYPE (arg0))
	      != TREE_UNSIGNED (TREE_TYPE (arg1))))

      return operand_equal_p (TREE_OPERAND (arg0, 0),
			      TREE_OPERAND (arg1, 0), 0);

      return (operand_equal_p (TREE_OPERAND (arg0, 0),
			       TREE_OPERAND (arg1, 0), 0)
	      && operand_equal_p (TREE_OPERAND (arg0, 1),
				  TREE_OPERAND (arg1, 1), 0));

      switch (TREE_CODE (arg0))

	  return operand_equal_p (TREE_OPERAND (arg0, 0),
				  TREE_OPERAND (arg1, 0), 0);

	  return (operand_equal_p (TREE_OPERAND (arg0, 0),
				   TREE_OPERAND (arg1, 0), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 1),
				      TREE_OPERAND (arg1, 1), 0));

	  return (operand_equal_p (TREE_OPERAND (arg0, 0),
				   TREE_OPERAND (arg1, 0), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 1),
				      TREE_OPERAND (arg1, 1), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 2),
				      TREE_OPERAND (arg1, 2), 0));
/* Similar to operand_equal_p, but see if ARG0 might have been made by
   shorten_compare from ARG1 when ARG1 was being compared with OTHER.

   When in doubt, return 0.  */

operand_equal_for_comparison_p (arg0, arg1, other)

  int unsignedp1, unsignedpo;
  tree primarg1, primother;

  if (operand_equal_p (arg0, arg1, 0))

  if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))

  /* Duplicate what shorten_compare does to ARG1 and see if that gives the
     actual comparison operand, ARG0.

     First throw away any conversions to wider types
     already present in the operands.  */

  primarg1 = get_narrower (arg1, &unsignedp1);
  primother = get_narrower (other, &unsignedpo);

  correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
  if (unsignedp1 == unsignedpo
      && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
      && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)

      tree type = TREE_TYPE (arg0);

      /* Make sure shorter operand is extended the right way
	 to match the longer operand.  */
      primarg1 = convert (signed_or_unsigned_type (unsignedp1,
						   TREE_TYPE (primarg1)),

      if (operand_equal_p (arg0, convert (type, primarg1), 0))
/* See if ARG is an expression that is either a comparison or is performing
   arithmetic on comparisons.  The comparisons must only be comparing
   two different values, which will be stored in *CVAL1 and *CVAL2; if
   they are non-zero it means that some operands have already been found.
   No variables may be used anywhere else in the expression except in the
   comparisons.  If SAVE_P is true it means we removed a SAVE_EXPR around
   the expression and save_expr needs to be called with CVAL1 and CVAL2.

   If this is true, return 1.  Otherwise, return zero.  */

twoval_comparison_p (arg, cval1, cval2, save_p)
     tree *cval1, *cval2;

  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)

  else if (class == 'e'
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
	       || code == COMPOUND_EXPR))

  else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)

      /* If we've already found a CVAL1 or CVAL2, this expression is
	 too complex to handle.  */
      if (*cval1 || *cval2)

      return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);

      return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
	      && twoval_comparison_p (TREE_OPERAND (arg, 1),
				      cval1, cval2, save_p));

      if (code == COND_EXPR)
	return (twoval_comparison_p (TREE_OPERAND (arg, 0),
				     cval1, cval2, save_p)
		&& twoval_comparison_p (TREE_OPERAND (arg, 1),
					cval1, cval2, save_p)
		&& twoval_comparison_p (TREE_OPERAND (arg, 2),
					cval1, cval2, save_p));

      /* First see if we can handle the first operand, then the second.  For
	 the second operand, we know *CVAL1 can't be zero.  It must be that
	 one side of the comparison is each of the values; test for the
	 case where this isn't true by failing if the two operands
	 are the same.  */

      if (operand_equal_p (TREE_OPERAND (arg, 0),
			   TREE_OPERAND (arg, 1), 0))

	*cval1 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))

      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))

      if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))

      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 1);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
/* ARG is a tree that is known to contain just arithmetic operations and
   comparisons.  Evaluate the operations in the tree substituting NEW0 for
   any occurrence of OLD0 as an operand of a comparison and likewise for
   NEW1 and OLD1.  */

eval_subst (arg, old0, new0, old1, new1)
     tree old0, new0, old1, new1;

  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)

  else if (class == 'e'
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))

      return fold (build1 (code, type,
			   eval_subst (TREE_OPERAND (arg, 0),
				       old0, new0, old1, new1)));

      return fold (build (code, type,
			  eval_subst (TREE_OPERAND (arg, 0),
				      old0, new0, old1, new1),
			  eval_subst (TREE_OPERAND (arg, 1),
				      old0, new0, old1, new1)));

      return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);

      return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);

      return fold (build (code, type,
			  eval_subst (TREE_OPERAND (arg, 0),
				      old0, new0, old1, new1),
			  eval_subst (TREE_OPERAND (arg, 1),
				      old0, new0, old1, new1),
			  eval_subst (TREE_OPERAND (arg, 2),
				      old0, new0, old1, new1)));

      tree arg0 = TREE_OPERAND (arg, 0);
      tree arg1 = TREE_OPERAND (arg, 1);

      /* We need to check both for exact equality and tree equality.  The
	 former will be true if the operand has a side-effect.  In that
	 case, we know the operand occurred exactly once.  */

      if (arg0 == old0 || operand_equal_p (arg0, old0, 0))

      else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))

      if (arg1 == old0 || operand_equal_p (arg1, old0, 0))

      else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))

      return fold (build (code, type, arg0, arg1));
/* Return a tree for the case when the result of an expression is RESULT
   converted to TYPE and OMITTED was previously an operand of the expression
   but is now not needed (e.g., we folded OMITTED * 0).

   If OMITTED has side effects, we must evaluate it.  Otherwise, just do
   the conversion of RESULT to TYPE.  */

omit_one_operand (type, result, omitted)
     tree type, result, omitted;

  tree t = convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build (COMPOUND_EXPR, type, omitted, t);

  return non_lvalue (t);
/* Return a simplified tree node for the truth-negation of ARG.  This
   never alters ARG itself.  We assume that ARG is an operation that
   returns a truth value (0 or 1).  */

invert_truthvalue (arg)

  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);

  if (code == ERROR_MARK)

  /* If this is a comparison, we can simply invert it, except for
     floating-point non-equality comparisons, in which case we just
     enclose a TRUTH_NOT_EXPR around what we have.  */

  if (TREE_CODE_CLASS (code) == '<')

      if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	  && code != NE_EXPR && code != EQ_EXPR)
	return build1 (TRUTH_NOT_EXPR, type, arg);

	return build (invert_tree_comparison (code), type,
		      TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));

      return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
					 && TREE_INT_CST_HIGH (arg) == 0, 0));

    case TRUTH_AND_EXPR:
      return build (TRUTH_OR_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

      return build (TRUTH_AND_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_XOR_EXPR:
      /* Here we can invert either operand.  We invert the first operand
	 unless the second operand is a TRUTH_NOT_EXPR in which case our
	 result is the XOR of the first operand with the inside of the
	 negation of the second operand.  */

      if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
	return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
		      TREE_OPERAND (TREE_OPERAND (arg, 1), 0));

	return build (TRUTH_XOR_EXPR, type,
		      invert_truthvalue (TREE_OPERAND (arg, 0)),
		      TREE_OPERAND (arg, 1));

    case TRUTH_ANDIF_EXPR:
      return build (TRUTH_ORIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_ORIF_EXPR:
      return build (TRUTH_ANDIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_NOT_EXPR:
      return TREE_OPERAND (arg, 0);

      return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)),
		    invert_truthvalue (TREE_OPERAND (arg, 2)));

      return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case NON_LVALUE_EXPR:
      return invert_truthvalue (TREE_OPERAND (arg, 0));

      return build1 (TREE_CODE (arg), type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)));

      if (!integer_onep (TREE_OPERAND (arg, 1)))

      return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));

      return build1 (TRUTH_NOT_EXPR, type, arg);

  if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)

  return build1 (TRUTH_NOT_EXPR, type, arg);
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
	(A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.

   If this optimization cannot be done, 0 will be returned.  */

distribute_bit_expr (code, type, arg0, arg1)
     enum tree_code code;

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      || TREE_CODE (arg0) == code
      || (TREE_CODE (arg0) != BIT_AND_EXPR
	  && TREE_CODE (arg0) != BIT_IOR_EXPR))

  if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))

      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 1);

  else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))

      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 0);

  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))

      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 1);

  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))

      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 0);

  return fold (build (TREE_CODE (arg0), type, common,
		      fold (build (code, type, left, right))));
/* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
   starting at BITPOS.  The field is unsigned if UNSIGNEDP is non-zero.  */

make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
     int bitsize, bitpos;

  tree result = build (BIT_FIELD_REF, type, inner,
		       size_int (bitsize), size_int (bitpos));

  TREE_UNSIGNED (result) = unsignedp;
/* Optimize a bit-field compare.

   There are two cases:  First is a compare against a constant and the
   second is a comparison of two items where the fields are at the same
   bit position relative to the start of a chunk (byte, halfword, word)
   large enough to contain it.  In these cases we can avoid the shift
   implicit in bitfield extractions.

   For constants, we emit a compare of the shifted constant with the
   BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
   compared.  For two fields at the same position, we do the ANDs with the
   similar mask and compare the result of the ANDs.

   CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
   COMPARE_TYPE is the type of the comparison, and LHS and RHS
   are the left and right operands of the comparison, respectively.

   If the optimization described above can be done, we return the resulting
   tree.  Otherwise we return zero.  */
optimize_bit_field_compare (code, compare_type, lhs, rhs)
     enum tree_code code;

  int lbitpos, lbitsize, rbitpos, rbitsize;
  int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
  tree type = TREE_TYPE (lhs);
  tree signed_type, unsigned_type;
  int const_p = TREE_CODE (rhs) == INTEGER_CST;
  enum machine_mode lmode, rmode, lnmode, rnmode;
  int lunsignedp, runsignedp;
  int lvolatilep = 0, rvolatilep = 0;
  tree linner, rinner;

  /* Get all the information about the extractions being done.  If the bit size
     is the same as the size of the underlying object, we aren't doing an
     extraction at all and so can do nothing.  */
  linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
				&lunsignedp, &lvolatilep);
  if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0

      /* If this is not a constant, we can only do something if bit positions,
	 sizes, and signedness are the same.  */
      rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
				    &rmode, &runsignedp, &rvolatilep);

      if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
	  || lunsignedp != runsignedp || offset != 0)

  /* See if we can find a mode to refer to this field.  We should be able to,
     but fail if we can't.  */
  lnmode = get_best_mode (lbitsize, lbitpos,
			  TYPE_ALIGN (TREE_TYPE (linner)), word_mode,

  if (lnmode == VOIDmode)

  /* Set signed and unsigned types of the precision of this mode for the
     shifts below.  */
  signed_type = type_for_mode (lnmode, 0);
  unsigned_type = type_for_mode (lnmode, 1);

      rnmode = get_best_mode (rbitsize, rbitpos,
			      TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,

      if (rnmode == VOIDmode)

  /* Compute the bit position and size for the new reference and our offset
     within it.  If the new reference is the same size as the original, we
     won't optimize anything, so return zero.  */
  lnbitsize = GET_MODE_BITSIZE (lnmode);
  lnbitpos = lbitpos & ~ (lnbitsize - 1);
  lbitpos -= lnbitpos;
  if (lnbitsize == lbitsize)

      rnbitsize = GET_MODE_BITSIZE (rnmode);
      rnbitpos = rbitpos & ~ (rnbitsize - 1);
      rbitpos -= rnbitpos;
      if (rnbitsize == rbitsize)

#if BYTES_BIG_ENDIAN
  lbitpos = lnbitsize - lbitsize - lbitpos;
#endif

  /* Make the mask to be used against the extracted field.  */
  mask = build_int_2 (~0, ~0);
  TREE_TYPE (mask) = unsigned_type;
  force_fit_type (mask, 0);
  mask = convert (unsigned_type, mask);
  mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
  mask = const_binop (RSHIFT_EXPR, mask,
		      size_int (lnbitsize - lbitsize - lbitpos), 0);
  /* If not comparing with constant, just rework the comparison
     and return.  */

    return build (code, compare_type,
		  build (BIT_AND_EXPR, unsigned_type,
			 make_bit_field_ref (linner, unsigned_type,
					     lnbitsize, lnbitpos, 1),
			 mask),
		  build (BIT_AND_EXPR, unsigned_type,
			 make_bit_field_ref (rinner, unsigned_type,
					     rnbitsize, rnbitpos, 1),
			 mask));

  /* Otherwise, we are handling the constant case.  See if the constant is too
     big for the field.  Warn and return a tree for 0 (false) if so.  We do
     this not only for its own sake, but to avoid having to test for this
     error case below.  If we didn't, we might generate wrong code.

     For unsigned fields, the constant shifted right by the field length should
     be all zero.  For signed fields, the high-order bits should agree with
     the sign bit.  */

      if (! integer_zerop (const_binop (RSHIFT_EXPR,
					convert (unsigned_type, rhs),
					size_int (lbitsize), 0)))

	  warning ("comparison is always %s due to width of bitfield",
		   code == NE_EXPR ? "one" : "zero");
	  return convert (compare_type,
			  (code == NE_EXPR
			   ? integer_one_node : integer_zero_node));

      tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
			      size_int (lbitsize - 1), 0);
      if (! integer_zerop (tem) && ! integer_all_onesp (tem))

	  warning ("comparison is always %s due to width of bitfield",
		   code == NE_EXPR ? "one" : "zero");
	  return convert (compare_type,
			  (code == NE_EXPR
			   ? integer_one_node : integer_zero_node));

  /* Single-bit compares should always be against zero.  */
  if (lbitsize == 1 && ! integer_zerop (rhs))

      code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
      rhs = convert (type, integer_zero_node);

  /* Make a new bitfield reference, shift the constant over the
     appropriate number of bits and mask it with the computed mask
     (in case this was a signed field).  If we changed it, make a new one.  */
  lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);

      TREE_SIDE_EFFECTS (lhs) = 1;
      TREE_THIS_VOLATILE (lhs) = 1;

  rhs = fold (const_binop (BIT_AND_EXPR,
			   const_binop (LSHIFT_EXPR,
					convert (unsigned_type, rhs),
					size_int (lbitpos), 0),
			   mask, 0));

  return build (code, compare_type,
		build (BIT_AND_EXPR, unsigned_type, lhs, mask),
		rhs);
2458 /* Subroutine for fold_truthop: decode a field reference.
2460 If EXP is a comparison reference, we return the innermost reference.
2462 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2463 set to the starting bit number.
2465 If the innermost field can be completely contained in a mode-sized
2466 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2468 *PVOLATILEP is set to 1 if the any expression encountered is volatile;
2469 otherwise it is not changed.
2471 *PUNSIGNEDP is set to the signedness of the field.
2473 *PMASK is set to the mask used. This is either contained in a
2474 BIT_AND_EXPR or derived from the width of the field.
2476 Return 0 if this is not a component reference or is one that we can't
2477 do anything with. */
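/* Illustrative sketch (added; the expression shapes are hypothetical):
   given `q->c & 7' as one side of a comparison, this returns the
   reference Q->C, fills in *PBITSIZE, *PBITPOS, *PMODE and *PUNSIGNEDP
   from the field, and sets *PMASK to 7.  With no explicit BIT_AND_EXPR,
   the mask is all ones over the field's width, e.g. 255 for an 8-bit
   field.  */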
static tree
decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
			pvolatilep, pmask)
     tree exp;
     int *pbitsize, *pbitpos;
     enum machine_mode *pmode;
     int *punsignedp, *pvolatilep;
     tree *pmask;
{
  tree mask = 0;
  tree inner;
  tree offset;

  /* All the optimizations using this function assume integer fields.
     There are problems with FP fields since the type_for_size call
     below can fail for, e.g., XFmode.  */
  if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
    return 0;

  if (TREE_CODE (exp) == BIT_AND_EXPR)
    {
      mask = TREE_OPERAND (exp, 1);
      exp = TREE_OPERAND (exp, 0);
      STRIP_NOPS (exp); STRIP_NOPS (mask);
      if (TREE_CODE (mask) != INTEGER_CST)
	return 0;
    }

  if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
      && TREE_CODE (exp) != BIT_FIELD_REF)
    return 0;

  inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
			       punsignedp, pvolatilep);
  if (inner == exp || *pbitsize < 0 || offset != 0)
    return 0;

  if (mask == 0)
    {
      tree unsigned_type = type_for_size (*pbitsize, 1);
      int precision = TYPE_PRECISION (unsigned_type);

      mask = build_int_2 (~0, ~0);
      TREE_TYPE (mask) = unsigned_type;
      force_fit_type (mask, 0);
      mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
      mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
    }

  *pmask = mask;
  return inner;
}

/* Return non-zero if MASK represents a mask of SIZE ones in the low-order
   bits of its type.  */

static int
all_ones_mask_p (mask, size)
     tree mask;
     int size;
{
  tree type = TREE_TYPE (mask);
  int precision = TYPE_PRECISION (type);
  tree tmask;

  tmask = build_int_2 (~0, ~0);
  TREE_TYPE (tmask) = signed_type (type);
  force_fit_type (tmask, 0);
  return
    operand_equal_p (mask,
		     const_binop (RSHIFT_EXPR,
				  const_binop (LSHIFT_EXPR, tmask,
					       size_int (precision - size), 0),
				  size_int (precision - size), 0),
		     0);
}

/* Subroutine for fold_truthop: determine if an operand is simple enough
   to be evaluated unconditionally.  */

static int
simple_operand_p (exp)
     tree exp;
{
  /* Strip any conversions that don't change the machine mode.  */
  while ((TREE_CODE (exp) == NOP_EXPR
	  || TREE_CODE (exp) == CONVERT_EXPR)
	 && (TYPE_MODE (TREE_TYPE (exp))
	     == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
    exp = TREE_OPERAND (exp, 0);

  return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
	  || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
	      && ! TREE_ADDRESSABLE (exp)
	      && ! TREE_THIS_VOLATILE (exp)
	      && ! DECL_NONLOCAL (exp)
	      /* Don't regard global variables as simple.  They may be
		 allocated in ways unknown to the compiler (shared memory,
		 #pragma weak, etc).  */
	      && ! TREE_PUBLIC (exp)
	      && ! DECL_EXTERNAL (exp)
	      /* Loading a static variable is unduly expensive, but global
		 registers aren't expensive.  */
	      && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
}

/* Subroutine for fold_truthop: try to optimize a range test.

   For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".

   JCODE is the logical combination of the two terms.  It is TRUTH_AND_EXPR
   (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
   (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR).  TYPE is the type of
   the result.

   VAR is the value being tested.  LO_CODE and HI_CODE are the comparison
   operators comparing VAR to LO_CST and HI_CST.  LO_CST is known to be no
   larger than HI_CST (they may be equal).

   We return the simplified tree or 0 if no optimization is possible.  */
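/* Worked example (added; not in the original source): with LO_CST == 2
   and HI_CST == 9, "i >= 2 && i <= 9" becomes "(unsigned) (i - 2) <= 7";
   any i below 2 wraps around to a huge unsigned value, so one unsigned
   comparison checks both bounds.  Dually, "ch < '0' || ch > '9'"
   becomes "(unsigned) (ch - '0') > 9".  */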
static tree
range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
     enum tree_code jcode, lo_code, hi_code;
     tree type, var, lo_cst, hi_cst;
{
  tree utype;
  enum tree_code rcode;

  /* See if this is a range test and normalize the constant terms.  */

  if (jcode == TRUTH_AND_EXPR)
    {
      switch (lo_code)
	{
	case NE_EXPR:
	  /* See if we have VAR != CST && VAR != CST+1.  */
	  if (! (hi_code == NE_EXPR
		 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
		 && tree_int_cst_equal (integer_one_node,
					const_binop (MINUS_EXPR,
						     hi_cst, lo_cst, 0))))
	    return 0;

	  rcode = GT_EXPR;
	  break;

	case GE_EXPR:
	case GT_EXPR:
	  if (hi_code == LT_EXPR)
	    hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
	  else if (hi_code != LE_EXPR)
	    return 0;

	  if (lo_code == GT_EXPR)
	    lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);

	  /* We now have VAR >= LO_CST && VAR <= HI_CST.  */
	  rcode = LE_EXPR;
	  break;

	default:
	  return 0;
	}
    }
  else
    {
      switch (lo_code)
	{
	case EQ_EXPR:
	  /* See if we have VAR == CST || VAR == CST+1.  */
	  if (! (hi_code == EQ_EXPR
		 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
		 && tree_int_cst_equal (integer_one_node,
					const_binop (MINUS_EXPR,
						     hi_cst, lo_cst, 0))))
	    return 0;

	  rcode = LE_EXPR;
	  break;

	case LT_EXPR:
	case LE_EXPR:
	  if (hi_code == GE_EXPR)
	    hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
	  else if (hi_code != GT_EXPR)
	    return 0;

	  if (lo_code == LE_EXPR)
	    lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);

	  /* We now have VAR < LO_CST || VAR > HI_CST.  */
	  rcode = GT_EXPR;
	  break;

	default:
	  return 0;
	}
    }

  /* When normalizing, it is possible to both increment the smaller constant
     and decrement the larger constant.  See if they are still ordered.  */
  if (tree_int_cst_lt (hi_cst, lo_cst))
    return 0;

  /* Fail if VAR isn't an integer.  */
  utype = TREE_TYPE (var);
  if (! INTEGRAL_TYPE_P (utype))
    return 0;

  /* The range test is invalid if subtracting the two constants results
     in overflow.  This can happen in traditional mode.  */
  if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
      || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
    return 0;

  if (! TREE_UNSIGNED (utype))
    {
      utype = unsigned_type (utype);
      var = convert (utype, var);
      lo_cst = convert (utype, lo_cst);
      hi_cst = convert (utype, hi_cst);
    }

  return fold (convert (type,
			build (rcode, utype,
			       build (MINUS_EXPR, utype, var, lo_cst),
			       const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
}
/* Find ways of folding logical expressions of LHS and RHS:
   Try to merge two comparisons to the same innermost item.
   Look for range tests like "ch >= '0' && ch <= '9'".
   Look for combinations of simple terms on machines with expensive branches
   and evaluate the RHS unconditionally.

   For example, if we have p->a == 2 && p->b == 4 and we can make an
   object large enough to span both A and B, we can do this with a comparison
   against the object ANDed with a mask.

   If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
   operations to do this with one comparison.

   We check for both normal comparisons and the BIT_AND_EXPRs made by this
   function and the one above.

   CODE is the logical operation being done.  It can be TRUTH_ANDIF_EXPR,
   TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.

   TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
   two operands.

   We return the simplified tree or 0 if no optimization is possible.  */
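/* Illustrative sketch (added; the layout is hypothetical): for two
   adjacent 8-bit fields A and B that fit in one 16-bit word W,
   "p->a == 2 && p->b == 4" can merge into a single comparison of W
   against the constant made by shifting 2 and 4 to their bit positions
   and OR-ing them together.  */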
static tree
fold_truthop (code, truth_type, lhs, rhs)
     enum tree_code code;
     tree truth_type, lhs, rhs;
{
  /* If this is the "or" of two comparisons, we can do something if
     the comparisons are NE_EXPR.  If this is the "and", we can do something
     if the comparisons are EQ_EXPR.  I.e.,
	(a->b == 2 && a->c == 4) can become (a->new == NEW).

     WANTED_CODE is this operation code.  For single bit fields, we can
     convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
     comparison for one-bit fields.  */

  enum tree_code wanted_code;
  enum tree_code lcode, rcode;
  tree ll_arg, lr_arg, rl_arg, rr_arg;
  tree ll_inner, lr_inner, rl_inner, rr_inner;
  int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
  int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
  int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
  int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
  int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
  enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
  enum machine_mode lnmode, rnmode;
  tree ll_mask, lr_mask, rl_mask, rr_mask;
  tree l_const, r_const;
  tree type, result;
  int first_bit, end_bit;
  int volatilep;

  /* Start by getting the comparison codes and seeing if this looks like
     a range test.  Fail if anything is volatile.  If one operand is a
     BIT_AND_EXPR with the constant one, treat it as if it were surrounded
     by a NE_EXPR.  */

  if (TREE_SIDE_EFFECTS (lhs)
      || TREE_SIDE_EFFECTS (rhs))
    return 0;

  lcode = TREE_CODE (lhs);
  rcode = TREE_CODE (rhs);

  if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
    lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);

  if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
    rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);

  if (TREE_CODE_CLASS (lcode) != '<'
      || TREE_CODE_CLASS (rcode) != '<')
    return 0;

  code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
	  ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);

  ll_arg = TREE_OPERAND (lhs, 0);
  lr_arg = TREE_OPERAND (lhs, 1);
  rl_arg = TREE_OPERAND (rhs, 0);
  rr_arg = TREE_OPERAND (rhs, 1);

  if (TREE_CODE (lr_arg) == INTEGER_CST
      && TREE_CODE (rr_arg) == INTEGER_CST
      && operand_equal_p (ll_arg, rl_arg, 0))
    {
      if (tree_int_cst_lt (lr_arg, rr_arg))
	result = range_test (code, truth_type, lcode, rcode,
			     ll_arg, lr_arg, rr_arg);
      else
	result = range_test (code, truth_type, rcode, lcode,
			     ll_arg, rr_arg, lr_arg);

      /* If this isn't a range test, it also isn't a comparison that
	 can be merged.  However, it wins to evaluate the RHS unconditionally
	 on machines with expensive branches.  */

      if (result == 0 && BRANCH_COST >= 2)
	{
	  if (TREE_CODE (ll_arg) != VAR_DECL
	      && TREE_CODE (ll_arg) != PARM_DECL)
	    {
	      /* Avoid evaluating the variable part twice.  */
	      ll_arg = save_expr (ll_arg);
	      lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
	      rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
	    }
	  return build (code, truth_type, lhs, rhs);
	}

      return result;
    }

  /* If the RHS can be evaluated unconditionally and its operands are
     simple, it wins to evaluate the RHS unconditionally on machines
     with expensive branches.  In this case, this isn't a comparison
     that can be merged.  */

  /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
     are with zero (tmw).  */

  if (BRANCH_COST >= 2
      && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
      && simple_operand_p (rl_arg)
      && simple_operand_p (rr_arg))
    return build (code, truth_type, lhs, rhs);
  /* See if the comparisons can be merged.  Then get all the parameters for
     each side.  */

  if ((lcode != EQ_EXPR && lcode != NE_EXPR)
      || (rcode != EQ_EXPR && rcode != NE_EXPR))
    return 0;

  volatilep = 0;
  ll_inner = decode_field_reference (ll_arg,
				     &ll_bitsize, &ll_bitpos, &ll_mode,
				     &ll_unsignedp, &volatilep, &ll_mask);
  lr_inner = decode_field_reference (lr_arg,
				     &lr_bitsize, &lr_bitpos, &lr_mode,
				     &lr_unsignedp, &volatilep, &lr_mask);
  rl_inner = decode_field_reference (rl_arg,
				     &rl_bitsize, &rl_bitpos, &rl_mode,
				     &rl_unsignedp, &volatilep, &rl_mask);
  rr_inner = decode_field_reference (rr_arg,
				     &rr_bitsize, &rr_bitpos, &rr_mode,
				     &rr_unsignedp, &volatilep, &rr_mask);

  /* The inner operands on the lhs of each comparison must be the same
     if we are to be able to do anything.  Then see if we have constants.
     If not, the same must be true for the rhs's.  */
  if (volatilep || ll_inner == 0 || rl_inner == 0
      || ! operand_equal_p (ll_inner, rl_inner, 0))
    return 0;

  if (TREE_CODE (lr_arg) == INTEGER_CST
      && TREE_CODE (rr_arg) == INTEGER_CST)
    l_const = lr_arg, r_const = rr_arg;
  else if (lr_inner == 0 || rr_inner == 0
	   || ! operand_equal_p (lr_inner, rr_inner, 0))
    return 0;
  else
    l_const = r_const = 0;

  /* If either comparison code is not correct for our logical operation,
     fail.  However, we can convert a one-bit comparison against zero into
     the opposite comparison against that bit being set in the field.  */

  wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
  if (lcode != wanted_code)
    {
      if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
	l_const = ll_mask;
      else
	return 0;
    }

  if (rcode != wanted_code)
    {
      if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
	r_const = rl_mask;
      else
	return 0;
    }

  /* See if we can find a mode that contains both fields being compared on
     the left.  If we can't, fail.  Otherwise, update all constants and masks
     to be relative to a field of that size.  */
  first_bit = MIN (ll_bitpos, rl_bitpos);
  end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
  lnmode = get_best_mode (end_bit - first_bit, first_bit,
			  TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
			  volatilep);
  if (lnmode == VOIDmode)
    return 0;

  lnbitsize = GET_MODE_BITSIZE (lnmode);
  lnbitpos = first_bit & ~ (lnbitsize - 1);
  type = type_for_size (lnbitsize, 1);
  xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;

#if BYTES_BIG_ENDIAN
  xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
  xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
#endif

  ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
			 size_int (xll_bitpos), 0);
  rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
			 size_int (xrl_bitpos), 0);

  /* Make sure the constants are interpreted as unsigned, so we
     don't have sign bits outside the range of their type.  */
  if (l_const)
    {
      l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
      l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
			     size_int (xll_bitpos), 0);
    }
  if (r_const)
    {
      r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
      r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
			     size_int (xrl_bitpos), 0);
    }

  /* If the right sides are not constant, do the same for them.  Also,
     disallow this optimization if a size or signedness mismatch occurs
     between the left and right sides.  */
  if (l_const == 0)
    {
      if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
	  || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
	  /* Make sure the two fields on the right
	     correspond to the left without being swapped.  */
	  || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
	return 0;

      first_bit = MIN (lr_bitpos, rr_bitpos);
      end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
      rnmode = get_best_mode (end_bit - first_bit, first_bit,
			      TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
			      volatilep);
      if (rnmode == VOIDmode)
	return 0;

      rnbitsize = GET_MODE_BITSIZE (rnmode);
      rnbitpos = first_bit & ~ (rnbitsize - 1);
      xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;

#if BYTES_BIG_ENDIAN
      xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
      xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
#endif

      lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
			     size_int (xlr_bitpos), 0);
      rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
			     size_int (xrr_bitpos), 0);

      /* Make a mask that corresponds to both fields being compared.
	 Do this for both items being compared.  If the masks agree,
	 we can do this by masking both and comparing the masked
	 results.  */
      ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
      lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
      if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
	{
	  lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
				    ll_unsignedp || rl_unsignedp);
	  rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
				    lr_unsignedp || rr_unsignedp);
	  if (! all_ones_mask_p (ll_mask, lnbitsize))
	    {
	      lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
	      rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
	    }
	  return build (wanted_code, truth_type, lhs, rhs);
	}

      /* There is still another way we can do something:  If both pairs of
	 fields being compared are adjacent, we may be able to make a wider
	 field containing them both.  */
      if ((ll_bitsize + ll_bitpos == rl_bitpos
	   && lr_bitsize + lr_bitpos == rr_bitpos)
	  || (ll_bitpos == rl_bitpos + rl_bitsize
	      && lr_bitpos == rr_bitpos + rr_bitsize))
	return build (wanted_code, truth_type,
		      make_bit_field_ref (ll_inner, type,
					  ll_bitsize + rl_bitsize,
					  MIN (ll_bitpos, rl_bitpos),
					  ll_unsignedp),
		      make_bit_field_ref (lr_inner, type,
					  lr_bitsize + rr_bitsize,
					  MIN (lr_bitpos, rr_bitpos),
					  lr_unsignedp));

      return 0;
    }

  /* Handle the case of comparisons with constants.  If there is something in
     common between the masks, those bits of the constants must be the same.
     If not, the condition is always false.  Test for this to avoid generating
     incorrect code below.  */
  result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
  if (! integer_zerop (result)
      && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
			   const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
    {
      if (wanted_code == NE_EXPR)
	{
	  warning ("`or' of unmatched not-equal tests is always 1");
	  return convert (truth_type, integer_one_node);
	}
      else
	{
	  warning ("`and' of mutually exclusive equal-tests is always zero");
	  return convert (truth_type, integer_zero_node);
	}
    }

  /* Construct the expression we will return.  First get the component
     reference we will make.  Unless the mask is all ones the width of
     that field, perform the mask operation.  Then compare with the
     merged constant.  */
  result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
			       ll_unsignedp || rl_unsignedp);

  ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
  if (! all_ones_mask_p (ll_mask, lnbitsize))
    result = build (BIT_AND_EXPR, type, result, ll_mask);

  return build (wanted_code, truth_type, result,
		const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
}
/* Perform constant folding and related simplification of EXPR.
   The related simplifications include x*1 => x, x*0 => 0, etc.,
   and application of the associative law.
   NOP_EXPR conversions may be removed freely (as long as we
   are careful not to change the C type of the overall expression).
   We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
   but we can constant-fold them if they have constant operands.  */
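/* For instance (added note): fold turns the tree for "x * 1" into "x",
   "(2 + 3) * 4" into the constant 20, and "x + 0" into "x" for
   non-floating-point types.  */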
tree
fold (expr)
     tree expr;
{
  register tree t = expr;
  tree t1 = NULL_TREE;
  tree tem;
  tree type = TREE_TYPE (expr);
  register tree arg0, arg1;
  register enum tree_code code = TREE_CODE (t);
  register int kind;

  /* WINS will be nonzero when the switch is done
     if all operands are constant.  */

  int wins = 1;

  /* Don't try to process an RTL_EXPR since its operands aren't trees.  */
  if (code == RTL_EXPR)
    return t;

  /* Return right away if already constant.  */
  if (TREE_CONSTANT (t))
    {
      if (code == CONST_DECL)
	return DECL_INITIAL (t);
      return t;
    }

  kind = TREE_CODE_CLASS (code);
  if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
    {
      tree subop;

      /* Special case for conversion ops that can have fixed point args.  */
      arg0 = TREE_OPERAND (t, 0);

      /* Don't use STRIP_NOPS, because signedness of argument type matters.  */
      if (arg0 != 0)
	STRIP_TYPE_NOPS (arg0);

      if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
	subop = TREE_REALPART (arg0);
      else
	subop = arg0;

      if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
	  && TREE_CODE (subop) != REAL_CST
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
	  )
	/* Note that TREE_CONSTANT isn't enough:
	   static var addresses are constant but we can't
	   do arithmetic on them.  */
	wins = 0;
    }
  else if (kind == 'e' || kind == '<'
	   || kind == '1' || kind == '2' || kind == 'r')
    {
      register int len = tree_code_length[(int) code];
      register int i;
      for (i = 0; i < len; i++)
	{
	  tree op = TREE_OPERAND (t, i);
	  tree subop;

	  if (op == 0)
	    continue;		/* Valid for CALL_EXPR, at least.  */

	  if (kind == '<' || code == RSHIFT_EXPR)
	    {
	      /* Signedness matters here.  Perhaps we can refine this
		 later.  */
	      STRIP_TYPE_NOPS (op);
	    }
	  else
	    {
	      /* Strip any conversions that don't change the mode.  */
	      STRIP_NOPS (op);
	    }

	  if (TREE_CODE (op) == COMPLEX_CST)
	    subop = TREE_REALPART (op);
	  else
	    subop = op;

	  if (TREE_CODE (subop) != INTEGER_CST
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
	      && TREE_CODE (subop) != REAL_CST
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
	      )
	    /* Note that TREE_CONSTANT isn't enough:
	       static var addresses are constant but we can't
	       do arithmetic on them.  */
	    wins = 0;

	  if (i == 0)
	    arg0 = op;
	  else if (i == 1)
	    arg1 = op;
	}
    }

  /* If this is a commutative operation, and ARG0 is a constant, move it
     to ARG1 to reduce the number of tests below.  */
  if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
       || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
       || code == BIT_AND_EXPR)
      && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
    {
      tem = arg0; arg0 = arg1; arg1 = tem;

      tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
      TREE_OPERAND (t, 1) = tem;
    }

  /* Now WINS is set as described above,
     ARG0 is the first operand of EXPR,
     and ARG1 is the second operand (if it has more than one operand).

     First check for cases where an arithmetic operation is applied to a
     compound, conditional, or comparison operation.  Push the arithmetic
     operation inside the compound or conditional to see if any folding
     can then be done.  Convert comparison to conditional for this purpose.
     This also optimizes non-constant cases that used to be done in
     expand_expr.

     Before we do that, see if this is a BIT_AND_EXPR or a BIT_OR_EXPR,
     one of the operands is a comparison and the other is either a comparison
     or a BIT_AND_EXPR with the constant 1.  In that case, the code below
     would make the expression more complex.  Change it to a
     TRUTH_{AND,OR}_EXPR.  Likewise, convert a similar NE_EXPR to
     TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR.  */

  if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
       || code == EQ_EXPR || code == NE_EXPR)
      && ((TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
	   && (TREE_CODE_CLASS (TREE_CODE (arg1)) == '<'
	       || (TREE_CODE (arg1) == BIT_AND_EXPR
		   && integer_onep (TREE_OPERAND (arg1, 1)))))
	  || (TREE_CODE_CLASS (TREE_CODE (arg1)) == '<'
	      && (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
		  || (TREE_CODE (arg0) == BIT_AND_EXPR
		      && integer_onep (TREE_OPERAND (arg0, 1)))))))
    {
      t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
		       : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
		       : TRUTH_XOR_EXPR,
		       type, arg0, arg1));

      if (code == EQ_EXPR)
	t = invert_truthvalue (t);

      return t;
    }

  if (TREE_CODE_CLASS (code) == '1')
    {
      if (TREE_CODE (arg0) == COMPOUND_EXPR)
	return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
		      fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
      else if (TREE_CODE (arg0) == COND_EXPR)
	{
	  t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
			   fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
			   fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));

	  /* If this was a conversion, and all we did was to move it
	     inside the COND_EXPR, bring it back out.  Then return so we
	     don't get into an infinite recursion loop taking the conversion
	     out and then back in.  */

	  if ((code == NOP_EXPR || code == CONVERT_EXPR
	       || code == NON_LVALUE_EXPR)
	      && TREE_CODE (t) == COND_EXPR
	      && TREE_CODE (TREE_OPERAND (t, 1)) == code
	      && TREE_CODE (TREE_OPERAND (t, 2)) == code
	      && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
		  == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0))))
	    t = build1 (code, type,
			build (COND_EXPR,
			       TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
			       TREE_OPERAND (t, 0),
			       TREE_OPERAND (TREE_OPERAND (t, 1), 0),
			       TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
	  return t;
	}
      else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
	return fold (build (COND_EXPR, type, arg0,
			    fold (build1 (code, type, integer_one_node)),
			    fold (build1 (code, type, integer_zero_node))));
    }
  else if (TREE_CODE_CLASS (code) == '2'
	   || TREE_CODE_CLASS (code) == '<')
    {
      if (TREE_CODE (arg1) == COMPOUND_EXPR)
	return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
		      fold (build (code, type,
				   arg0, TREE_OPERAND (arg1, 1))));
      else if (TREE_CODE (arg1) == COND_EXPR
	       || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
	{
	  tree test, true_value, false_value;

	  if (TREE_CODE (arg1) == COND_EXPR)
	    {
	      test = TREE_OPERAND (arg1, 0);
	      true_value = TREE_OPERAND (arg1, 1);
	      false_value = TREE_OPERAND (arg1, 2);
	    }
	  else
	    {
	      test = arg1;
	      true_value = integer_one_node;
	      false_value = integer_zero_node;
	    }

	  /* If ARG0 is complex we want to make sure we only evaluate
	     it once.  Though this is only required if it is volatile, it
	     might be more efficient even if it is not.  However, if we
	     succeed in folding one part to a constant, we do not need
	     to make this SAVE_EXPR.  Since we do this optimization
	     primarily to see if we do end up with a constant and this
	     SAVE_EXPR interferes with later optimizations, suppressing
	     it when we can is important.  */

	  if ((TREE_CODE (arg0) != VAR_DECL && TREE_CODE (arg0) != PARM_DECL)
	      || TREE_SIDE_EFFECTS (arg0))
	    {
	      tree lhs = fold (build (code, type, arg0, true_value));
	      tree rhs = fold (build (code, type, arg0, false_value));

	      if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
		return fold (build (COND_EXPR, type, test, lhs, rhs));

	      arg0 = save_expr (arg0);
	    }

	  test = fold (build (COND_EXPR, type, test,
			      fold (build (code, type, arg0, true_value)),
			      fold (build (code, type, arg0, false_value))));
	  if (TREE_CODE (arg0) == SAVE_EXPR)
	    return build (COMPOUND_EXPR, type,
			  convert (void_type_node, arg0), test);
	  else
	    return convert (type, test);
	}

      else if (TREE_CODE (arg0) == COMPOUND_EXPR)
	return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
		      fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
      else if (TREE_CODE (arg0) == COND_EXPR
	       || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
	{
	  tree test, true_value, false_value;

	  if (TREE_CODE (arg0) == COND_EXPR)
	    {
	      test = TREE_OPERAND (arg0, 0);
	      true_value = TREE_OPERAND (arg0, 1);
	      false_value = TREE_OPERAND (arg0, 2);
	    }
	  else
	    {
	      test = arg0;
	      true_value = integer_one_node;
	      false_value = integer_zero_node;
	    }

	  if ((TREE_CODE (arg1) != VAR_DECL && TREE_CODE (arg1) != PARM_DECL)
	      || TREE_SIDE_EFFECTS (arg1))
	    {
	      tree lhs = fold (build (code, type, true_value, arg1));
	      tree rhs = fold (build (code, type, false_value, arg1));

	      if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
		return fold (build (COND_EXPR, type, test, lhs, rhs));

	      arg1 = save_expr (arg1);
	    }

	  test = fold (build (COND_EXPR, type, test,
			      fold (build (code, type, true_value, arg1)),
			      fold (build (code, type, false_value, arg1))));
	  if (TREE_CODE (arg1) == SAVE_EXPR)
	    return build (COMPOUND_EXPR, type,
			  convert (void_type_node, arg1), test);
	  else
	    return convert (type, test);
	}
    }
  else if (TREE_CODE_CLASS (code) == '<'
	   && TREE_CODE (arg0) == COMPOUND_EXPR)
    return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
		  fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
  else if (TREE_CODE_CLASS (code) == '<'
	   && TREE_CODE (arg1) == COMPOUND_EXPR)
    return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
		  fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
  switch (code)
    {
    case INTEGER_CST:
    case REAL_CST:
    case STRING_CST:
    case COMPLEX_CST:
    case CONSTRUCTOR:
      return t;

    case CONST_DECL:
      return fold (DECL_INITIAL (t));

    case NOP_EXPR:
    case FLOAT_EXPR:
    case CONVERT_EXPR:
    case FIX_TRUNC_EXPR:
      /* Other kinds of FIX are not handled properly by fold_convert.  */

      /* In addition to the cases of two conversions in a row
	 handled below, if we are converting something to its own
	 type via an object of identical or wider precision, neither
	 conversion is needed.  */
      if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
	   || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
	  && TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == TREE_TYPE (t)
	  && ((INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
	       && INTEGRAL_TYPE_P (TREE_TYPE (t)))
	      || (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
		  && FLOAT_TYPE_P (TREE_TYPE (t))))
	  && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
	      >= TYPE_PRECISION (TREE_TYPE (t))))
	return TREE_OPERAND (TREE_OPERAND (t, 0), 0);

      /* Two conversions in a row are not needed unless:
	 - the intermediate type is narrower than both initial and final, or
	 - the intermediate type and innermost type differ in signedness,
	   and the outermost type is wider than the intermediate, or
	 - the initial type is a pointer type and the precisions of the
	   intermediate and final types differ, or
	 - the final type is a pointer type and the precisions of the
	   initial and intermediate types differ.  */
      if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
	   || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
	  && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
	      > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
	      ||
	      TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
	      > TYPE_PRECISION (TREE_TYPE (t)))
	  && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
		 == INTEGER_TYPE)
		&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
		    == INTEGER_TYPE)
		&& (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
		    != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
		&& (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	  && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
	       && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
		   > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
	      ==
	      (TREE_UNSIGNED (TREE_TYPE (t))
	       && (TYPE_PRECISION (TREE_TYPE (t))
		   > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
	  && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
		 == POINTER_TYPE)
		&& (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
		    != TYPE_PRECISION (TREE_TYPE (t))))
	  && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
		&& (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
		    != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
	return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));

      if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
	  && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
	  /* Detect assigning a bitfield.  */
	  && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
	       && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
	{
	  /* Don't leave an assignment inside a conversion
	     unless assigning a bitfield.  */
	  tree prev = TREE_OPERAND (t, 0);
	  TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
	  /* First do the assignment, then return converted constant.  */
	  t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
	  return t;
	}
      if (!wins)
	{
	  TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
	  return t;
	}
      return fold_convert (t, arg0);
#if 0  /* This loses on &"foo"[0].  */
    case ARRAY_REF:
	{
	  int i;

	  /* Fold an expression like: "foo"[2] */
	  if (TREE_CODE (arg0) == STRING_CST
	      && TREE_CODE (arg1) == INTEGER_CST
	      && !TREE_INT_CST_HIGH (arg1)
	      && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
	    {
	      t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
	      TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
	      force_fit_type (t, 0);
	    }
	}
      return t;
#endif /* 0 */

    case RANGE_EXPR:
      TREE_CONSTANT (t) = wins;
      return t;

    case NEGATE_EXPR:
      if (wins)
	{
	  if (TREE_CODE (arg0) == INTEGER_CST)
	    {
	      HOST_WIDE_INT low, high;
	      int overflow = neg_double (TREE_INT_CST_LOW (arg0),
					 TREE_INT_CST_HIGH (arg0),
					 &low, &high);
	      t = build_int_2 (low, high);
	      TREE_TYPE (t) = type;
	      TREE_OVERFLOW (t)
		= (TREE_OVERFLOW (arg0)
		   | force_fit_type (t, overflow));
	      TREE_CONSTANT_OVERFLOW (t)
		= TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
	    }
	  else if (TREE_CODE (arg0) == REAL_CST)
	    t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
	  TREE_TYPE (t) = type;
	}
      else if (TREE_CODE (arg0) == NEGATE_EXPR)
	return TREE_OPERAND (arg0, 0);
      /* Convert - (a - b) to (b - a) for non-floating-point.  */
      else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
	return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
		      TREE_OPERAND (arg0, 0));
      return t;

    case ABS_EXPR:
      if (wins)
	{
	  if (TREE_CODE (arg0) == INTEGER_CST)
	    {
	      if (! TREE_UNSIGNED (type)
		  && TREE_INT_CST_HIGH (arg0) < 0)
		{
		  HOST_WIDE_INT low, high;
		  int overflow = neg_double (TREE_INT_CST_LOW (arg0),
					     TREE_INT_CST_HIGH (arg0),
					     &low, &high);
		  t = build_int_2 (low, high);
		  TREE_TYPE (t) = type;
		  TREE_OVERFLOW (t)
		    = (TREE_OVERFLOW (arg0)
		       | force_fit_type (t, overflow));
		  TREE_CONSTANT_OVERFLOW (t)
		    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
		}
	    }
	  else if (TREE_CODE (arg0) == REAL_CST)
	    {
	      if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
		t = build_real (type,
				REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
	    }
	  TREE_TYPE (t) = type;
	}
      else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
	return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
      return t;

    case CONJ_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
	return convert (type, arg0);
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
	return build (COMPLEX_EXPR, TREE_TYPE (arg0),
		      TREE_OPERAND (arg0, 0),
		      fold (build1 (NEGATE_EXPR,
				    TREE_TYPE (TREE_TYPE (arg0)),
				    TREE_OPERAND (arg0, 1))));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
	return build_complex (TREE_OPERAND (arg0, 0),
			      fold (build1 (NEGATE_EXPR,
					    TREE_TYPE (TREE_TYPE (arg0)),
					    TREE_OPERAND (arg0, 1))));
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build1 (CONJ_EXPR, type,
					  TREE_OPERAND (arg0, 0))),
			    fold (build1 (CONJ_EXPR,
					  type, TREE_OPERAND (arg0, 1)))));
      else if (TREE_CODE (arg0) == CONJ_EXPR)
	return TREE_OPERAND (arg0, 0);
      return t;

    case BIT_NOT_EXPR:
      if (wins)
	{
	  if (TREE_CODE (arg0) == INTEGER_CST)
	    t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
			     ~ TREE_INT_CST_HIGH (arg0));
	  TREE_TYPE (t) = type;
	  force_fit_type (t, 0);
	  TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
	  TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
	}
      else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
	return TREE_OPERAND (arg0, 0);
      return t;

    case PLUS_EXPR:
      /* A + (-B) -> A - B */
      if (TREE_CODE (arg1) == NEGATE_EXPR)
	return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
      else if (! FLOAT_TYPE_P (type))
	{
	  if (integer_zerop (arg1))
	    return non_lvalue (convert (type, arg0));

	  /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
	     with a constant, and the two constants have no bits in common,
	     we should treat this as a BIT_IOR_EXPR since this may produce more
	     simplifications.  */
	  if (TREE_CODE (arg0) == BIT_AND_EXPR
	      && TREE_CODE (arg1) == BIT_AND_EXPR
	      && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
	      && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
	      && integer_zerop (const_binop (BIT_AND_EXPR,
					     TREE_OPERAND (arg0, 1),
					     TREE_OPERAND (arg1, 1), 0)))
	    {
	      code = BIT_IOR_EXPR;
	      goto bit_ior;
	    }
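	  /* Illustrative note (added): e.g. (x & 0xF0) + (y & 0x0F)
	     can never carry between operands, so it is equivalent to
	     (x & 0xF0) | (y & 0x0F), which may fold further.  */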
	  /* (A * C) + (B * C) -> (A+B) * C.  Since we are most concerned
	     about the case where C is a constant, just try one of the
	     four possibilities.  */

	  if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
	      && operand_equal_p (TREE_OPERAND (arg0, 1),
				  TREE_OPERAND (arg1, 1), 0))
	    return fold (build (MULT_EXPR, type,
				fold (build (PLUS_EXPR, type,
					     TREE_OPERAND (arg0, 0),
					     TREE_OPERAND (arg1, 0))),
				TREE_OPERAND (arg0, 1)));
	}
      /* In IEEE floating point, x+0 may not equal x.  */
      else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       && real_zerop (arg1))
	return non_lvalue (convert (type, arg0));

    associate:
      /* In most languages, can't associate operations on floats
	 through parentheses.  Rather than remember where the parentheses
	 were, we don't associate floats at all.  It shouldn't matter much.  */
      if (FLOAT_TYPE_P (type))
	goto binary;

      /* The varsign == -1 cases happen only for addition and subtraction.
	 It says that the arg that was split was really CON minus VAR.
	 The rest of the code applies to all associative operations.  */
      if (! wins)
	{
	  tree var, con;
	  int varsign;

	  if (split_tree (arg0, code, &var, &con, &varsign))
	    {
	      if (varsign == -1)
		{
		  /* EXPR is (CON-VAR) +- ARG1.  */
		  /* If it is + and VAR==ARG1, return just CONST.  */
		  if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
		    return convert (TREE_TYPE (t), con);

		  /* If ARG0 is a constant, don't change things around;
		     instead keep all the constant computations together.  */

		  if (TREE_CONSTANT (arg0))
		    return t;

		  /* Otherwise return (CON +- ARG1) - VAR.  */
		  TREE_SET_CODE (t, MINUS_EXPR);
		  TREE_OPERAND (t, 1) = var;
		  TREE_OPERAND (t, 0)
		    = fold (build (code, TREE_TYPE (t), con, arg1));
		}
	      else
		{
		  /* EXPR is (VAR+CON) +- ARG1.  */
		  /* If it is - and VAR==ARG1, return just CONST.  */
		  if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
		    return convert (TREE_TYPE (t), con);

		  /* If ARG0 is a constant, don't change things around;
		     instead keep all the constant computations together.  */

		  if (TREE_CONSTANT (arg0))
		    return t;

		  /* Otherwise return VAR +- (ARG1 +- CON).  */
		  TREE_OPERAND (t, 1) = tem
		    = fold (build (code, TREE_TYPE (t), arg1, con));
		  TREE_OPERAND (t, 0) = var;
		  if (integer_zerop (tem)
		      && (code == PLUS_EXPR || code == MINUS_EXPR))
		    return convert (type, var);
		  /* If we have x +/- (c - d) [c an explicit integer]
		     change it to x -/+ (d - c) since if d is relocatable
		     then the latter can be a single immediate insn
		     and the former cannot.  */
		  if (TREE_CODE (tem) == MINUS_EXPR
		      && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
		    {
		      tree tem1 = TREE_OPERAND (tem, 1);
		      TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
		      TREE_OPERAND (tem, 0) = tem1;
		      TREE_SET_CODE (tem,
				     (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
		    }
		}
	      return t;
	    }

	  if (split_tree (arg1, code, &var, &con, &varsign))
	    {
	      if (TREE_CONSTANT (arg1))
		return t;

	      if (varsign == -1)
		TREE_SET_CODE (t,
			       (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));

	      /* EXPR is ARG0 +- (CON +- VAR).  */
	      if (TREE_CODE (t) == MINUS_EXPR
		  && operand_equal_p (var, arg0, 0))
		{
		  /* If VAR and ARG0 cancel, return just CON or -CON.  */
		  if (code == PLUS_EXPR)
		    return convert (TREE_TYPE (t), con);
		  return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
				       convert (TREE_TYPE (t), con)));
		}

	      TREE_OPERAND (t, 0)
		= fold (build (code, TREE_TYPE (t), arg0, con));
	      TREE_OPERAND (t, 1) = var;
	      if (integer_zerop (TREE_OPERAND (t, 0))
		  && TREE_CODE (t) == PLUS_EXPR)
		return convert (TREE_TYPE (t), var);
	      return t;
	    }
	}

    binary:
#if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
      if (TREE_CODE (arg1) == REAL_CST)
	return t;
#endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
      if (wins)
	t1 = const_binop (code, arg0, arg1, 0);
      if (t1 != NULL_TREE)
	{
	  /* The return value should always have
	     the same type as the original expression.  */
	  TREE_TYPE (t1) = TREE_TYPE (t);
	  return t1;
	}
      return t;

    case MINUS_EXPR:
      if (! FLOAT_TYPE_P (type))
	{
	  if (! wins && integer_zerop (arg0))
	    return build1 (NEGATE_EXPR, type, arg1);
	  if (integer_zerop (arg1))
	    return non_lvalue (convert (type, arg0));

	  /* (A * C) - (B * C) -> (A-B) * C.  Since we are most concerned
	     about the case where C is a constant, just try one of the
	     four possibilities.  */

	  if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
	      && operand_equal_p (TREE_OPERAND (arg0, 1),
				  TREE_OPERAND (arg1, 1), 0))
	    return fold (build (MULT_EXPR, type,
				fold (build (MINUS_EXPR, type,
					     TREE_OPERAND (arg0, 0),
					     TREE_OPERAND (arg1, 0))),
				TREE_OPERAND (arg0, 1)));
	}
      /* Convert A - (-B) to A + B.  */
      else if (TREE_CODE (arg1) == NEGATE_EXPR)
	return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
      else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT)
	{
	  /* Except with IEEE floating point, 0-x equals -x.  */
	  if (! wins && real_zerop (arg0))
	    return build1 (NEGATE_EXPR, type, arg1);
	  /* Except with IEEE floating point, x-0 equals x.  */
	  if (real_zerop (arg1))
	    return non_lvalue (convert (type, arg0));
	}

      /* Fold &x - &x.  This can happen from &x.foo - &x.
	 This is unsafe for certain floats even in non-IEEE formats.
	 In IEEE, it is unsafe because it does wrong for NaNs.
	 Also note that operand_equal_p is always false if an operand
	 is volatile.  */

      if (operand_equal_p (arg0, arg1, FLOAT_TYPE_P (type)))
	return convert (type, integer_zero_node);

      goto associate;

    case MULT_EXPR:
      if (! FLOAT_TYPE_P (type))
	{
	  if (integer_zerop (arg1))
	    return omit_one_operand (type, arg1, arg0);
	  if (integer_onep (arg1))
	    return non_lvalue (convert (type, arg0));

	  /* (a * (1 << b)) is (a << b)  */
	  if (TREE_CODE (arg1) == LSHIFT_EXPR
	      && integer_onep (TREE_OPERAND (arg1, 0)))
	    return fold (build (LSHIFT_EXPR, type, arg0,
				TREE_OPERAND (arg1, 1)));
	  if (TREE_CODE (arg0) == LSHIFT_EXPR
	      && integer_onep (TREE_OPERAND (arg0, 0)))
	    return fold (build (LSHIFT_EXPR, type, arg1,
				TREE_OPERAND (arg0, 1)));
	}
      else
	{
	  /* x*0 is 0, except for IEEE floating point.  */
	  if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	      && real_zerop (arg1))
	    return omit_one_operand (type, arg1, arg0);
	  /* In IEEE floating point, x*1 is not equivalent to x for snans.
	     However, ANSI says we can drop signals,
	     so we can do this anyway.  */
	  if (real_onep (arg1))
	    return non_lvalue (convert (type, arg0));

	  if (! wins && real_twop (arg1))
	    {
	      tree arg = save_expr (arg0);
	      return build (PLUS_EXPR, type, arg, arg);
	    }
	}
      goto associate;

    case BIT_IOR_EXPR:
    bit_ior:
      if (integer_all_onesp (arg1))
	return omit_one_operand (type, arg1, arg0);
      if (integer_zerop (arg1))
	return non_lvalue (convert (type, arg0));
      t1 = distribute_bit_expr (code, type, arg0, arg1);
      if (t1 != NULL_TREE)
	return t1;

      /* (A << C1) | (A >> C2), where A is unsigned and C1 + C2 is the
	 size of A, is a rotate of A by C1 bits.  */

      if ((TREE_CODE (arg0) == RSHIFT_EXPR
	   || TREE_CODE (arg0) == LSHIFT_EXPR)
	  && (TREE_CODE (arg1) == RSHIFT_EXPR
	      || TREE_CODE (arg1) == LSHIFT_EXPR)
	  && TREE_CODE (arg0) != TREE_CODE (arg1)
	  && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0)
	  && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
	  && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
	  && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
	  && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
	  && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
	  && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
	       + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
	      == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
	return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
		      TREE_CODE (arg0) == LSHIFT_EXPR
		      ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
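      /* Worked example (added): for a 32-bit unsigned X,
	 (X << 3) | (X >> 29) has 3 + 29 == 32, so it folds to a
	 left-rotate of X by 3 bits (LROTATE_EXPR).  */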
      goto associate;

    case BIT_XOR_EXPR:
      if (integer_zerop (arg1))
	return non_lvalue (convert (type, arg0));
      if (integer_all_onesp (arg1))
	return fold (build1 (BIT_NOT_EXPR, type, arg0));
      goto associate;

    case BIT_AND_EXPR:
    bit_and:
      if (integer_all_onesp (arg1))
	return non_lvalue (convert (type, arg0));
      if (integer_zerop (arg1))
	return omit_one_operand (type, arg1, arg0);
      t1 = distribute_bit_expr (code, type, arg0, arg1);
      if (t1 != NULL_TREE)
	return t1;
      /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char.  */
      if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
	  && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
	{
	  int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
	  if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
	      && (~TREE_INT_CST_LOW (arg0)
		  & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
	    return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
	}
      if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
	  && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
	{
	  int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
	  if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
	      && (~TREE_INT_CST_LOW (arg1)
		  & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
	    return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
	}
      goto associate;

    case BIT_ANDTC_EXPR:
      if (integer_all_onesp (arg0))
	return non_lvalue (convert (type, arg1));
      if (integer_zerop (arg0))
	return omit_one_operand (type, arg0, arg1);
      if (TREE_CODE (arg1) == INTEGER_CST)
	{
	  arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
	  code = BIT_AND_EXPR;
	  goto bit_and;
	}
      goto binary;

    case TRUNC_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (integer_onep (arg1))
	return non_lvalue (convert (type, arg0));
      if (integer_zerop (arg1))
	return t;

      /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
	 where C1 % C3 == 0 or C3 % C1 == 0.  We can simplify these
	 expressions, which often appear in the offsets or sizes of
	 objects with a varying size.  Only deal with positive divisors
	 and multiplicands.

	 Look for NOPs and SAVE_EXPRs inside.  */

      if (TREE_CODE (arg1) == INTEGER_CST
	  && tree_int_cst_lt (integer_zero_node, arg1))
	{
	  int have_save_expr = 0;
	  tree c2 = integer_zero_node;
	  tree xarg0 = arg0;

	  if (TREE_CODE (xarg0) == SAVE_EXPR)
	    have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);

	  STRIP_NOPS (xarg0);

	  if (TREE_CODE (xarg0) == PLUS_EXPR
	      && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
	    c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
	  else if (TREE_CODE (xarg0) == MINUS_EXPR
		   && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
	    {
	      c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
	      xarg0 = TREE_OPERAND (xarg0, 0);
	    }

	  if (TREE_CODE (xarg0) == SAVE_EXPR)
	    have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);

	  STRIP_NOPS (xarg0);

	  if (TREE_CODE (xarg0) == MULT_EXPR
	      && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
	      && tree_int_cst_lt (integer_zero_node, TREE_OPERAND (xarg0, 1))
	      && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
					      TREE_OPERAND (xarg0, 1), arg1, 1))
		  || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
						 TREE_OPERAND (xarg0, 1), 1))))
	    {
	      tree outer_div = integer_one_node;
	      tree c1 = TREE_OPERAND (xarg0, 1);
	      tree c3 = arg1;

	      /* If C3 > C1, set them equal and do a divide by
		 C3/C1 at the end of the operation.  */
	      if (tree_int_cst_lt (c1, c3))
		outer_div = const_binop (code, c3, c1, 0), c3 = c1;

	      /* The result is A * (C1/C3) + (C2/C3).  */
	      t = fold (build (PLUS_EXPR, type,
			       fold (build (MULT_EXPR, type,
					    TREE_OPERAND (xarg0, 0),
					    const_binop (code, c1, c3, 1))),
			       const_binop (code, c2, c3, 1)));

	      if (! integer_onep (outer_div))
		t = fold (build (code, type, t, outer_div));
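	      /* Worked examples (added): with C1 == 8, C3 == 4, C2 == 0,
		 (a * 8) / 4 becomes a * 2.  With C1 == 2, C3 == 8, the
		 outer_div path gives (a * 1) / 4, i.e. a / 4.  */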
	      if (have_save_expr)
		t = save_expr (t);

	      return t;
	    }
	}

      goto binary;

    case RDIV_EXPR:
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
#ifndef REAL_INFINITY
      if (TREE_CODE (arg1) == REAL_CST
	  && real_zerop (arg1))
	return t;
#endif /* REAL_INFINITY */
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      goto binary;

    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
      if (integer_onep (arg1))
	return omit_one_operand (type, integer_zero_node, arg0);
      if (integer_zerop (arg1))
	return t;

      /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
	 where C1 % C3 == 0.  Handle similarly to the division case,
	 but don't bother with SAVE_EXPRs.  */

      if (TREE_CODE (arg1) == INTEGER_CST
	  && ! integer_zerop (arg1))
	{
	  tree c2 = integer_zero_node;
	  tree xarg0 = arg0;

	  if (TREE_CODE (xarg0) == PLUS_EXPR
	      && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
	    c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
	  else if (TREE_CODE (xarg0) == MINUS_EXPR
		   && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
	    {
	      c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
	      xarg0 = TREE_OPERAND (xarg0, 0);
	    }

	  STRIP_NOPS (xarg0);

	  if (TREE_CODE (xarg0) == MULT_EXPR
	      && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
	      && integer_zerop (const_binop (TRUNC_MOD_EXPR,
					     TREE_OPERAND (xarg0, 1),
					     arg1, 1)))
	    /* The result is (C2 % C3).  */
	    return omit_one_operand (type, const_binop (code, c2, arg1, 1),
				     TREE_OPERAND (xarg0, 0));
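	  /* Worked example (added): (a * 8 + 3) % 4 folds to 3,
	     since (a * 8) % 4 == 0 for any a.  */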
	}

      goto binary;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      if (integer_zerop (arg1))
	return non_lvalue (convert (type, arg0));
      /* Since negative shift count is not well-defined,
	 don't try to compute it in the compiler.  */
      if (tree_int_cst_lt (arg1, integer_zero_node))
	return t;
      goto binary;

    case MIN_EXPR:
      if (operand_equal_p (arg0, arg1, 0))
	return arg0;
      if (INTEGRAL_TYPE_P (type)
	  && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
	return omit_one_operand (type, arg1, arg0);
      goto associate;

    case MAX_EXPR:
      if (operand_equal_p (arg0, arg1, 0))
	return arg0;
      if (INTEGRAL_TYPE_P (type)
	  && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
	return omit_one_operand (type, arg1, arg0);
      goto associate;

    case TRUTH_NOT_EXPR:
      /* Note that the operand of this must be an int
	 and its values must be 0 or 1.
	 ("true" is a fixed value perhaps depending on the language,
	 but we don't handle values other than 1 correctly yet.)  */
      return invert_truthvalue (arg0);

    case TRUTH_ANDIF_EXPR:
      /* Note that the operands of this must be ints
	 and their values must be 0 or 1.
	 ("true" is a fixed value perhaps depending on the language.)  */
      /* If first arg is constant zero, return it.  */
      if (integer_zerop (arg0))
	return arg0;
    case TRUTH_AND_EXPR:
      /* If either arg is constant true, drop it.  */
      if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
	return non_lvalue (arg1);
      if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
	return non_lvalue (arg0);
      /* If second arg is constant zero, result is zero, but first arg
	 must be evaluated.  */
      if (integer_zerop (arg1))
	return omit_one_operand (type, arg1, arg0);

    truth_andor:
      /* Check for the possibility of merging component references.  If our
	 lhs is another similar operation, try to merge its rhs with our
	 rhs.  Then try to merge our lhs and rhs.  */

      if (TREE_CODE (arg0) == code)
	{
	  tem = fold_truthop (code, type,
			      TREE_OPERAND (arg0, 1), arg1);
	  if (tem)
	    return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
	}

      tem = fold_truthop (code, type, arg0, arg1);
      if (tem)
	return tem;

      return t;

    case TRUTH_ORIF_EXPR:
      /* Note that the operands of this must be ints
	 and their values must be 0 or true.
	 ("true" is a fixed value perhaps depending on the language.)  */
      /* If first arg is constant true, return it.  */
      if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
	return arg0;
    case TRUTH_OR_EXPR:
      /* If either arg is constant zero, drop it.  */
      if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
	return non_lvalue (arg1);
      if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
	return non_lvalue (arg0);
      /* If second arg is constant true, result is true, but we must
	 evaluate first arg.  */
      if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
	return omit_one_operand (type, arg1, arg0);
      goto truth_andor;

    case TRUTH_XOR_EXPR:
      /* If either arg is constant zero, drop it.  */
      if (integer_zerop (arg0))
	return non_lvalue (arg1);
      if (integer_zerop (arg1))
	return non_lvalue (arg0);
      /* If either arg is constant true, this is a logical inversion.  */
      if (integer_onep (arg0))
	return non_lvalue (invert_truthvalue (arg1));
      if (integer_onep (arg1))
	return non_lvalue (invert_truthvalue (arg0));
      return t;

    case EQ_EXPR:
    case NE_EXPR:
    case LT_EXPR:
    case GT_EXPR:
    case LE_EXPR:
    case GE_EXPR:
      /* If one arg is a constant integer, put it last.  */
      if (TREE_CODE (arg0) == INTEGER_CST
	  && TREE_CODE (arg1) != INTEGER_CST)
	{
	  TREE_OPERAND (t, 0) = arg1;
	  TREE_OPERAND (t, 1) = arg0;
	  arg0 = TREE_OPERAND (t, 0);
	  arg1 = TREE_OPERAND (t, 1);
	  code = swap_tree_comparison (code);
	  TREE_SET_CODE (t, code);
	}

      /* Convert foo++ == CONST into ++foo == CONST + INCR.
	 First, see if one arg is constant; find the constant arg
	 and the other one.  */
      {
	tree constop = 0, varop;
	tree *constoploc;

	if (TREE_CONSTANT (arg1))
	  constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
	if (TREE_CONSTANT (arg0))
	  constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;

	if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
	  {
	    /* This optimization is invalid for ordered comparisons
	       if CONST+INCR overflows or if foo+incr might overflow.
	       This optimization is invalid for floating point due to rounding.
	       For pointer types we assume overflow doesn't happen.  */
	    if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
		|| (! FLOAT_TYPE_P (TREE_TYPE (varop))
		    && (code == EQ_EXPR || code == NE_EXPR)))
	      {
		tree newconst
		  = fold (build (PLUS_EXPR, TREE_TYPE (varop),
				 constop, TREE_OPERAND (varop, 1)));
		TREE_SET_CODE (varop, PREINCREMENT_EXPR);
		*constoploc = newconst;
		return t;
	      }
	  }
	else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
	  {
	    if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
		|| (! FLOAT_TYPE_P (TREE_TYPE (varop))
		    && (code == EQ_EXPR || code == NE_EXPR)))
	      {
		tree newconst
		  = fold (build (MINUS_EXPR, TREE_TYPE (varop),
				 constop, TREE_OPERAND (varop, 1)));
		TREE_SET_CODE (varop, PREDECREMENT_EXPR);
		*constoploc = newconst;
		return t;
	      }
	  }
      }

      /* Change X >= CST to X > (CST - 1) if CST is positive.  */
      if (TREE_CODE (arg1) == INTEGER_CST
	  && TREE_CODE (arg0) != INTEGER_CST
	  && ! tree_int_cst_lt (arg1, integer_one_node))
	{
	  switch (TREE_CODE (t))
	    {
	    case GE_EXPR:
	      code = GT_EXPR;
	      TREE_SET_CODE (t, code);
	      arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
	      TREE_OPERAND (t, 1) = arg1;
	      break;

	    case LT_EXPR:
	      code = LE_EXPR;
	      TREE_SET_CODE (t, code);
	      arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
	      TREE_OPERAND (t, 1) = arg1;
	      break;
	    }
	}

      /* If this is an EQ or NE comparison with zero and ARG0 is
	 (1 << foo) & bar, convert it to (bar >> foo) & 1.  Both require
	 two operations, but the latter can be done in one less insn
	 on machines that have only two-operand insns or on which a
	 constant cannot be the first operand.  */
      if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
	  && TREE_CODE (arg0) == BIT_AND_EXPR)
	{
	  if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
	      && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
	    return
	      fold (build (code, type,
			   build (BIT_AND_EXPR, TREE_TYPE (arg0),
				  build (RSHIFT_EXPR,
					 TREE_TYPE (TREE_OPERAND (arg0, 0)),
					 TREE_OPERAND (arg0, 1),
					 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
				  convert (TREE_TYPE (arg0),
					   integer_one_node)),
			   arg1));
	  else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
		   && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
	    return
	      fold (build (code, type,
			   build (BIT_AND_EXPR, TREE_TYPE (arg0),
				  build (RSHIFT_EXPR,
					 TREE_TYPE (TREE_OPERAND (arg0, 1)),
					 TREE_OPERAND (arg0, 0),
					 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
				  convert (TREE_TYPE (arg0),
					   integer_one_node)),
			   arg1));
	}

      /* If this is an NE or EQ comparison of zero against the result of a
	 signed MOD operation whose second operand is a power of 2, make
	 the MOD operation unsigned since it is simpler and equivalent.  */
      if ((code == NE_EXPR || code == EQ_EXPR)
	  && integer_zerop (arg1)
	  && ! TREE_UNSIGNED (TREE_TYPE (arg0))
	  && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
	      || TREE_CODE (arg0) == CEIL_MOD_EXPR
	      || TREE_CODE (arg0) == FLOOR_MOD_EXPR
	      || TREE_CODE (arg0) == ROUND_MOD_EXPR)
	  && integer_pow2p (TREE_OPERAND (arg0, 1)))
	{
	  tree newtype = unsigned_type (TREE_TYPE (arg0));
	  tree newmod = build (TREE_CODE (arg0), newtype,
			       convert (newtype, TREE_OPERAND (arg0, 0)),
			       convert (newtype, TREE_OPERAND (arg0, 1)));

	  return build (code, type, newmod, convert (newtype, arg1));
	}
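      /* Illustrative note (added): for signed x, (x % 4) == 0 depends
	 only on the low two bits of x, so computing the MOD in the
	 unsigned type gives the same truth value; e.g. x == -5 yields
	 remainder -1 signed or 3 unsigned, nonzero either way.  */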
      /* If this is an NE comparison of zero with an AND of one, remove the
	 comparison since the AND will give the correct value.  */
      if (code == NE_EXPR && integer_zerop (arg1)
	  && TREE_CODE (arg0) == BIT_AND_EXPR
	  && integer_onep (TREE_OPERAND (arg0, 1)))
	return convert (type, arg0);

      /* If we have (A & C) == C where C is a power of 2, convert this into
	 (A & C) != 0.  Similarly for NE_EXPR.  */
      if ((code == EQ_EXPR || code == NE_EXPR)
	  && TREE_CODE (arg0) == BIT_AND_EXPR
	  && integer_pow2p (TREE_OPERAND (arg0, 1))
	  && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
	return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
		      arg0, integer_zero_node);
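      /* E.g. (added note): (x & 8) == 8 becomes (x & 8) != 0; both test
	 bit 3, but the latter needs the constant only once.  */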
      /* Simplify comparison of something with itself.  (For IEEE
	 floating-point, we can only do some of these simplifications.)  */
      if (operand_equal_p (arg0, arg1, 0))
	{
	  switch (code)
	    {
	    case EQ_EXPR:
	    case GE_EXPR:
	    case LE_EXPR:
	      if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
		{
		  t = build_int_2 (1, 0);
		  TREE_TYPE (t) = type;
		  return t;
		}
	      code = EQ_EXPR;
	      TREE_SET_CODE (t, code);
	      break;

	    case NE_EXPR:
	      /* For NE, we can only do this simplification if integer.  */
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
		break;
	      /* ... fall through ... */
	    case GT_EXPR:
	    case LT_EXPR:
	      t = build_int_2 (0, 0);
	      TREE_TYPE (t) = type;
	      return t;
	    }
	}
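      /* Thus for integers `x == x' folds to 1 and `x < x' folds to 0,
	 while for IEEE operands `x <= x' only becomes `x == x', since
	 both are false when `x' is a NaN.  */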
      /* An unsigned comparison against 0 can be simplified.  */
      if (integer_zerop (arg1)
	  && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
	      || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
	  && TREE_UNSIGNED (TREE_TYPE (arg1)))
	{
	  switch (TREE_CODE (t))
	    {
	    case GT_EXPR:
	      code = NE_EXPR;
	      TREE_SET_CODE (t, NE_EXPR);
	      break;
	    case LE_EXPR:
	      code = EQ_EXPR;
	      TREE_SET_CODE (t, EQ_EXPR);
	      break;
	    case GE_EXPR:
	      return omit_one_operand (type,
				       convert (type, integer_one_node),
				       arg0);
	    case LT_EXPR:
	      return omit_one_operand (type,
				       convert (type, integer_zero_node),
				       arg0);
	    }
	}
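      /* With U unsigned, `u > 0' becomes `u != 0', `u <= 0' becomes
	 `u == 0', `u >= 0' is always 1, and `u < 0' is always 0.  */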
      /* If we are comparing an expression that just has comparisons
	 of two integer values, arithmetic expressions of those comparisons,
	 and constants, we can simplify it.  There are only three cases
	 to check: the two values can either be equal, the first can be
	 greater, or the second can be greater.  Fold the expression for
	 those three values.  Since each value must be 0 or 1, we have
	 eight possibilities, each of which corresponds to the constant 0
	 or 1 or one of the six possible comparisons.

	 This handles common cases like (a > b) == 0 but also handles
	 expressions like ((x > y) - (y > x)) > 0, which supposedly
	 occur in macroized code.  */

      if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
	{
	  tree cval1 = 0, cval2 = 0;
	  int save_p = 0;

	  if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
	      /* Don't handle degenerate cases here; they should already
		 have been handled anyway.  */
	      && cval1 != 0 && cval2 != 0
	      && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
	      && TREE_TYPE (cval1) == TREE_TYPE (cval2)
	      && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
	      && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
				    TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
	    {
	      tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
	      tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));

	      /* We can't just pass T to eval_subst in case cval1 or cval2
		 was the same as ARG1.  */

	      tree high_result
		= fold (build (code, type,
			       eval_subst (arg0, cval1, maxval, cval2, minval),
			       arg1));
	      tree equal_result
		= fold (build (code, type,
			       eval_subst (arg0, cval1, maxval, cval2, maxval),
			       arg1));
	      tree low_result
		= fold (build (code, type,
			       eval_subst (arg0, cval1, minval, cval2, maxval),
			       arg1));

	      /* All three of these results should be 0 or 1.  Confirm they
		 are.  Then use those values to select the proper code
		 to use.  */

	      if ((integer_zerop (high_result)
		   || integer_onep (high_result))
		  && (integer_zerop (equal_result)
		      || integer_onep (equal_result))
		  && (integer_zerop (low_result)
		      || integer_onep (low_result)))
		{
		  /* Make a 3-bit mask with the high-order bit being the
		     value for `>', the next for '=', and the low for '<'.  */
		  switch ((integer_onep (high_result) * 4)
			  + (integer_onep (equal_result) * 2)
			  + integer_onep (low_result))
		    {
		    case 0:
		      /* Always false.  */
		      return omit_one_operand (type, integer_zero_node, arg0);
		    case 1:
		      code = LT_EXPR;
		      break;
		    case 2:
		      code = EQ_EXPR;
		      break;
		    case 3:
		      code = LE_EXPR;
		      break;
		    case 4:
		      code = GT_EXPR;
		      break;
		    case 5:
		      code = NE_EXPR;
		      break;
		    case 6:
		      code = GE_EXPR;
		      break;
		    case 7:
		      /* Always true.  */
		      return omit_one_operand (type, integer_one_node, arg0);
		    }

		  t = build (code, type, cval1, cval2);
		  if (save_p)
		    return save_expr (t);
		  else
		    return fold (t);
		}
	    }
	}
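      /* Worked example: for `(a > b) == 0' the three substitutions give
	 HIGH_RESULT == 0, EQUAL_RESULT == 1 and LOW_RESULT == 1, so the
	 mask is 3 (`=' and `<') and the expression folds to `a <= b'.  */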
      /* If this is a comparison of a field, we may be able to simplify it.  */
      if ((TREE_CODE (arg0) == COMPONENT_REF
	   || TREE_CODE (arg0) == BIT_FIELD_REF)
	  && (code == EQ_EXPR || code == NE_EXPR)
	  /* Handle the constant case even without -O
	     to make sure the warnings are given.  */
	  && (optimize || TREE_CODE (arg1) == INTEGER_CST))
	{
	  t1 = optimize_bit_field_compare (code, type, arg0, arg1);
	  return t1 ? t1 : t;
	}
      /* From here on, the only cases we handle are when the result is
	 known to be a constant.

	 To compute GT, swap the arguments and do LT.
	 To compute GE, do LT and invert the result.
	 To compute LE, swap the arguments, do LT and invert the result.
	 To compute NE, do EQ and invert the result.

	 Therefore, the code below must handle only EQ and LT.  */

      if (code == LE_EXPR || code == GT_EXPR)
	{
	  tem = arg0, arg0 = arg1, arg1 = tem;
	  code = swap_tree_comparison (code);
	}

      /* Note that it is safe to invert for real values here because we
	 will check below in the one case that it matters.  */

      invert = 0;
      if (code == NE_EXPR || code == GE_EXPR)
	{
	  invert = 1;
	  code = invert_tree_comparison (code);
	}
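      /* Thus `a > b' on constant operands is computed as `b < a', and
	 `a >= b' as `a < b' with INVERT set.  */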
      /* Compute a result for LT or EQ if args permit;
	 otherwise return T.  */
      if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
	{
	  if (code == EQ_EXPR)
	    t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
			       == TREE_INT_CST_LOW (arg1))
			      && (TREE_INT_CST_HIGH (arg0)
				  == TREE_INT_CST_HIGH (arg1)),
			      0);
	  else
	    t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
			       ? INT_CST_LT_UNSIGNED (arg0, arg1)
			       : INT_CST_LT (arg0, arg1)),
			      0);
	}
      /* Assume a nonexplicit constant cannot equal an explicit one,
	 since such code would be undefined anyway.
	 Exception: on sysvr4, using #pragma weak,
	 a label can come out as 0.  */
      else if (TREE_CODE (arg1) == INTEGER_CST
	       && !integer_zerop (arg1)
	       && TREE_CONSTANT (arg0)
	       && TREE_CODE (arg0) == ADDR_EXPR
	       && code == EQ_EXPR)
	t1 = build_int_2 (0, 0);
      /* Two real constants can be compared explicitly.  */
      else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
	{
	  /* If either operand is a NaN, the result is false with two
	     exceptions: First, an NE_EXPR is true on NaNs, but that case
	     is already handled correctly since we will be inverting the
	     result for NE_EXPR.  Second, if we had inverted a LE_EXPR
	     or a GE_EXPR into a LT_EXPR, we must return true so that it
	     will be inverted into false.  */

	  if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
	      || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
	    t1 = build_int_2 (invert && code == LT_EXPR, 0);

	  else if (code == EQ_EXPR)
	    t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
						 TREE_REAL_CST (arg1)),
			      0);
	  else
	    t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
						TREE_REAL_CST (arg1)),
			      0);
	}
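      /* For example, when folding `x >= y' with X a NaN constant: the
	 code was inverted to LT above, the NaN case yields 1 here, and
	 the final inversion below produces 0, the correct IEEE result.  */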
      if (t1 == NULL_TREE)
	return t;

      if (invert)
	TREE_INT_CST_LOW (t1) ^= 1;

      TREE_TYPE (t1) = type;
      return t1;
    case COND_EXPR:
      /* Pedantic ANSI C says that a conditional expression is never an lvalue,
	 so all simple results must be passed through pedantic_non_lvalue.  */
      if (TREE_CODE (arg0) == INTEGER_CST)
	return pedantic_non_lvalue
	  (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
      else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
	return pedantic_non_lvalue (omit_one_operand (type, arg1, arg0));
      /* If the second operand is zero, invert the comparison and swap
	 the second and third operands.  Likewise if the second operand
	 is constant and the third is not or if the third operand is
	 equivalent to the first operand of the comparison.  */

      if (integer_zerop (arg1)
	  || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
	  || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
	      && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
						 TREE_OPERAND (t, 2),
						 TREE_OPERAND (arg0, 1))))
	{
	  /* See if this can be inverted.  If it can't, possibly because
	     it was a floating-point inequality comparison, don't do
	     anything.  */
	  tem = invert_truthvalue (arg0);

	  if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
	    {
	      arg0 = TREE_OPERAND (t, 0) = tem;
	      TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
	      TREE_OPERAND (t, 2) = arg1;
	      arg1 = TREE_OPERAND (t, 1);
	    }
	}
      /* If we have A op B ? A : C, we may be able to convert this to a
	 simpler expression, depending on the operation and the values
	 of B and C.  IEEE floating point prevents this though,
	 because A or B might be -0.0 or a NaN.  */

      if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
	  && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	      || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0))))
	  && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
					     arg1, TREE_OPERAND (arg0, 1)))
	{
	  tree arg2 = TREE_OPERAND (t, 2);
	  enum tree_code comp_code = TREE_CODE (arg0);

	  /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
	     depending on the comparison operation.  */
	  if (integer_zerop (TREE_OPERAND (arg0, 1))
	      && TREE_CODE (arg2) == NEGATE_EXPR
	      && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		return pedantic_non_lvalue
		  (fold (build1 (NEGATE_EXPR, type, arg1)));
	      case NE_EXPR:
		return pedantic_non_lvalue (convert (type, arg1));
	      case GE_EXPR:
	      case GT_EXPR:
		return pedantic_non_lvalue
		  (fold (build1 (ABS_EXPR, type, arg1)));
	      case LE_EXPR:
	      case LT_EXPR:
		return pedantic_non_lvalue
		  (fold (build1 (NEGATE_EXPR, type,
				 fold (build1 (ABS_EXPR, type, arg1)))));
	      }
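	  /* So `x >= 0 ? x : -x' folds to abs (x), and `x < 0 ? x : -x'
	     folds to -abs (x).  */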
	  /* If this is A != 0 ? A : 0, this is simply A.  For ==, it is
	     always zero.  */

	  if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
	    {
	      if (comp_code == NE_EXPR)
		return pedantic_non_lvalue (convert (type, arg1));
	      else if (comp_code == EQ_EXPR)
		return pedantic_non_lvalue (convert (type, integer_zero_node));
	    }
	  /* If this is A op B ? A : B, this is either A, B, min (A, B),
	     or max (A, B), depending on the operation.  */

	  if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
					      arg2, TREE_OPERAND (arg0, 0)))
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		return pedantic_non_lvalue (convert (type, arg2));
	      case NE_EXPR:
		return pedantic_non_lvalue (convert (type, arg1));
	      case LE_EXPR:
	      case LT_EXPR:
		return pedantic_non_lvalue
		  (fold (build (MIN_EXPR, type, arg1, arg2)));
	      case GE_EXPR:
	      case GT_EXPR:
		return pedantic_non_lvalue
		  (fold (build (MAX_EXPR, type, arg1, arg2)));
	      }
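	  /* For instance, `a < b ? a : b' becomes MIN_EXPR (a, b) and
	     `a > b ? a : b' becomes MAX_EXPR (a, b).  */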
	  /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
	     we might still be able to simplify this.  For example,
	     if C1 is one less or one more than C2, this might have started
	     out as a MIN or MAX and been transformed by this function.
	     Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE.  */

	  if (INTEGRAL_TYPE_P (type)
	      && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
	      && TREE_CODE (arg2) == INTEGER_CST)
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		/* We can replace A with C1 in this case.  */
		arg1 = TREE_OPERAND (t, 1)
		  = convert (type, TREE_OPERAND (arg0, 1));
		t = fold (t);
		break;

	      case LT_EXPR:
		/* If C1 is C2 + 1, this is min(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (PLUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MIN_EXPR, type, arg1, arg2)));
		break;

	      case LE_EXPR:
		/* If C1 is C2 - 1, this is min(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (MINUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MIN_EXPR, type, arg1, arg2)));
		break;

	      case GT_EXPR:
		/* If C1 is C2 - 1, this is max(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (MINUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MAX_EXPR, type, arg1, arg2)));
		break;

	      case GE_EXPR:
		/* If C1 is C2 + 1, this is max(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (PLUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MAX_EXPR, type, arg1, arg2)));
		break;
	      }
	}
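      /* E.g., `a < 3 ? a : 2' folds to min (a, 2) above: when `a < 3'
	 holds, A is at most 2; otherwise the result is the constant 2.  */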
      /* Convert A ? 1 : 0 to simply A.  */
      if (integer_onep (TREE_OPERAND (t, 1))
	  && integer_zerop (TREE_OPERAND (t, 2))
	  /* If we try to convert TREE_OPERAND (t, 0) to our type, the
	     call to fold will try to move the conversion inside
	     a COND, which will recurse.  In that case, the COND_EXPR
	     is probably the best choice, so leave it alone.  */
	  && type == TREE_TYPE (arg0))
	return pedantic_non_lvalue (arg0);
      /* Look for expressions of the form A & 2 ? 2 : 0.  The result of this
	 operation is simply A & 2.  */

      if (integer_zerop (TREE_OPERAND (t, 2))
	  && TREE_CODE (arg0) == NE_EXPR
	  && integer_zerop (TREE_OPERAND (arg0, 1))
	  && integer_pow2p (arg1)
	  && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
	  && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
			      arg1, 1))
	return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
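      /* For example, `(x & 2) != 0 ? 2 : 0' is simply `x & 2', since
	 the AND can only yield 0 or 2.  */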
      return t;

    case COMPOUND_EXPR:
      /* When pedantic, a compound expression can be neither an lvalue
	 nor an integer constant expression.  */
      if (TREE_SIDE_EFFECTS (arg0) || pedantic)
	return t;
      /* Don't let (0, 0) be null pointer constant.  */
      if (integer_zerop (arg1))
	return non_lvalue (arg1);
      return arg1;
    case COMPLEX_EXPR:
      if (wins)
	return build_complex (arg0, arg1);
      return t;
    case REALPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
	return t;
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
	return omit_one_operand (type, TREE_OPERAND (arg0, 0),
				 TREE_OPERAND (arg0, 1));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
	return TREE_REALPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build1 (REALPART_EXPR, type,
					  TREE_OPERAND (arg0, 0))),
			    fold (build1 (REALPART_EXPR,
					  type, TREE_OPERAND (arg0, 1)))));
      return t;
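      /* The real part distributes over addition and subtraction:
	 REALPART (a + b) is REALPART (a) + REALPART (b), which is what
	 the PLUS_EXPR/MINUS_EXPR arm above exploits; the imaginary part
	 below behaves the same way.  */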
    case IMAGPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
	return convert (type, integer_zero_node);
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
	return omit_one_operand (type, TREE_OPERAND (arg0, 1),
				 TREE_OPERAND (arg0, 0));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
	return TREE_IMAGPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build1 (IMAGPART_EXPR, type,
					  TREE_OPERAND (arg0, 0))),
			    fold (build1 (IMAGPART_EXPR, type,
					  TREE_OPERAND (arg0, 1)))));
      return t;
    default:
      return t;
    } /* switch (code) */