/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 1988, 1992, 1993 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
/*@@ Fix lossage on folding division of big integers.  */

/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */

/* The entry points in this file are fold, size_int and size_binop.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.  */
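
/* Illustrative sketch (exposition only, not part of the original file):
   how a caller might use the three entry points just described.  The
   function name `example_entry_points' and its argument are hypothetical,
   and EXP is assumed to already have type `sizetype'.  */
#if 0
static tree
example_entry_points (exp)
     tree exp;
{
  /* Folding two constants through size_binop yields another constant:
     here the result is size_int (16).  */
  tree sixteen = size_binop (PLUS_EXPR, size_int (4), size_int (12));

  /* fold simplifies an arbitrary expression tree where it can.  */
  return fold (build (MIN_EXPR, sizetype, exp, sixteen));
}
#endif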
/* Handle floating overflow for `const_binop'.  */
static jmp_buf float_error;

static void encode	PROTO((short *, HOST_WIDE_INT, HOST_WIDE_INT));
static void decode	PROTO((short *, HOST_WIDE_INT *, HOST_WIDE_INT *));
static int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
				       HOST_WIDE_INT, HOST_WIDE_INT,
				       HOST_WIDE_INT, HOST_WIDE_INT *,
				       HOST_WIDE_INT *, HOST_WIDE_INT *,
				       HOST_WIDE_INT *));
static int split_tree	PROTO((tree, enum tree_code, tree *, tree *, int *));
static tree const_binop PROTO((enum tree_code, tree, tree, int));
static tree fold_convert PROTO((tree, tree));
static enum tree_code invert_tree_comparison PROTO((enum tree_code));
static enum tree_code swap_tree_comparison PROTO((enum tree_code));
static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
static tree eval_subst	PROTO((tree, tree, tree, tree, tree));
static tree omit_one_operand PROTO((tree, tree, tree));
static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
					      tree, tree));
static tree decode_field_reference PROTO((tree, int *, int *,
					  enum machine_mode *, int *,
					  int *, tree *));
static int all_ones_mask_p PROTO((tree, int));
static int simple_operand_p PROTO((tree));
static tree range_test	PROTO((enum tree_code, tree, enum tree_code,
			       enum tree_code, tree, tree, tree));
static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
/* Yield nonzero if a signed left shift of A by B bits overflows.  */
#define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))

/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
   Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
   Then this yields nonzero if overflow occurred during the addition.
   Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
   Use `^' to test whether signs differ, and `< 0' to isolate the sign.  */
#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
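
/* Illustrative sketch (exposition only): the macro above applied to
   concrete values, assuming 32-bit ints with the 2's complement
   wraparound the comment assumes.  */
#if 0
static void
example_overflow_sum_sign ()
{
  int a = 0x7fffffff, b = 1;
  int sum = a + b;		/* wraps to the minimum integer */

  if (overflow_sum_sign (a, b, sum))
    ;				/* reached: operands agree in sign, sum differs */
  if (! overflow_sum_sign (5, -3, 2))
    ;				/* reached: differing operand signs never overflow */
}
#endif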
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer as MAX_SHORTS shorts,
   with only 8 bits stored in each short, as a positive number.  */

/* Unpack a two-word integer into MAX_SHORTS shorts.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   SHORTS points to the array of shorts.  */

static void
encode (shorts, low, hi)
     short *shorts;
     HOST_WIDE_INT low, hi;
{
  register int i;

  for (i = 0; i < MAX_SHORTS / 2; i++)
    {
      shorts[i] = (low >> (i * 8)) & 0xff;
      shorts[i + MAX_SHORTS / 2] = (hi >> (i * 8)) & 0xff;
    }
}

/* Pack an array of MAX_SHORTS shorts into a two-word integer.
   SHORTS points to the array of shorts.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (shorts, low, hi)
     short *shorts;
     HOST_WIDE_INT *low, *hi;
{
  register int i;
  HOST_WIDE_INT lv = 0, hv = 0;

  for (i = 0; i < MAX_SHORTS / 2; i++)
    {
      lv |= (HOST_WIDE_INT) shorts[i] << (i * 8);
      hv |= (HOST_WIDE_INT) shorts[i + MAX_SHORTS / 2] << (i * 8);
    }

  *low = lv, *hi = hv;
}
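
/* Illustrative sketch (exposition only): a round trip through the two
   statics above.  encode unpacks a doubleword into 8-bit digits and
   decode packs them back losslessly.  */
#if 0
static void
example_encode_decode ()
{
  short digits[MAX_SHORTS];
  HOST_WIDE_INT low, high;

  encode (digits, (HOST_WIDE_INT) 0x1234, (HOST_WIDE_INT) -1);
  decode (digits, &low, &high);
  /* Here low == 0x1234 and high == -1 again.  */
}
#endif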
/* Make the integer constant T valid for its type
   by setting to 0 or 1 all the bits in the constant
   that don't belong in the type.
   Yield 1 if a signed overflow occurs, 0 otherwise.
   If OVERFLOW is nonzero, a signed overflow has already occurred
   in calculating T, so propagate it.  */

int
force_fit_type (t, overflow)
     tree t;
     int overflow;
{
  HOST_WIDE_INT low, high;
  register int prec;

  if (TREE_CODE (t) != INTEGER_CST)
    return overflow;

  low = TREE_INT_CST_LOW (t);
  high = TREE_INT_CST_HIGH (t);

  if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
    prec = POINTER_SIZE;
  else
    prec = TYPE_PRECISION (TREE_TYPE (t));

  /* First clear all bits that are beyond the type's precision.  */

  if (prec == 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      TREE_INT_CST_HIGH (t)
	&= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
    }
  else
    {
      TREE_INT_CST_HIGH (t) = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
	TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  /* Unsigned types do not suffer sign extension or overflow.  */
  if (TREE_UNSIGNED (TREE_TYPE (t)))
    return overflow;

  /* If the value's sign bit is set, extend the sign.  */
  if (prec != 2 * HOST_BITS_PER_WIDE_INT
      && (prec > HOST_BITS_PER_WIDE_INT
	  ? (TREE_INT_CST_HIGH (t)
	     & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
	  : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
    {
      /* Value is negative:
	 set to 1 all the bits that are outside this type's precision.  */
      if (prec > HOST_BITS_PER_WIDE_INT)
	{
	  TREE_INT_CST_HIGH (t)
	    |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
	}
      else
	{
	  TREE_INT_CST_HIGH (t) = -1;
	  if (prec < HOST_BITS_PER_WIDE_INT)
	    TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
	}
    }

  /* Yield nonzero if signed overflow occurred.  */
  return
    ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
     != 0);
}
/* Add two doubleword integers with doubleword result.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

int
add_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  short arg1[MAX_SHORTS];
  short arg2[MAX_SHORTS];
  register int carry = 0;
  register int i;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  for (i = 0; i < MAX_SHORTS; i++)
    {
      carry += arg1[i] + arg2[i];
      arg1[i] = carry & 0xff;
      carry >>= 8;
    }

  decode (arg1, lv, hv);
  return overflow_sum_sign (h1, h2, *hv);
}
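
/* Illustrative sketch (exposition only): using the return value of
   add_double to detect signed overflow.  */
#if 0
static void
example_add_double ()
{
  HOST_WIDE_INT lv, hv;

  /* -1 + 1 == 0; the signs of the operands differ, so no overflow.  */
  if (! add_double ((HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
		    (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0, &lv, &hv))
    ;				/* reached: lv == 0 and hv == 0 */
}
#endif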
/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

int
neg_double (l1, h1, lv, hv)
     HOST_WIDE_INT l1, h1;
     HOST_WIDE_INT *lv, *hv;
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = - l1;
      *hv = ~ h1;
      return 0;
    }
}
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

int
mul_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  short arg1[MAX_SHORTS];
  short arg2[MAX_SHORTS];
  short prod[MAX_SHORTS * 2];
  register int carry = 0;
  register int i, j, k;
  HOST_WIDE_INT toplow, tophigh, neglow, neghigh;

  /* These cases are used extensively, arising from pointer combinations.  */
  if (h2 == 0 && l2 == 2)
    {
      int overflow = left_shift_overflows (h1, 1);
      unsigned HOST_WIDE_INT temp = l1 + l1;
      *hv = (h1 << 1) + (temp < l1);
      *lv = temp;
      return overflow;
    }
  if (h2 == 0 && l2 == 4)
    {
      int overflow = left_shift_overflows (h1, 2);
      unsigned HOST_WIDE_INT temp = l1 + l1;
      h1 = (h1 << 2) + ((temp < l1) << 1);
      l1 = temp;
      temp += temp;
      h1 += (temp < l1);
      *lv = temp;
      *hv = h1;
      return overflow;
    }
  if (h2 == 0 && l2 == 8)
    {
      int overflow = left_shift_overflows (h1, 3);
      unsigned HOST_WIDE_INT temp = l1 + l1;
      h1 = (h1 << 3) + ((temp < l1) << 2);
      l1 = temp;
      temp += temp;
      h1 += (temp < l1) << 1;
      l1 = temp;
      temp += temp;
      h1 += (temp < l1);
      *lv = temp;
      *hv = h1;
      return overflow;
    }

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  bzero (prod, sizeof prod);

  for (i = 0; i < MAX_SHORTS; i++)
    for (j = 0; j < MAX_SHORTS; j++)
      {
	k = i + j;
	carry = arg1[i] * arg2[j];
	while (carry)
	  {
	    carry += prod[k];
	    prod[k] = carry & 0xff;
	    carry >>= 8;
	    k++;
	  }
      }

  decode (prod, lv, hv);	/* This ignores
				   prod[MAX_SHORTS] -> prod[MAX_SHORTS*2-1] */

  /* Check for overflow by calculating the top half of the answer in full;
     it should agree with the low half's sign bit.  */
  decode (prod+MAX_SHORTS, &toplow, &tophigh);
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
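
/* Illustrative sketch (exposition only): a small product through
   mul_double; the top half of the full product is all zero, so no
   overflow is reported.  */
#if 0
static void
example_mul_double ()
{
  HOST_WIDE_INT lv, hv;

  if (! mul_double ((HOST_WIDE_INT) 3, (HOST_WIDE_INT) 0,
		    (HOST_WIDE_INT) 5, (HOST_WIDE_INT) 0, &lv, &hv))
    ;				/* reached: lv == 15 and hv == 0 */
}
#endif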
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  short arg1[MAX_SHORTS];
  register int i;
  register int carry;

  if (count < 0)
    {
      rshift_double (l1, h1, - count, prec, lv, hv, arith);
      return;
    }

  encode (arg1, l1, h1);

  if (count > prec)
    count = prec;

  while (count > 0)
    {
      carry = 0;
      for (i = 0; i < MAX_SHORTS; i++)
	{
	  carry += arg1[i] << 1;
	  arg1[i] = carry & 0xff;
	  carry >>= 8;
	}
      count--;
    }

  decode (arg1, lv, hv);
}

/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  short arg1[MAX_SHORTS];
  register int i;
  register int carry;

  encode (arg1, l1, h1);

  if (count > prec)
    count = prec;

  while (count > 0)
    {
      carry = arith && arg1[7] >> 7;
      for (i = MAX_SHORTS - 1; i >= 0; i--)
	{
	  carry <<= 8;
	  carry += arg1[i];
	  arg1[i] = (carry >> 1) & 0xff;
	  carry &= 1;
	}
      count--;
    }

  decode (arg1, lv, hv);
}
/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  short arg1[MAX_SHORTS];
  register int i;
  register int carry;

  if (count < 0)
    {
      rrotate_double (l1, h1, - count, prec, lv, hv);
      return;
    }

  encode (arg1, l1, h1);

  while (count > 0)
    {
      carry = arg1[MAX_SHORTS - 1] >> 7;

      for (i = 0; i < MAX_SHORTS; i++)
	{
	  carry += arg1[i] << 1;
	  arg1[i] = carry & 0xff;
	  carry >>= 8;
	}
      count--;
    }

  decode (arg1, lv, hv);
}

/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  short arg1[MAX_SHORTS];
  register int i;
  register int carry;

  encode (arg1, l1, h1);

  while (count > 0)
    {
      carry = arg1[0] & 1;
      for (i = MAX_SHORTS - 1; i >= 0; i--)
	{
	  carry <<= 8;
	  carry += arg1[i];
	  arg1[i] = (carry >> 1) & 0xff;
	  carry &= 1;
	}
      count--;
    }

  decode (arg1, lv, hv);
}
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */

static int
div_and_round_double (code, uns,
		      lnum_orig, hnum_orig, lden_orig, hden_orig,
		      lquo, hquo, lrem, hrem)
     enum tree_code code;
     int uns;
     HOST_WIDE_INT lnum_orig, hnum_orig;	/* num == numerator == dividend */
     HOST_WIDE_INT lden_orig, hden_orig;	/* den == denominator == divisor */
     HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
{
  int quo_neg = 0;
  short num[MAX_SHORTS + 1];	/* extra element for scaling.  */
  short den[MAX_SHORTS], quo[MAX_SHORTS];
  register int i, j, work;
  register int carry = 0;
  HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  if ((hden == 0) && (lden == 0))
    abort ();

  /* calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
	{
	  quo_neg = ~ quo_neg;
	  /* (minimum integer) / (-1) is the only overflow case.  */
	  if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
	    overflow = 1;
	}
      if (hden < 0)
	{
	  quo_neg = ~ quo_neg;
	  neg_double (lden, hden, &lden, &hden);
	}
    }

  if (hnum == 0 && hden == 0)
    {				/* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
      goto finish_up;
    }

  if (hnum == 0)
    {				/* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  bzero (quo, sizeof quo);

  bzero (num, sizeof num);	/* to zero 9th element */
  bzero (den, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* This code requires more than just hden == 0.
     We also have to require that we don't need more than three bytes
     to hold CARRY.  If we ever did need four bytes to hold it, we
     would lose part of it when computing WORK on the next round.  */
  if (hden == 0 && (((unsigned HOST_WIDE_INT) lden << 8) >> 8) == lden)
    {				/* simpler algorithm */
      /* hnum != 0 already checked.  */
      for (i = MAX_SHORTS - 1; i >= 0; i--)
	{
	  work = num[i] + (carry << 8);
	  quo[i] = work / (unsigned HOST_WIDE_INT) lden;
	  carry = work % (unsigned HOST_WIDE_INT) lden;
	}
    }
  else {			/* full double precision,
				   with thanks to Don Knuth's
				   "Seminumerical Algorithms".  */

    int quo_est, scale, num_hi_sig, den_hi_sig, quo_hi_sig;

    /* Find the highest non-zero divisor digit.  */
    for (i = MAX_SHORTS - 1; ; i--)
      if (den[i] != 0) {
	den_hi_sig = i;
	break;
      }
    for (i = MAX_SHORTS - 1; ; i--)
      if (num[i] != 0) {
	num_hi_sig = i;
	break;
      }
    quo_hi_sig = num_hi_sig - den_hi_sig + 1;

    /* Ensure that the first digit of the divisor is at least BASE/2.
       This is required by the quotient digit estimation algorithm.  */

    scale = BASE / (den[den_hi_sig] + 1);
    if (scale > 1) {		/* scale divisor and dividend */
      carry = 0;
      for (i = 0; i <= MAX_SHORTS - 1; i++) {
	work = (num[i] * scale) + carry;
	num[i] = work & 0xff;
	carry = work >> 8;
	if (num[i] != 0) num_hi_sig = i;
      }
      carry = 0;
      for (i = 0; i <= MAX_SHORTS - 1; i++) {
	work = (den[i] * scale) + carry;
	den[i] = work & 0xff;
	carry = work >> 8;
	if (den[i] != 0) den_hi_sig = i;
      }
    }

    for (i = quo_hi_sig; i > 0; i--) {
      /* guess the next quotient digit, quo_est, by dividing the first
	 two remaining dividend digits by the high order quotient digit.
	 quo_est is never low and is at most 2 high.  */

      int num_hi;		/* index of highest remaining dividend digit */

      num_hi = i + den_hi_sig;

      work = (num[num_hi] * BASE) + (num_hi > 0 ? num[num_hi - 1] : 0);
      if (num[num_hi] != den[den_hi_sig]) {
	quo_est = work / den[den_hi_sig];
      }
      else {
	quo_est = BASE - 1;
      }

      /* refine quo_est so it's usually correct, and at most one high.  */
      while ((den[den_hi_sig - 1] * quo_est)
	     > (((work - (quo_est * den[den_hi_sig])) * BASE)
		+ ((num_hi - 1) > 0 ? num[num_hi - 2] : 0)))
	quo_est--;

      /* Try QUO_EST as the quotient digit, by multiplying the
	 divisor by QUO_EST and subtracting from the remaining dividend.
	 Keep in mind that QUO_EST is the I - 1st digit.  */

      carry = 0;
      for (j = 0; j <= den_hi_sig; j++)
	{
	  work = num[i + j - 1] - (quo_est * den[j]) + carry;
	  num[i + j - 1] = digit;
	}

      /* if quo_est was high by one, then num[i] went negative and
	 we need to correct things.  */

      if (num[num_hi] < 0)
	{
	  quo_est--;
	  carry = 0;		/* add divisor back in */
	  for (j = 0; j <= den_hi_sig; j++)
	    {
	      work = num[i + j - 1] + den[j] + carry;
	      num[i + j - 1] = work;
	    }
	  num[num_hi] += carry;
	}

      /* store the quotient digit.  */
      quo[i - 1] = quo_est;
    }
  }

  decode (quo, lquo, hquo);

 finish_up:
  /* if result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:	/* round toward zero */
    case EXACT_DIV_EXPR:	/* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:	/* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
	{
	  /* quo = quo - 1;  */
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
		      lquo, hquo);
	}
      else return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:		/* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
	{
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
		      lquo, hquo);
	}
      else return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:	/* round to closest integer */
      {
	HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
	HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;

	/* get absolute values */
	if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
	if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);

	/* if (2 * abs (lrem) >= abs (lden)) */
	mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
		    labs_rem, habs_rem, &ltwice, &htwice);
	if (((unsigned HOST_WIDE_INT) habs_den
	     < (unsigned HOST_WIDE_INT) htwice)
	    || (((unsigned HOST_WIDE_INT) habs_den
		 == (unsigned HOST_WIDE_INT) htwice)
		&& ((unsigned HOST_WIDE_INT) labs_den
		    < (unsigned HOST_WIDE_INT) ltwice)))
	  {
	    if (quo_neg)
	      /* quo = quo - 1;  */
	      add_double (*lquo, *hquo,
			  (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
	    else
	      /* quo = quo + 1;  */
	      add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
			  lquo, hquo);
	  }
	else return overflow;
	break;
      }

    default:
      abort ();
    }

  /* compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
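
/* Illustrative sketch (exposition only): how CODE changes the rounding
   of -7 / 2.  TRUNC_DIV_EXPR rounds toward zero, FLOOR_DIV_EXPR toward
   negative infinity.  */
#if 0
static void
example_div_rounding ()
{
  HOST_WIDE_INT lq, hq, lr, hr;

  div_and_round_double (TRUNC_DIV_EXPR, 0,
			(HOST_WIDE_INT) -7, (HOST_WIDE_INT) -1,
			(HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
			&lq, &hq, &lr, &hr);
  /* Quotient -3, remainder -1.  */

  div_and_round_double (FLOOR_DIV_EXPR, 0,
			(HOST_WIDE_INT) -7, (HOST_WIDE_INT) -1,
			(HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
			&lq, &hq, &lr, &hr);
  /* Quotient -4, remainder 1.  */
}
#endif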
#ifndef REAL_ARITHMETIC
/* Effectively truncate a real value to represent
   the nearest possible value in a narrower mode.
   The result is actually represented in the same data type as the argument,
   but its value is usually different.  */

REAL_VALUE_TYPE
real_value_truncate (mode, arg)
     enum machine_mode mode;
     REAL_VALUE_TYPE arg;
{
  /* Make sure the value is actually stored in memory before we turn off
     the handler.  */
  REAL_VALUE_TYPE value;
  jmp_buf handler, old_handler;
  int handled;

  if (setjmp (handler))
    {
      error ("floating overflow");
      return arg;
    }
  handled = push_float_handler (handler, old_handler);
  value = REAL_VALUE_TRUNCATE (mode, arg);
  pop_float_handler (handled, old_handler);
  return value;
}

#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT

/* Check for infinity in an IEEE double precision number.  */

int
target_isinf (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign	 :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign	 :  1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
	      && u.big_endian.mantissa1 == 0
	      && u.big_endian.mantissa2 == 0);
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
	      && u.little_endian.mantissa1 == 0
	      && u.little_endian.mantissa2 == 0);
    }
}

/* Check whether an IEEE double precision number is a NaN.  */

int
target_isnan (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign	 :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign	 :  1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
	      && (u.big_endian.mantissa1 != 0
		  || u.big_endian.mantissa2 != 0));
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
	      && (u.little_endian.mantissa1 != 0
		  || u.little_endian.mantissa2 != 0));
    }
}

/* Check for a negative IEEE double precision number.  */

int
target_negative (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign	 :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign	 :  1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return u.big_endian.sign;
    }
  else
    {
      u.d = x;
      return u.little_endian.sign;
    }
}
#else /* Target not IEEE */

/* Let's assume other float formats don't have infinity.
   (This can be overridden by redefining REAL_VALUE_ISINF.)  */

int
target_isinf (x)
     REAL_VALUE_TYPE x;
{
  return 0;
}

/* Let's assume other float formats don't have NaNs.
   (This can be overridden by redefining REAL_VALUE_ISNAN.)  */

int
target_isnan (x)
     REAL_VALUE_TYPE x;
{
  return 0;
}

/* Let's assume other float formats don't have minus zero.
   (This can be overridden by redefining REAL_VALUE_NEGATIVE.)  */

int
target_negative (x)
     REAL_VALUE_TYPE x;
{
  return x < 0;
}
#endif /* Target not IEEE */
#endif /* no REAL_ARITHMETIC */
/* Split a tree IN into a constant and a variable part
   that could be combined with CODE to make IN.
   CODE must be a commutative arithmetic operation.
   Store the constant part into *CONP and the variable in *VARP.
   Return 1 if this was done; zero means the tree IN did not decompose
   this way.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
   Therefore, we must tell the caller whether the variable part
   was subtracted.  We do this by storing 1 or -1 into *VARSIGNP.
   The value stored is the coefficient for the variable term.
   The constant term we return should always be added;
   we negate it if necessary.  */

static int
split_tree (in, code, varp, conp, varsignp)
     tree in;
     enum tree_code code;
     tree *varp, *conp;
     int *varsignp;
{
  register tree outtype = TREE_TYPE (in);
  *varp = 0;
  *conp = 0;

  /* Strip any conversions that don't change the machine mode.  */
  while ((TREE_CODE (in) == NOP_EXPR
	  || TREE_CODE (in) == CONVERT_EXPR)
	 && (TYPE_MODE (TREE_TYPE (in))
	     == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
    in = TREE_OPERAND (in, 0);

  if (TREE_CODE (in) == code
      || (! FLOAT_TYPE_P (TREE_TYPE (in))
	  /* We can associate addition and subtraction together
	     (even though the C standard doesn't say so)
	     for integers because the value is not affected.
	     For reals, the value might be affected, so we can't.  */
	  && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
	      || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
    {
      enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
      if (code == INTEGER_CST)
	{
	  *conp = TREE_OPERAND (in, 0);
	  *varp = TREE_OPERAND (in, 1);
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
	  return 1;
	}
      if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
	{
	  *conp = TREE_OPERAND (in, 1);
	  *varp = TREE_OPERAND (in, 0);
	  *varsignp = 1;
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  if (TREE_CODE (in) == MINUS_EXPR)
	    {
	      /* If operation is subtraction and constant is second,
		 must negate it to get an additive constant.
		 And this cannot be done unless it is a manifest constant.
		 It could also be the address of a static variable.
		 We cannot negate that, so give up.  */
	      if (TREE_CODE (*conp) == INTEGER_CST)
		/* Subtracting from integer_zero_node loses for long long.  */
		*conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
	      else
		return 0;
	    }
	  return 1;
	}
      if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
	{
	  *conp = TREE_OPERAND (in, 0);
	  *varp = TREE_OPERAND (in, 1);
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
	  return 1;
	}
    }
  return 0;
}
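
/* Illustrative sketch (exposition only): what split_tree reports for a
   tree representing `x - 5' when CODE is PLUS_EXPR.  The argument T is
   hypothetical.  */
#if 0
static void
example_split_tree (t)
     tree t;			/* assume T represents `x - 5' */
{
  tree var, con;
  int varsign;

  if (split_tree (t, PLUS_EXPR, &var, &con, &varsign))
    ;	/* VAR is `x' with VARSIGN 1; CON is the already-negated
	   constant -5, since the constant part is always added.  */
}
#endif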
/* Combine two constants ARG1 and ARG2 under operation CODE
   to produce a new constant.
   We assume ARG1 and ARG2 have the same data type,
   or at least are the same kind of constant and the same machine mode.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */

static tree
const_binop (code, arg1, arg2, notrunc)
     enum tree_code code;
     register tree arg1, arg2;
     int notrunc;
{
  if (TREE_CODE (arg1) == INTEGER_CST)
    {
      register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
      register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
      HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
      HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
      HOST_WIDE_INT low, hi;
      HOST_WIDE_INT garbagel, garbageh;
      register tree t;
      int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
      int overflow = 0;

      switch (code)
	{
	case BIT_IOR_EXPR:
	  t = build_int_2 (int1l | int2l, int1h | int2h);
	  break;

	case BIT_XOR_EXPR:
	  t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
	  break;

	case BIT_AND_EXPR:
	  t = build_int_2 (int1l & int2l, int1h & int2h);
	  break;

	case BIT_ANDTC_EXPR:
	  t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
	  break;

	case RSHIFT_EXPR:
	  int2l = - int2l;
	case LSHIFT_EXPR:
	  /* It's unclear from the C standard whether shifts can overflow.
	     The following code ignores overflow; perhaps a C standard
	     interpretation ruling is needed.  */
	  lshift_double (int1l, int1h, int2l,
			 TYPE_PRECISION (TREE_TYPE (arg1)),
			 &low, &hi,
			 !uns);
	  t = build_int_2 (low, hi);
	  TREE_TYPE (t) = TREE_TYPE (arg1);
	  if (!notrunc)
	    force_fit_type (t, 0);
	  TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
	  return t;

	case RROTATE_EXPR:
	  int2l = - int2l;
	case LROTATE_EXPR:
	  lrotate_double (int1l, int1h, int2l,
			  TYPE_PRECISION (TREE_TYPE (arg1)),
			  &low, &hi);
	  t = build_int_2 (low, hi);
	  break;

	case PLUS_EXPR:
	  if (int1h == 0)
	    {
	      int2l += int1l;
	      if ((unsigned HOST_WIDE_INT) int2l < int1l)
		{
		  hi = int2h++;
		  overflow = int2h < hi;
		}
	      t = build_int_2 (int2l, int2h);
	      break;
	    }
	  if (int2h == 0)
	    {
	      int1l += int2l;
	      if ((unsigned HOST_WIDE_INT) int1l < int2l)
		{
		  hi = int1h++;
		  overflow = int1h < hi;
		}
	      t = build_int_2 (int1l, int1h);
	      break;
	    }
	  overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
	  t = build_int_2 (low, hi);
	  break;

	case MINUS_EXPR:
	  if (int2h == 0 && int2l == 0)
	    {
	      t = build_int_2 (int1l, int1h);
	      break;
	    }
	  neg_double (int2l, int2h, &low, &hi);
	  add_double (int1l, int1h, low, hi, &low, &hi);
	  overflow = overflow_sum_sign (hi, int2h, int1h);
	  t = build_int_2 (low, hi);
	  break;

	case MULT_EXPR:
	  /* Optimize simple cases.  */
	  if (int1h == 0)
	    {
	      unsigned HOST_WIDE_INT temp;

	      switch (int1l)
		{
		case 0:
		  t = build_int_2 (0, 0);
		  goto got_it;
		case 1:
		  t = build_int_2 (int2l, int2h);
		  goto got_it;
		case 2:
		  overflow = left_shift_overflows (int2h, 1);
		  temp = int2l + int2l;
		  int2h = (int2h << 1) + (temp < int2l);
		  t = build_int_2 (temp, int2h);
		  goto got_it;
#if 0 /* This code can lose carries.  */
		case 3:
		  temp = int2l + int2l + int2l;
		  int2h = int2h * 3 + (temp < int2l);
		  t = build_int_2 (temp, int2h);
		  goto got_it;
#endif
		case 4:
		  overflow = left_shift_overflows (int2h, 2);
		  temp = int2l + int2l;
		  int2h = (int2h << 2) + ((temp < int2l) << 1);
		  int2l = temp;
		  temp += temp;
		  int2h += (temp < int2l);
		  t = build_int_2 (temp, int2h);
		  goto got_it;
		case 8:
		  overflow = left_shift_overflows (int2h, 3);
		  temp = int2l + int2l;
		  int2h = (int2h << 3) + ((temp < int2l) << 2);
		  int2l = temp;
		  temp += temp;
		  int2h += (temp < int2l) << 1;
		  int2l = temp;
		  temp += temp;
		  int2h += (temp < int2l);
		  t = build_int_2 (temp, int2h);
		  goto got_it;
		}
	    }

	  if (int2h == 0)
	    switch (int2l)
	      {
	      case 0:
		t = build_int_2 (0, 0);
		goto got_it;
	      case 1:
		t = build_int_2 (int1l, int1h);
		goto got_it;
	      }

	  overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
	  t = build_int_2 (low, hi);
	  break;

	case TRUNC_DIV_EXPR:
	case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
	case EXACT_DIV_EXPR:
	  /* This is a shortcut for a common special case.
	     It reduces the number of tree nodes generated
	     and saves time.  */
	  if (int2h == 0 && int2l > 0
	      && TREE_TYPE (arg1) == sizetype
	      && int1h == 0 && int1l >= 0)
	    {
	      if (code == CEIL_DIV_EXPR)
		int1l += int2l - 1;
	      return size_int (int1l / int2l);
	    }

	case ROUND_DIV_EXPR:
	  if (int2h == 0 && int2l == 1)
	    {
	      t = build_int_2 (int1l, int1h);
	      break;
	    }
	  if (int1l == int2l && int1h == int2h)
	    {
	      if ((int1l | int1h) == 0)
		abort ();
	      t = build_int_2 (1, 0);
	      break;
	    }
	  overflow = div_and_round_double (code, uns,
					   int1l, int1h, int2l, int2h,
					   &low, &hi, &garbagel, &garbageh);
	  t = build_int_2 (low, hi);
	  break;

	case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
	case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
	  overflow = div_and_round_double (code, uns,
					   int1l, int1h, int2l, int2h,
					   &garbagel, &garbageh, &low, &hi);
	  t = build_int_2 (low, hi);
	  break;

	case MIN_EXPR:
	case MAX_EXPR:
	  if (uns)
	    {
	      low = (((unsigned HOST_WIDE_INT) int1h
		      < (unsigned HOST_WIDE_INT) int2h)
		     || (((unsigned HOST_WIDE_INT) int1h
			  == (unsigned HOST_WIDE_INT) int2h)
			 && ((unsigned HOST_WIDE_INT) int1l
			     < (unsigned HOST_WIDE_INT) int2l)));
	    }
	  else
	    {
	      low = ((int1h < int2h)
		     || ((int1h == int2h)
			 && ((unsigned HOST_WIDE_INT) int1l
			     < (unsigned HOST_WIDE_INT) int2l)));
	    }
	  if (low == (code == MIN_EXPR))
	    t = build_int_2 (int1l, int1h);
	  else
	    t = build_int_2 (int2l, int2h);
	  break;

	default:
	  abort ();
	}

    got_it:
      TREE_TYPE (t) = TREE_TYPE (arg1);
      TREE_OVERFLOW (t)
	= ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
	   | TREE_OVERFLOW (arg1)
	   | TREE_OVERFLOW (arg2));
      TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
				    | TREE_CONSTANT_OVERFLOW (arg1)
				    | TREE_CONSTANT_OVERFLOW (arg2));
      return t;
    }
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (TREE_CODE (arg1) == REAL_CST)
    {
      REAL_VALUE_TYPE d1;
      REAL_VALUE_TYPE d2;
      REAL_VALUE_TYPE value;
      tree t;

      d1 = TREE_REAL_CST (arg1);
      d2 = TREE_REAL_CST (arg2);
      if (setjmp (float_error))
	{
	  pedwarn ("floating overflow in constant expression");
	  return build (code, TREE_TYPE (arg1), arg1, arg2);
	}
      set_float_handler (float_error);

#ifdef REAL_ARITHMETIC
      REAL_ARITHMETIC (value, code, d1, d2);
#else
      switch (code)
	{
	case PLUS_EXPR:
	  value = d1 + d2;
	  break;

	case MINUS_EXPR:
	  value = d1 - d2;
	  break;

	case MULT_EXPR:
	  value = d1 * d2;
	  break;

	case RDIV_EXPR:
#ifndef REAL_INFINITY
	  if (d2 == 0)
	    abort ();
#endif
	  value = d1 / d2;
	  break;

	case MIN_EXPR:
	  value = MIN (d1, d2);
	  break;

	case MAX_EXPR:
	  value = MAX (d1, d2);
	  break;

	default:
	  abort ();
	}
#endif /* no REAL_ARITHMETIC */
      t = build_real (TREE_TYPE (arg1),
		      real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
      set_float_handler (NULL_PTR);
      return t;
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  if (TREE_CODE (arg1) == COMPLEX_CST)
    {
      register tree r1 = TREE_REALPART (arg1);
      register tree i1 = TREE_IMAGPART (arg1);
      register tree r2 = TREE_REALPART (arg2);
      register tree i2 = TREE_IMAGPART (arg2);
      register tree t;

      switch (code)
	{
	case PLUS_EXPR:
	  t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
			     const_binop (PLUS_EXPR, i1, i2, notrunc));
	  break;

	case MINUS_EXPR:
	  t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
			     const_binop (MINUS_EXPR, i1, i2, notrunc));
	  break;

	case MULT_EXPR:
	  t = build_complex (const_binop (MINUS_EXPR,
					  const_binop (MULT_EXPR,
						       r1, r2, notrunc),
					  const_binop (MULT_EXPR,
						       i1, i2, notrunc),
					  notrunc),
			     const_binop (PLUS_EXPR,
					  const_binop (MULT_EXPR,
						       r1, i2, notrunc),
					  const_binop (MULT_EXPR,
						       i1, r2, notrunc),
					  notrunc));
	  break;

	case RDIV_EXPR:
	  {
	    register tree magsquared
	      = const_binop (PLUS_EXPR,
			     const_binop (MULT_EXPR, r2, r2, notrunc),
			     const_binop (MULT_EXPR, i2, i2, notrunc),
			     notrunc);
	    t = build_complex (const_binop (RDIV_EXPR,
					    const_binop (PLUS_EXPR,
							 const_binop (MULT_EXPR, r1, r2, notrunc),
							 const_binop (MULT_EXPR, i1, i2, notrunc),
							 notrunc),
					    magsquared, notrunc),
			       const_binop (RDIV_EXPR,
					    const_binop (MINUS_EXPR,
							 const_binop (MULT_EXPR, i1, r2, notrunc),
							 const_binop (MULT_EXPR, r1, i2, notrunc),
							 notrunc),
					    magsquared, notrunc));
	  }
	  break;

	default:
	  abort ();
	}
      TREE_TYPE (t) = TREE_TYPE (arg1);
      return t;
    }
  return 0;
}
/* Return an INTEGER_CST with value V and type from `sizetype'.  */

tree
size_int (number)
     unsigned int number;
{
  register tree t;
  /* Type-size nodes already made for small sizes.  */
  static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];

  if (number < 2*HOST_BITS_PER_WIDE_INT + 1
      && size_table[number] != 0)
    return size_table[number];
  if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
    {
      push_obstacks_nochange ();
      /* Make this a permanent node.  */
      end_temporary_allocation ();
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
      size_table[number] = t;
      pop_obstacks ();
    }
  else
    {
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
    }
  return t;
}

/* Combine operands OP1 and OP2 with arithmetic operation CODE.
   CODE is a tree code.  Data type is taken from `sizetype'.
   If the operands are constant, so is the result.  */

tree
size_binop (code, arg0, arg1)
     enum tree_code code;
     tree arg0, arg1;
{
  /* Handle the special case of two integer constants faster.  */
  if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
    {
      /* And some specific cases even faster than that.  */
      if (code == PLUS_EXPR
	  && TREE_INT_CST_LOW (arg0) == 0
	  && TREE_INT_CST_HIGH (arg0) == 0)
	return arg1;
      if (code == MINUS_EXPR
	  && TREE_INT_CST_LOW (arg1) == 0
	  && TREE_INT_CST_HIGH (arg1) == 0)
	return arg0;
      if (code == MULT_EXPR
	  && TREE_INT_CST_LOW (arg0) == 1
	  && TREE_INT_CST_HIGH (arg0) == 0)
	return arg1;
      /* Handle general case of two integer constants.  */
      return const_binop (code, arg0, arg1, 1);
    }

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return error_mark_node;

  return fold (build (code, sizetype, arg0, arg1));
}
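
/* Illustrative sketch (exposition only): the fast paths of size_binop.
   Adding zero or multiplying by one returns the other operand untouched;
   two genuine constants fold through const_binop.  */
#if 0
static void
example_size_binop ()
{
  tree n = size_int (12);

  tree a = size_binop (PLUS_EXPR, size_int (0), n);	/* just N */
  tree b = size_binop (MULT_EXPR, size_int (1), n);	/* just N */
  tree c = size_binop (MINUS_EXPR, n, size_int (4));	/* size_int (8) */
}
#endif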
/* Given T, a tree representing type conversion of ARG1, a constant,
   return a constant tree representing the result of conversion.  */

static tree
fold_convert (t, arg1)
     register tree t;
     register tree arg1;
{
  register tree type = TREE_TYPE (t);

  if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
	{
	  /* Given an integer constant, make new constant with new type,
	     appropriately sign-extended or truncated.  */
	  t = build_int_2 (TREE_INT_CST_LOW (arg1),
			   TREE_INT_CST_HIGH (arg1));
	  TREE_TYPE (t) = type;
	  /* Indicate an overflow if (1) ARG1 already overflowed,
	     or (2) force_fit_type indicates an overflow.
	     Tell force_fit_type that an overflow has already occurred
	     if ARG1 is a too-large unsigned value and T is signed.  */
	  TREE_OVERFLOW (t)
	    = (TREE_OVERFLOW (arg1)
	       | force_fit_type (t,
				 (TREE_INT_CST_HIGH (arg1) < 0
				  & (TREE_UNSIGNED (type)
				     < TREE_UNSIGNED (TREE_TYPE (arg1))))));
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
	}
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      else if (TREE_CODE (arg1) == REAL_CST)
	{
	  REAL_VALUE_TYPE l, x, u;

	  l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
	  x = TREE_REAL_CST (arg1);
	  u = real_value_from_int_cst (TYPE_MAX_VALUE (type));

	  /* See if X will be in range after truncation towards 0.
	     To compensate for truncation, move the bounds away from 0,
	     but reject if X exactly equals the adjusted bounds.  */
#ifdef REAL_ARITHMETIC
	  REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
	  REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
#else
	  l--;
	  u++;
#endif
	  if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
	    {
	      pedwarn ("real constant out of range for integer conversion");
	      return t;
	    }
#ifndef REAL_ARITHMETIC
	  {
	    REAL_VALUE_TYPE d;
	    HOST_WIDE_INT low, high;
	    HOST_WIDE_INT half_word
	      = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);

	    d = TREE_REAL_CST (arg1);
	    if (d < 0)
	      d = -d;

	    high = (HOST_WIDE_INT) (d / half_word / half_word);
	    d -= (REAL_VALUE_TYPE) high * half_word * half_word;
	    if (d >= (REAL_VALUE_TYPE) half_word * half_word / 2)
	      {
		low = d - (REAL_VALUE_TYPE) half_word * half_word / 2;
		low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
	      }
	    else
	      low = (HOST_WIDE_INT) d;
	    if (TREE_REAL_CST (arg1) < 0)
	      neg_double (low, high, &low, &high);
	    t = build_int_2 (low, high);
	  }
#else
	  {
	    HOST_WIDE_INT low, high;
	    REAL_VALUE_TO_INT (&low, &high, (TREE_REAL_CST (arg1)));
	    t = build_int_2 (low, high);
	  }
#endif
	  TREE_TYPE (t) = type;
	  force_fit_type (t, 0);
	}
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      TREE_TYPE (t) = type;
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      if (TREE_CODE (arg1) == INTEGER_CST)
	return build_real_from_int_cst (type, arg1);
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      if (TREE_CODE (arg1) == REAL_CST)
	{
	  if (setjmp (float_error))
	    {
	      pedwarn ("floating overflow in constant expression");
	      return t;
	    }
	  set_float_handler (float_error);

	  t = build_real (type, real_value_truncate (TYPE_MODE (type),
						     TREE_REAL_CST (arg1)));
	  set_float_handler (NULL_PTR);
	  return t;
	}
    }
  TREE_CONSTANT (t) = 1;
  return t;
}
/* Return an expr equal to X but certainly not valid as an lvalue.
   Also make sure it is not valid as a null pointer constant.  */

tree
non_lvalue (x)
     tree x;
{
  tree result;

  /* These things are certainly not lvalues.  */
  if (TREE_CODE (x) == NON_LVALUE_EXPR
      || TREE_CODE (x) == INTEGER_CST
      || TREE_CODE (x) == REAL_CST
      || TREE_CODE (x) == STRING_CST
      || TREE_CODE (x) == ADDR_EXPR)
    {
      if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
	{
	  /* Use NOP_EXPR instead of NON_LVALUE_EXPR
	     so convert_for_assignment won't strip it.
	     This is so this 0 won't be treated as a null pointer constant.  */
	  result = build1 (NOP_EXPR, TREE_TYPE (x), x);
	  TREE_CONSTANT (result) = TREE_CONSTANT (x);
	  return result;
	}
      return x;
    }

  result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
  TREE_CONSTANT (result) = TREE_CONSTANT (x);
  return result;
}

/* When pedantic, return an expr equal to X but certainly not valid as a
   pedantic lvalue.  Otherwise, return X.  */

tree
pedantic_non_lvalue (x)
     tree x;
{
  if (pedantic)
    return non_lvalue (x);
  else
    return x;
}
/* Given a tree comparison code, return the code that is the logical inverse
   of the given code.  It is not safe to do this for floating-point
   comparisons, except for NE_EXPR and EQ_EXPR.  */

static enum tree_code
invert_tree_comparison (code)
     enum tree_code code;

/* Similar, but return the comparison that results if the operands are
   swapped.  This is safe for floating-point.  */

static enum tree_code
swap_tree_comparison (code)
     enum tree_code code;
/* Return nonzero if two operands are necessarily equal.
   If ONLY_CONST is non-zero, only return non-zero for constants.
   This function tests whether the operands are indistinguishable;
   it does not test whether they are equal using C's == operation.
   The distinction is important for IEEE floating point, because
   (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
   (2) two NaNs may be indistinguishable, but NaN!=NaN.  */

int
operand_equal_p (arg0, arg1, only_const)
     tree arg0, arg1;
     int only_const;
{
  /* If both types don't have the same signedness, then we can't consider
     them equal.  We must check this before the STRIP_NOPS calls
     because they may change the signedness of the arguments.  */
  if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
    return 0;

  STRIP_NOPS (arg0);
  STRIP_NOPS (arg1);

  /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
     We don't care about side effects in that case because the SAVE_EXPR
     takes care of that for us.  */
  if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
    return ! only_const;

  if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
    return 0;

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == ADDR_EXPR
      && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
    return 1;

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == INTEGER_CST
      && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
      && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
    return 1;

  /* Detect when real constants are equal.  */
  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == REAL_CST)
    return !bcmp (&TREE_REAL_CST (arg0), &TREE_REAL_CST (arg1),
		  sizeof (REAL_VALUE_TYPE));

  if (only_const)
    return 0;

  if (TREE_CODE (arg0) != TREE_CODE (arg1))
    return 0;
  /* This is needed for conversions and for COMPONENT_REF.
     Might as well play it safe and always test this.  */
  if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
    return 0;

  switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
    {
    case '1':
      /* Two conversions are equal only if signedness and modes match.  */
      if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
	  && (TREE_UNSIGNED (TREE_TYPE (arg0))
	      != TREE_UNSIGNED (TREE_TYPE (arg1))))
	return 0;

      return operand_equal_p (TREE_OPERAND (arg0, 0),
			      TREE_OPERAND (arg1, 0), 0);

    case '2':
    case '<':
      return (operand_equal_p (TREE_OPERAND (arg0, 0),
			       TREE_OPERAND (arg1, 0), 0)
	      && operand_equal_p (TREE_OPERAND (arg0, 1),
				  TREE_OPERAND (arg1, 1), 0));

    case 'r':
      switch (TREE_CODE (arg0))
	{
	case INDIRECT_REF:
	  return operand_equal_p (TREE_OPERAND (arg0, 0),
				  TREE_OPERAND (arg1, 0), 0);

	case COMPONENT_REF:
	case ARRAY_REF:
	  return (operand_equal_p (TREE_OPERAND (arg0, 0),
				   TREE_OPERAND (arg1, 0), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 1),
				      TREE_OPERAND (arg1, 1), 0));

	case BIT_FIELD_REF:
	  return (operand_equal_p (TREE_OPERAND (arg0, 0),
				   TREE_OPERAND (arg1, 0), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 1),
				      TREE_OPERAND (arg1, 1), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 2),
				      TREE_OPERAND (arg1, 2), 0));
	}
    }

  return 0;
}
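
/* Illustrative sketch (exposition only): the IEEE distinction drawn in
   the comment above operand_equal_p.  The arguments are hypothetical
   REAL_CST nodes for 0.0 and -0.0; they compare equal under C's `=='
   but are distinguishable here, so the function reports 0.  */
#if 0
static void
example_operand_equal_p (zero, neg_zero)
     tree zero, neg_zero;
{
  if (! operand_equal_p (zero, neg_zero, 1))
    ;				/* reached: the two bit patterns differ */
}
#endif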
/* Similar to operand_equal_p, but see if ARG0 might have been made by
   shorten_compare from ARG1 when ARG1 was being compared with OTHER.

   When in doubt, return 0.  */

static int
operand_equal_for_comparison_p (arg0, arg1, other)
     tree arg0, arg1;
     tree other;
{
  int unsignedp1, unsignedpo;
  tree primarg1, primother;
  int correct_width;

  if (operand_equal_p (arg0, arg1, 0))
    return 1;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
    return 0;

  /* Duplicate what shorten_compare does to ARG1 and see if that gives the
     actual comparison operand, ARG0.

     First throw away any conversions to wider types
     already present in the operands.  */

  primarg1 = get_narrower (arg1, &unsignedp1);
  primother = get_narrower (other, &unsignedpo);

  correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
  if (unsignedp1 == unsignedpo
      && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
      && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
    {
      tree type = TREE_TYPE (arg0);

      /* Make sure shorter operand is extended the right way
	 to match the longer operand.  */
      primarg1 = convert (signed_or_unsigned_type (unsignedp1,
						   TREE_TYPE (primarg1)),
			  primarg1);

      if (operand_equal_p (arg0, convert (type, primarg1), 0))
	return 1;
    }

  return 0;
}

/* See if ARG is an expression that is either a comparison or is performing
   arithmetic on comparisons.  The comparisons must only be comparing
   two different values, which will be stored in *CVAL1 and *CVAL2; if
   they are non-zero it means that some operands have already been found.
   No variables may be used anywhere else in the expression except in the
   comparisons.  If SAVE_P is true it means we removed a SAVE_EXPR around
   the expression and save_expr needs to be called with CVAL1 and CVAL2.

   If this is true, return 1.  Otherwise, return zero.  */

static int
twoval_comparison_p (arg, cval1, cval2, save_p)
     tree arg;
     tree *cval1, *cval2;
     int *save_p;
{
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
	       || code == COMPOUND_EXPR))
    class = '2';
  else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
    {
      /* If we've already found a CVAL1 or CVAL2, this expression is
	 too complex to handle.  */
      if (*cval1 || *cval2)
	return 0;

      class = '1';
      *save_p = 1;
    }

  switch (class)
    {
    case '1':
      return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);

    case '2':
      return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
	      && twoval_comparison_p (TREE_OPERAND (arg, 1),
				      cval1, cval2, save_p));

    case 'e':
      if (code == COND_EXPR)
	return (twoval_comparison_p (TREE_OPERAND (arg, 0),
				     cval1, cval2, save_p)
		&& twoval_comparison_p (TREE_OPERAND (arg, 1),
					cval1, cval2, save_p)
		&& twoval_comparison_p (TREE_OPERAND (arg, 2),
					cval1, cval2, save_p));
      return 0;

    case '<':
      /* First see if we can handle the first operand, then the second.  For
	 the second operand, we know *CVAL1 can't be zero.  It must be that
	 one side of the comparison is each of the values; test for the
	 case where this isn't true by failing if the two operands
	 are the same.  */

      if (operand_equal_p (TREE_OPERAND (arg, 0),
			   TREE_OPERAND (arg, 1), 0))
	return 0;

      if (*cval1 == 0)
	*cval1 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
	;
      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
	;
      else
	return 0;

      if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
	;
      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 1);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
	;
      else
	return 0;

      return 1;
    }

  return 0;
}
/* ARG is a tree that is known to contain just arithmetic operations and
   comparisons.  Evaluate the operations in the tree substituting NEW0 for
   any occurrence of OLD0 as an operand of a comparison and likewise for
   NEW1 and OLD1.  */

static tree
eval_subst (arg, old0, new0, old1, new1)
     tree arg;
     tree old0, new0, old1, new1;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
    class = '2';

  switch (class)
    {
    case '1':
      return fold (build1 (code, type,
			   eval_subst (TREE_OPERAND (arg, 0),
				       old0, new0, old1, new1)));

    case '2':
      return fold (build (code, type,
			  eval_subst (TREE_OPERAND (arg, 0),
				      old0, new0, old1, new1),
			  eval_subst (TREE_OPERAND (arg, 1),
				      old0, new0, old1, new1)));

    case 'e':
      switch (code)
	{
	case SAVE_EXPR:
	  return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);

	case COMPOUND_EXPR:
	  return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);

	case COND_EXPR:
	  return fold (build (code, type,
			      eval_subst (TREE_OPERAND (arg, 0),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 1),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 2),
					  old0, new0, old1, new1)));
	}

    case '<':
      {
	tree arg0 = TREE_OPERAND (arg, 0);
	tree arg1 = TREE_OPERAND (arg, 1);

	/* We need to check both for exact equality and tree equality.  The
	   former will be true if the operand has a side-effect.  In that
	   case, we know the operand occurred exactly once.  */

	if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
	  arg0 = new0;
	else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
	  arg0 = new1;

	if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
	  arg1 = new0;
	else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
	  arg1 = new1;

	return fold (build (code, type, arg0, arg1));
      }
    }

  return arg;
}
/* Return a tree for the case when the result of an expression is RESULT
   converted to TYPE and OMITTED was previously an operand of the expression
   but is now not needed (e.g., we folded OMITTED * 0).

   If OMITTED has side effects, we must evaluate it.  Otherwise, just do
   the conversion of RESULT to TYPE.  */

static tree
omit_one_operand (type, result, omitted)
     tree type, result, omitted;
{
  tree t = convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build (COMPOUND_EXPR, type, omitted, t);

  return non_lvalue (t);
}

/* Return a simplified tree node for the truth-negation of ARG.  This
   never alters ARG itself.  We assume that ARG is an operation that
   returns a truth value (0 or 1).  */

tree
invert_truthvalue (arg)
     tree arg;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);

  if (code == ERROR_MARK)
    return arg;

  /* If this is a comparison, we can simply invert it, except for
     floating-point non-equality comparisons, in which case we just
     enclose a TRUTH_NOT_EXPR around what we have.  */

  if (TREE_CODE_CLASS (code) == '<')
    {
      if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	  && code != NE_EXPR && code != EQ_EXPR)
	return build1 (TRUTH_NOT_EXPR, type, arg);
      else
	return build (invert_tree_comparison (code), type,
		      TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
    }

  switch (code)
    {
    case INTEGER_CST:
      return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
					 && TREE_INT_CST_HIGH (arg) == 0, 0));

    case TRUTH_AND_EXPR:
      return build (TRUTH_OR_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_OR_EXPR:
      return build (TRUTH_AND_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_XOR_EXPR:
      /* Here we can invert either operand.  We invert the first operand
	 unless the second operand is a TRUTH_NOT_EXPR in which case our
	 result is the XOR of the first operand with the inside of the
	 negation of the second operand.  */

      if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
	return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
		      TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
      else
	return build (TRUTH_XOR_EXPR, type,
		      invert_truthvalue (TREE_OPERAND (arg, 0)),
		      TREE_OPERAND (arg, 1));

    case TRUTH_ANDIF_EXPR:
      return build (TRUTH_ORIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_ORIF_EXPR:
      return build (TRUTH_ANDIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_NOT_EXPR:
      return TREE_OPERAND (arg, 0);

    case COND_EXPR:
      return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)),
		    invert_truthvalue (TREE_OPERAND (arg, 2)));

    case COMPOUND_EXPR:
      return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case NON_LVALUE_EXPR:
      return invert_truthvalue (TREE_OPERAND (arg, 0));

    case NOP_EXPR:
    case CONVERT_EXPR:
    case FLOAT_EXPR:
      return build1 (TREE_CODE (arg), type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)));

    case BIT_AND_EXPR:
      if (! integer_onep (TREE_OPERAND (arg, 1)))
	break;
      return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
    }

  abort ();
}
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
	(A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.

   If this optimization cannot be done, 0 will be returned.  */

static tree
distribute_bit_expr (code, type, arg0, arg1)
     enum tree_code code;
     tree type;
     tree arg0, arg1;
{
  tree common;
  tree left, right;

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      || TREE_CODE (arg0) == code
      || (TREE_CODE (arg0) != BIT_AND_EXPR
	  && TREE_CODE (arg0) != BIT_IOR_EXPR))
    return 0;

  if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 0);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 0);
    }
  else
    return 0;

  return fold (build (TREE_CODE (arg0), type, common,
		      fold (build (code, type, left, right))));
}
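
/* Illustrative sketch (exposition only): the transformation named above,
   (A | B) & (A | C) ==> A | (B & C), with B and C constants so the inner
   fold collapses them.  The argument A is hypothetical and assumed to
   have type `integer_type_node'.  */
#if 0
static tree
example_distribute (a)
     tree a;
{
  tree c1 = build_int_2 (0xf0, 0);
  tree c2 = build_int_2 (0x3c, 0);
  tree t0, t1;

  TREE_TYPE (c1) = integer_type_node;
  TREE_TYPE (c2) = integer_type_node;
  t0 = build (BIT_IOR_EXPR, integer_type_node, a, c1);
  t1 = build (BIT_IOR_EXPR, integer_type_node, a, c2);

  /* Yields `a | 0x30', since 0xf0 & 0x3c == 0x30.  */
  return distribute_bit_expr (BIT_AND_EXPR, integer_type_node, t0, t1);
}
#endif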
/* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
   starting at BITPOS.  The field is unsigned if UNSIGNEDP is non-zero.  */

static tree
make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
     tree inner, type;
     int bitsize, bitpos;
     int unsignedp;
{
  tree result = build (BIT_FIELD_REF, type, inner,
		       size_int (bitsize), size_int (bitpos));

  TREE_UNSIGNED (result) = unsignedp;

  return result;
}

/* Optimize a bit-field compare.

   There are two cases:  First is a compare against a constant and the
   second is a comparison of two items where the fields are at the same
   bit position relative to the start of a chunk (byte, halfword, word)
   large enough to contain it.  In these cases we can avoid the shift
   implicit in bitfield extractions.

   For constants, we emit a compare of the shifted constant with the
   BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
   compared.  For two fields at the same position, we do the ANDs with the
   similar mask and compare the result of the ANDs.

   CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
   COMPARE_TYPE is the type of the comparison, and LHS and RHS
   are the left and right operands of the comparison, respectively.

   If the optimization described above can be done, we return the resulting
   tree.  Otherwise we return zero.  */
static tree
optimize_bit_field_compare (code, compare_type, lhs, rhs)
     enum tree_code code;
     tree compare_type;
     tree lhs, rhs;
{
  int lbitpos, lbitsize, rbitpos, rbitsize;
  int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
  tree type = TREE_TYPE (lhs);
  tree signed_type, unsigned_type;
  int const_p = TREE_CODE (rhs) == INTEGER_CST;
  enum machine_mode lmode, rmode, lnmode, rnmode;
  int lunsignedp, runsignedp;
  int lvolatilep = 0, rvolatilep = 0;
  tree linner, rinner;
  tree mask;
  tree offset;

  /* Get all the information about the extractions being done.  If the bit size
     is the same as the size of the underlying object, we aren't doing an
     extraction at all and so can do nothing.  */
  linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
				&lunsignedp, &lvolatilep);
  if (lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
      || offset != 0)
    return 0;

  if (!const_p)
    {
      /* If this is not a constant, we can only do something if bit positions,
	 sizes, and signedness are the same.  */
      rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
				    &rmode, &runsignedp, &rvolatilep);

      if (lbitpos != rbitpos || lbitsize != rbitsize
	  || lunsignedp != runsignedp || offset != 0)
	return 0;
    }

  /* See if we can find a mode to refer to this field.  We should be able to,
     but fail if we can't.  */
  lnmode = get_best_mode (lbitsize, lbitpos,
			  TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
			  lvolatilep);
  if (lnmode == VOIDmode)
    return 0;

  /* Set signed and unsigned types of the precision of this mode for the
     shifts below.  */
  signed_type = type_for_mode (lnmode, 0);
  unsigned_type = type_for_mode (lnmode, 1);

  if (! const_p)
    {
      rnmode = get_best_mode (rbitsize, rbitpos,
			      TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
			      rvolatilep);
      if (rnmode == VOIDmode)
	return 0;
    }

  /* Compute the bit position and size for the new reference and our offset
     within it.  If the new reference is the same size as the original, we
     won't optimize anything, so return zero.  */
  lnbitsize = GET_MODE_BITSIZE (lnmode);
  lnbitpos = lbitpos & ~ (lnbitsize - 1);
  lbitpos -= lnbitpos;
  if (lnbitsize == lbitsize)
    return 0;

  if (! const_p)
    {
      rnbitsize = GET_MODE_BITSIZE (rnmode);
      rnbitpos = rbitpos & ~ (rnbitsize - 1);
      rbitpos -= rnbitpos;
      if (rnbitsize == rbitsize)
	return 0;
    }

#if BYTES_BIG_ENDIAN
  lbitpos = lnbitsize - lbitsize - lbitpos;
#endif

  /* Make the mask to be used against the extracted field.  */
  mask = build_int_2 (~0, ~0);
  TREE_TYPE (mask) = unsigned_type;
  force_fit_type (mask, 0);
  mask = convert (unsigned_type, mask);
  mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
  mask = const_binop (RSHIFT_EXPR, mask,
		      size_int (lnbitsize - lbitsize - lbitpos), 0);

  if (! const_p)
    /* If not comparing with constant, just rework the comparison
       and return.  */
    return build (code, compare_type,
		  build (BIT_AND_EXPR, unsigned_type,
			 make_bit_field_ref (linner, unsigned_type,
					     lnbitsize, lnbitpos, 1),
			 mask),
		  build (BIT_AND_EXPR, unsigned_type,
			 make_bit_field_ref (rinner, unsigned_type,
					     rnbitsize, rnbitpos, 1),
			 mask));

  /* Otherwise, we are handling the constant case.  See if the constant is too
     big for the field.  Warn and return a tree for 0 (false) if so.  We do
     this not only for its own sake, but to avoid having to test for this
     error case below.  If we didn't, we might generate wrong code.

     For unsigned fields, the constant shifted right by the field length should
     be all zero.  For signed fields, the high-order bits should agree with
     the sign bit.  */

  if (lunsignedp)
    {
      if (! integer_zerop (const_binop (RSHIFT_EXPR,
					convert (unsigned_type, rhs),
					size_int (lbitsize), 0)))
	{
	  warning ("comparison is always %s due to width of bitfield",
		   code == NE_EXPR ? "one" : "zero");
	  return convert (compare_type,
			  (code == NE_EXPR
			   ? integer_one_node : integer_zero_node));
	}
    }
  else
    {
      tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
			      size_int (lbitsize - 1), 0);
      if (! integer_zerop (tem) && ! integer_all_onesp (tem))
	{
	  warning ("comparison is always %s due to width of bitfield",
		   code == NE_EXPR ? "one" : "zero");
	  return convert (compare_type,
			  (code == NE_EXPR
			   ? integer_one_node : integer_zero_node));
	}
    }

  /* Single-bit compares should always be against zero.  */
  if (lbitsize == 1 && ! integer_zerop (rhs))
    {
      code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
      rhs = convert (type, integer_zero_node);
    }

  /* Make a new bitfield reference, shift the constant over the
     appropriate number of bits and mask it with the computed mask
     (in case this was a signed field).  If we changed it, make a new one.  */
  lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
  if (lvolatilep)
    {
      TREE_SIDE_EFFECTS (lhs) = 1;
      TREE_THIS_VOLATILE (lhs) = 1;
    }

  rhs = fold (const_binop (BIT_AND_EXPR,
			   const_binop (LSHIFT_EXPR,
					convert (unsigned_type, rhs),
					size_int (lbitpos), 0),
			   mask, 0));

  return build (code, compare_type,
		build (BIT_AND_EXPR, unsigned_type, lhs, mask),
		rhs);
}
2454 /* Subroutine for fold_truthop: decode a field reference.
2456 If EXP is a comparison reference, we return the innermost reference.
2458 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2459 set to the starting bit number.
2461 If the innermost field can be completely contained in a mode-sized
2462 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2464 *PVOLATILEP is set to 1 if any expression encountered is volatile;
2465 otherwise it is not changed.
2467 *PUNSIGNEDP is set to the signedness of the field.
2469 *PMASK is set to the mask used. This is either contained in a
2470 BIT_AND_EXPR or derived from the width of the field.
2472 Return 0 if this is not a component reference or is one that we can't
2473 do anything with. */
2476 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2479 int *pbitsize, *pbitpos;
2480 enum machine_mode *pmode;
2481 int *punsignedp, *pvolatilep;
2488 /* All the optimizations using this function assume integer fields.
2489 There are problems with FP fields since the type_for_size call
2490 below can fail for, e.g., XFmode. */
2491 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
2496 if (TREE_CODE (exp) == BIT_AND_EXPR)
2498 mask = TREE_OPERAND (exp, 1);
2499 exp = TREE_OPERAND (exp, 0);
2500 STRIP_NOPS (exp); STRIP_NOPS (mask);
2501 if (TREE_CODE (mask) != INTEGER_CST)
2505 if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
2506 && TREE_CODE (exp) != BIT_FIELD_REF)
2509 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2510 punsignedp, pvolatilep);
2511 if (*pbitsize < 0 || offset != 0)
2516 tree unsigned_type = type_for_size (*pbitsize, 1);
2517 int precision = TYPE_PRECISION (unsigned_type);
2519 mask = build_int_2 (~0, ~0);
2520 TREE_TYPE (mask) = unsigned_type;
2521 force_fit_type (mask, 0);
2522 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2523 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
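/* Illustration, assuming precision == 16 and *pbitsize == 3: the all-ones
   mask becomes 0xe000 after the left shift and 0x0007 after the right
   shift -- exactly *pbitsize ones in the low-order bit positions.  */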
2530 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order bit positions. */
2534 all_ones_mask_p (mask, size)
2538 tree type = TREE_TYPE (mask);
2539 int precision = TYPE_PRECISION (type);
2542 tmask = build_int_2 (~0, ~0);
2543 TREE_TYPE (tmask) = signed_type (type);
2544 force_fit_type (tmask, 0);
2546 operand_equal_p (mask,
2547 const_binop (RSHIFT_EXPR,
2548 const_binop (LSHIFT_EXPR, tmask,
2549 size_int (precision - size), 0),
2550 size_int (precision - size), 0),
2554 /* Subroutine for fold_truthop: determine if an operand is simple enough
2555 to be evaluated unconditionally. */
2558 simple_operand_p (exp)
2561 /* Strip any conversions that don't change the machine mode. */
2562 while ((TREE_CODE (exp) == NOP_EXPR
2563 || TREE_CODE (exp) == CONVERT_EXPR)
2564 && (TYPE_MODE (TREE_TYPE (exp))
2565 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2566 exp = TREE_OPERAND (exp, 0);
2568 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2569 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2570 && ! TREE_ADDRESSABLE (exp)
2571 && ! TREE_THIS_VOLATILE (exp)
2572 && ! DECL_NONLOCAL (exp)
2573 /* Don't regard global variables as simple. They may be
2574 allocated in ways unknown to the compiler (shared memory,
2575 #pragma weak, etc). */
2576 && ! TREE_PUBLIC (exp)
2577 && ! DECL_EXTERNAL (exp)
2578 /* Loading a static variable is unduly expensive, but global
2579 registers aren't expensive. */
2580 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2583 /* Subroutine for fold_truthop: try to optimize a range test.
2585 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2587 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2588 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2589 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of
2592 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2593 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2594 larger than HI_CST (they may be equal).
2596 We return the simplified tree or 0 if no optimization is possible. */
2599 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2600 enum tree_code jcode, lo_code, hi_code;
2601 tree type, var, lo_cst, hi_cst;
2604 enum tree_code rcode;
2606 /* See if this is a range test and normalize the constant terms. */
2608 if (jcode == TRUTH_AND_EXPR)
2613 /* See if we have VAR != CST && VAR != CST+1. */
2614 if (! (hi_code == NE_EXPR
2615 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2616 && tree_int_cst_equal (integer_one_node,
2617 const_binop (MINUS_EXPR,
2618 hi_cst, lo_cst, 0))))
2626 if (hi_code == LT_EXPR)
2627 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2628 else if (hi_code != LE_EXPR)
2631 if (lo_code == GT_EXPR)
2632 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2634 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
2647 /* See if we have VAR == CST || VAR == CST+1. */
2648 if (! (hi_code == EQ_EXPR
2649 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2650 && tree_int_cst_equal (integer_one_node,
2651 const_binop (MINUS_EXPR,
2652 hi_cst, lo_cst, 0))))
2660 if (hi_code == GE_EXPR)
2661 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2662 else if (hi_code != GT_EXPR)
2665 if (lo_code == LE_EXPR)
2666 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2668 /* We now have VAR < LO_CST || VAR > HI_CST. */
2677 /* When normalizing, it is possible to both increment the smaller constant
2678 and decrement the larger constant. See if they are still ordered. */
2679 if (tree_int_cst_lt (hi_cst, lo_cst))
2682 /* Fail if VAR isn't an integer. */
2683 utype = TREE_TYPE (var);
2684 if (! INTEGRAL_TYPE_P (utype))
2687 /* The range test is invalid if subtracting the two constants results
2688 in overflow. This can happen in traditional mode. */
2689 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2690 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2693 if (! TREE_UNSIGNED (utype))
2695 utype = unsigned_type (utype);
2696 var = convert (utype, var);
2697 lo_cst = convert (utype, lo_cst);
2698 hi_cst = convert (utype, hi_cst);
2701 return fold (convert (type,
2702 build (rcode, utype,
2703 build (MINUS_EXPR, utype, var, lo_cst),
2704 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
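/* A minimal standalone sketch of the identity used above, assuming ordinary
   wrap-around unsigned arithmetic; `range_test_demo' is a hypothetical name
   and this block is not part of the compiler proper.  */
#if 0
#include <assert.h>

static int
range_test_demo (i)
     int i;
{
  int slow = (i >= 2 && i <= 9);
  int fast = ((unsigned) i - 2 <= 7);	/* (unsigned) (i - 2) <= 9 - 2 */
  assert (slow == fast);
  return fast;
}
#endif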
2707 /* Find ways of folding logical expressions of LHS and RHS:
2708 Try to merge two comparisons to the same innermost item.
2709 Look for range tests like "ch >= '0' && ch <= '9'".
2710 Look for combinations of simple terms on machines with expensive branches
2711 and evaluate the RHS unconditionally.
2713 For example, if we have p->a == 2 && p->b == 4 and we can make an
2714 object large enough to span both A and B, we can do this with a comparison
2715 against the object ANDed with the a mask.
2717 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2718 operations to do this with one comparison.
2720 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2721 function and the one above.
2723 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2724 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2726 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its two operands.
2729 We return the simplified tree or 0 if no optimization is possible. */
2732 fold_truthop (code, truth_type, lhs, rhs)
2733 enum tree_code code;
2734 tree truth_type, lhs, rhs;
2736 /* If this is the "or" of two comparisons, we can do something if
2737 the comparisons are NE_EXPR. If this is the "and", we can do something
2738 if the comparisons are EQ_EXPR. I.e.,
2739 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2741 WANTED_CODE is this operation code. For single bit fields, we can
2742 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2743 comparison for one-bit fields. */
2745 enum tree_code wanted_code;
2746 enum tree_code lcode, rcode;
2747 tree ll_arg, lr_arg, rl_arg, rr_arg;
2748 tree ll_inner, lr_inner, rl_inner, rr_inner;
2749 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2750 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2751 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2752 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2753 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2754 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2755 enum machine_mode lnmode, rnmode;
2756 tree ll_mask, lr_mask, rl_mask, rr_mask;
2757 tree l_const, r_const;
2759 int first_bit, end_bit;
2762 /* Start by getting the comparison codes and seeing if this looks like
2763 a range test. Fail if anything is volatile. If one operand is a
2764 BIT_AND_EXPR with the constant one, treat it as if it were surrounded
2767 if (TREE_SIDE_EFFECTS (lhs)
2768 || TREE_SIDE_EFFECTS (rhs))
2771 lcode = TREE_CODE (lhs);
2772 rcode = TREE_CODE (rhs);
2774 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2775 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2777 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2778 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
2780 if (TREE_CODE_CLASS (lcode) != '<'
2781 || TREE_CODE_CLASS (rcode) != '<')
2784 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2785 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2787 ll_arg = TREE_OPERAND (lhs, 0);
2788 lr_arg = TREE_OPERAND (lhs, 1);
2789 rl_arg = TREE_OPERAND (rhs, 0);
2790 rr_arg = TREE_OPERAND (rhs, 1);
2792 if (TREE_CODE (lr_arg) == INTEGER_CST
2793 && TREE_CODE (rr_arg) == INTEGER_CST
2794 && operand_equal_p (ll_arg, rl_arg, 0))
2796 if (tree_int_cst_lt (lr_arg, rr_arg))
2797 result = range_test (code, truth_type, lcode, rcode,
2798 ll_arg, lr_arg, rr_arg);
2800 result = range_test (code, truth_type, rcode, lcode,
2801 ll_arg, rr_arg, lr_arg);
2803 /* If this isn't a range test, it also isn't a comparison that
2804 can be merged. However, it wins to evaluate the RHS unconditionally
2805 on machines with expensive branches. */
2807 if (result == 0 && BRANCH_COST >= 2)
2809 if (TREE_CODE (ll_arg) != VAR_DECL
2810 && TREE_CODE (ll_arg) != PARM_DECL)
2812 /* Avoid evaluating the variable part twice. */
2813 ll_arg = save_expr (ll_arg);
2814 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2815 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2817 return build (code, truth_type, lhs, rhs);
2822 /* If the RHS can be evaluated unconditionally and its operands are
2823 simple, it wins to evaluate the RHS unconditionally on machines
2824 with expensive branches. In this case, this isn't a comparison
2825 that can be merged. */
2827 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2828 are with zero (tmw). */
2830 if (BRANCH_COST >= 2
2831 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2832 && simple_operand_p (rl_arg)
2833 && simple_operand_p (rr_arg))
2834 return build (code, truth_type, lhs, rhs);
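/* For example, on such machines `a == 1 && b == 2' may be emitted with both
   halves evaluated and combined by one logical AND instead of a second
   branch, when both operands are simple and free of side effects.  */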
2836 /* See if the comparisons can be merged. Then get all the parameters for each side.
2839 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2840 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2844 ll_inner = decode_field_reference (ll_arg,
2845 &ll_bitsize, &ll_bitpos, &ll_mode,
2846 &ll_unsignedp, &volatilep, &ll_mask);
2847 lr_inner = decode_field_reference (lr_arg,
2848 &lr_bitsize, &lr_bitpos, &lr_mode,
2849 &lr_unsignedp, &volatilep, &lr_mask);
2850 rl_inner = decode_field_reference (rl_arg,
2851 &rl_bitsize, &rl_bitpos, &rl_mode,
2852 &rl_unsignedp, &volatilep, &rl_mask);
2853 rr_inner = decode_field_reference (rr_arg,
2854 &rr_bitsize, &rr_bitpos, &rr_mode,
2855 &rr_unsignedp, &volatilep, &rr_mask);
2857 /* The inner operation on the lhs of each comparison must be the same
2858 if we are to be able to do anything. Then see if we have constants.
2859 If not, the same must be true for the rhs's. */
2861 if (volatilep || ll_inner == 0 || rl_inner == 0
2862 || ! operand_equal_p (ll_inner, rl_inner, 0))
2865 if (TREE_CODE (lr_arg) == INTEGER_CST
2866 && TREE_CODE (rr_arg) == INTEGER_CST)
2867 l_const = lr_arg, r_const = rr_arg;
2868 else if (lr_inner == 0 || rr_inner == 0
2869 || ! operand_equal_p (lr_inner, rr_inner, 0))
2872 l_const = r_const = 0;
2874 /* If either comparison code is not correct for our logical operation,
2875 fail. However, we can convert a one-bit comparison against zero into
2876 the opposite comparison against that bit being set in the field. */
2878 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2879 if (lcode != wanted_code)
2881 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2887 if (rcode != wanted_code)
2889 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2895 /* See if we can find a mode that contains both fields being compared on
2896 the left. If we can't, fail. Otherwise, update all constants and masks
2897 to be relative to a field of that size. */
2898 first_bit = MIN (ll_bitpos, rl_bitpos);
2899 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2900 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2901 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2903 if (lnmode == VOIDmode)
2906 lnbitsize = GET_MODE_BITSIZE (lnmode);
2907 lnbitpos = first_bit & ~ (lnbitsize - 1);
2908 type = type_for_size (lnbitsize, 1);
2909 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2911 #if BYTES_BIG_ENDIAN
2912 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2913 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2916 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2917 size_int (xll_bitpos), 0);
2918 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2919 size_int (xrl_bitpos), 0);
2921 /* Make sure the constants are interpreted as unsigned, so we
2922 don't have sign bits outside the range of their type. */
2926 l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
2927 l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
2928 size_int (xll_bitpos), 0);
2932 r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
2933 r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
2934 size_int (xrl_bitpos), 0);
2937 /* If the right sides are not constant, do the same for them. Also,
2938 disallow this optimization if a size or signedness mismatch occurs
2939 between the left and right sides. */
2942 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2943 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2944 /* Make sure the two fields on the right
2945 correspond to the left without being swapped. */
2946 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2949 first_bit = MIN (lr_bitpos, rr_bitpos);
2950 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2951 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2952 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2954 if (rnmode == VOIDmode)
2957 rnbitsize = GET_MODE_BITSIZE (rnmode);
2958 rnbitpos = first_bit & ~ (rnbitsize - 1);
2959 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2961 #if BYTES_BIG_ENDIAN
2962 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2963 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2966 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2967 size_int (xlr_bitpos), 0);
2968 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2969 size_int (xrr_bitpos), 0);
2971 /* Make a mask that corresponds to both fields being compared.
2972 Do this for both items being compared. If the masks agree,
2973 we can do this by masking both and comparing the masked results. */
2975 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2976 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2977 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2979 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2980 ll_unsignedp || rl_unsignedp);
2981 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2982 lr_unsignedp || rr_unsignedp);
2983 if (! all_ones_mask_p (ll_mask, lnbitsize))
2985 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2986 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2988 return build (wanted_code, truth_type, lhs, rhs);
2991 /* There is still another way we can do something: If both pairs of
2992 fields being compared are adjacent, we may be able to make a wider
2993 field containing them both. */
2994 if ((ll_bitsize + ll_bitpos == rl_bitpos
2995 && lr_bitsize + lr_bitpos == rr_bitpos)
2996 || (ll_bitpos == rl_bitpos + rl_bitsize
2997 && lr_bitpos == rr_bitpos + rr_bitsize))
2998 return build (wanted_code, truth_type,
2999 make_bit_field_ref (ll_inner, type,
3000 ll_bitsize + rl_bitsize,
3001 MIN (ll_bitpos, rl_bitpos),
3003 make_bit_field_ref (lr_inner, type,
3004 lr_bitsize + rr_bitsize,
3005 MIN (lr_bitpos, rr_bitpos),
3011 /* Handle the case of comparisons with constants. If there is something in
3012 common between the masks, those bits of the constants must be the same.
3013 If not, the condition is always false. Test for this to avoid generating
3014 incorrect code below. */
3015 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
3016 if (! integer_zerop (result)
3017 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
3018 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
3020 if (wanted_code == NE_EXPR)
3022 warning ("`or' of unmatched not-equal tests is always 1");
3023 return convert (truth_type, integer_one_node);
3027 warning ("`and' of mutually exclusive equal-tests is always zero");
3028 return convert (truth_type, integer_zero_node);
3032 /* Construct the expression we will return. First get the component
3033 reference we will make. Unless the mask is all ones across the width of
3034 that field, perform the mask operation. Then compare with the merged constant. */
3036 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
3037 ll_unsignedp || rl_unsignedp);
3039 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
3040 if (! all_ones_mask_p (ll_mask, lnbitsize))
3041 result = build (BIT_AND_EXPR, type, result, ll_mask);
3043 return build (wanted_code, truth_type, result,
3044 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
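/* A standalone sketch of the contradiction test above, under the same
   assumptions; `masked_tests_contradict' is a hypothetical name and this
   block is not part of the compiler proper.  If two masked equality tests
   share mask bits and their constants disagree on any shared bit, then
   (x & m1) == c1 && (x & m2) == c2 can never hold.  */
#if 0
static int
masked_tests_contradict (m1, c1, m2, c2)
     unsigned int m1, c1, m2, c2;
{
  unsigned int shared = m1 & m2;
  return shared != 0 && (c1 & shared) != (c2 & shared);
}
#endif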
3047 /* Perform constant folding and related simplification of EXPR.
3048 The related simplifications include x*1 => x, x*0 => 0, etc.,
3049 and application of the associative law.
3050 NOP_EXPR conversions may be removed freely (as long as we
3051 are careful not to change the C type of the overall expression).
3052 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3053 but we can constant-fold them if they have constant operands. */
3059 register tree t = expr;
3060 tree t1 = NULL_TREE;
3062 tree type = TREE_TYPE (expr);
3063 register tree arg0, arg1;
3064 register enum tree_code code = TREE_CODE (t);
3068 /* WINS will be nonzero when the switch is done
3069 if all operands are constant. */
3073 /* Don't try to process an RTL_EXPR since its operands aren't trees. */
3074 if (code == RTL_EXPR)
3077 /* Return right away if already constant. */
3078 if (TREE_CONSTANT (t))
3080 if (code == CONST_DECL)
3081 return DECL_INITIAL (t);
3085 kind = TREE_CODE_CLASS (code);
3086 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3090 /* Special case for conversion ops that can have fixed point args. */
3091 arg0 = TREE_OPERAND (t, 0);
3093 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3095 STRIP_TYPE_NOPS (arg0);
3097 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3098 subop = TREE_REALPART (arg0);
3102 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3103 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3104 && TREE_CODE (subop) != REAL_CST
3105 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3107 /* Note that TREE_CONSTANT isn't enough:
3108 static var addresses are constant but we can't
3109 do arithmetic on them. */
3112 else if (kind == 'e' || kind == '<'
3113 || kind == '1' || kind == '2' || kind == 'r')
3115 register int len = tree_code_length[(int) code];
3117 for (i = 0; i < len; i++)
3119 tree op = TREE_OPERAND (t, i);
3123 continue; /* Valid for CALL_EXPR, at least. */
3125 if (kind == '<' || code == RSHIFT_EXPR)
3127 /* Signedness matters here. Perhaps we can refine this
3129 STRIP_TYPE_NOPS (op);
3133 /* Strip any conversions that don't change the mode. */
3137 if (TREE_CODE (op) == COMPLEX_CST)
3138 subop = TREE_REALPART (op);
3142 if (TREE_CODE (subop) != INTEGER_CST
3143 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3144 && TREE_CODE (subop) != REAL_CST
3145 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3147 /* Note that TREE_CONSTANT isn't enough:
3148 static var addresses are constant but we can't
3149 do arithmetic on them. */
3159 /* If this is a commutative operation, and ARG0 is a constant, move it
3160 to ARG1 to reduce the number of tests below. */
3161 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3162 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3163 || code == BIT_AND_EXPR)
3164 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3166 tem = arg0; arg0 = arg1; arg1 = tem;
3168 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3169 TREE_OPERAND (t, 1) = tem;
3172 /* Now WINS is set as described above,
3173 ARG0 is the first operand of EXPR,
3174 and ARG1 is the second operand (if it has more than one operand).
3176 First check for cases where an arithmetic operation is applied to a
3177 compound, conditional, or comparison operation. Push the arithmetic
3178 operation inside the compound or conditional to see if any folding
3179 can then be done. Convert comparison to conditional for this purpose.
3180 This also optimizes non-constant cases that used to be done in
3183 Before we do that, see if this is a BIT_AND_EXPR or a BIT_OR_EXPR,
3184 one of the operands is a comparison and the other is either a comparison
3185 or a BIT_AND_EXPR with the constant 1. In that case, the code below
3186 would make the expression more complex. Change it to a
3187 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
3188 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
3190 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
3191 || code == EQ_EXPR || code == NE_EXPR)
3192 && ((TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
3193 && (TREE_CODE_CLASS (TREE_CODE (arg1)) == '<'
3194 || (TREE_CODE (arg1) == BIT_AND_EXPR
3195 && integer_onep (TREE_OPERAND (arg1, 1)))))
3196 || (TREE_CODE_CLASS (TREE_CODE (arg1)) == '<'
3197 && (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
3198 || (TREE_CODE (arg0) == BIT_AND_EXPR
3199 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3201 t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
3202 : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
3206 if (code == EQ_EXPR)
3207 t = invert_truthvalue (t);
3212 if (TREE_CODE_CLASS (code) == '1')
3214 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3215 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3216 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3217 else if (TREE_CODE (arg0) == COND_EXPR)
3219 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3220 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3221 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3223 /* If this was a conversion, and all we did was to move it
3224 inside the COND_EXPR, bring it back out. Then return so we
3225 don't get into an infinite recursion loop taking the conversion
3226 out and then back in. */
3228 if ((code == NOP_EXPR || code == CONVERT_EXPR
3229 || code == NON_LVALUE_EXPR)
3230 && TREE_CODE (t) == COND_EXPR
3231 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3232 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3233 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3234 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0))))
3235 t = build1 (code, type,
3237 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3238 TREE_OPERAND (t, 0),
3239 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3240 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3243 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3244 return fold (build (COND_EXPR, type, arg0,
3245 fold (build1 (code, type, integer_one_node)),
3246 fold (build1 (code, type, integer_zero_node))));
3248 else if (TREE_CODE_CLASS (code) == '2'
3249 || TREE_CODE_CLASS (code) == '<')
3251 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3252 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3253 fold (build (code, type,
3254 arg0, TREE_OPERAND (arg1, 1))));
3255 else if (TREE_CODE (arg1) == COND_EXPR
3256 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3258 tree test, true_value, false_value;
3260 if (TREE_CODE (arg1) == COND_EXPR)
3262 test = TREE_OPERAND (arg1, 0);
3263 true_value = TREE_OPERAND (arg1, 1);
3264 false_value = TREE_OPERAND (arg1, 2);
3269 true_value = integer_one_node;
3270 false_value = integer_zero_node;
3273 /* If ARG0 is complex we want to make sure we only evaluate
3274 it once. Though this is only required if it is volatile, it
3275 might be more efficient even if it is not. However, if we
3276 succeed in folding one part to a constant, we do not need
3277 to make this SAVE_EXPR. Since we do this optimization
3278 primarily to see if we do end up with constant and this
3279 SAVE_EXPR interferes with later optimizations, suppressing
3280 it when we can is important. */
3282 if ((TREE_CODE (arg0) != VAR_DECL && TREE_CODE (arg0) != PARM_DECL)
3283 || TREE_SIDE_EFFECTS (arg0))
3285 tree lhs = fold (build (code, type, arg0, true_value));
3286 tree rhs = fold (build (code, type, arg0, false_value));
3288 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3289 return fold (build (COND_EXPR, type, test, lhs, rhs));
3291 arg0 = save_expr (arg0);
3294 test = fold (build (COND_EXPR, type, test,
3295 fold (build (code, type, arg0, true_value)),
3296 fold (build (code, type, arg0, false_value))));
3297 if (TREE_CODE (arg0) == SAVE_EXPR)
3298 return build (COMPOUND_EXPR, type,
3299 convert (void_type_node, arg0), test);
3301 return convert (type, test);
3304 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3305 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3306 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3307 else if (TREE_CODE (arg0) == COND_EXPR
3308 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3310 tree test, true_value, false_value;
3312 if (TREE_CODE (arg0) == COND_EXPR)
3314 test = TREE_OPERAND (arg0, 0);
3315 true_value = TREE_OPERAND (arg0, 1);
3316 false_value = TREE_OPERAND (arg0, 2);
3321 true_value = integer_one_node;
3322 false_value = integer_zero_node;
3325 if ((TREE_CODE (arg1) != VAR_DECL && TREE_CODE (arg1) != PARM_DECL)
3326 || TREE_SIDE_EFFECTS (arg1))
3328 tree lhs = fold (build (code, type, true_value, arg1));
3329 tree rhs = fold (build (code, type, false_value, arg1));
3331 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3332 return fold (build (COND_EXPR, type, test, lhs, rhs));
3334 arg1 = save_expr (arg1);
3337 test = fold (build (COND_EXPR, type, test,
3338 fold (build (code, type, true_value, arg1)),
3339 fold (build (code, type, false_value, arg1))));
3340 if (TREE_CODE (arg1) == SAVE_EXPR)
3341 return build (COMPOUND_EXPR, type,
3342 convert (void_type_node, arg1), test);
3344 return convert (type, test);
3347 else if (TREE_CODE_CLASS (code) == '<'
3348 && TREE_CODE (arg0) == COMPOUND_EXPR)
3349 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3350 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3351 else if (TREE_CODE_CLASS (code) == '<'
3352 && TREE_CODE (arg1) == COMPOUND_EXPR)
3353 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3354 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3366 return fold (DECL_INITIAL (t));
3371 case FIX_TRUNC_EXPR:
3372 /* Other kinds of FIX are not handled properly by fold_convert. */
3374 /* In addition to the cases of two conversions in a row
3375 handled below, if we are converting something to its own
3376 type via an object of identical or wider precision, neither
3377 conversion is needed. */
3378 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3379 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3380 && TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == TREE_TYPE (t)
3381 && ((INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
3382 && INTEGRAL_TYPE_P (TREE_TYPE (t)))
3383 || (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
3384 && FLOAT_TYPE_P (TREE_TYPE (t))))
3385 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3386 >= TYPE_PRECISION (TREE_TYPE (t))))
3387 return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
3389 /* Two conversions in a row are not needed unless:
3390 - the intermediate type is narrower than both initial and final, or
3391 - the intermediate type and innermost type differ in signedness,
3392 and the outermost type is wider than the intermediate, or
3393 - the initial type is a pointer type and the precisions of the
3394 intermediate and final types differ, or
3395 - the final type is a pointer type and the precisions of the
3396 initial and intermediate types differ. */
3397 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3398 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3399 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3400 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3402 TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3403 > TYPE_PRECISION (TREE_TYPE (t)))
3404 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3406 && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
3408 && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3409 != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3410 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3411 < TYPE_PRECISION (TREE_TYPE (t))))
3412 && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3413 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3414 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
3416 (TREE_UNSIGNED (TREE_TYPE (t))
3417 && (TYPE_PRECISION (TREE_TYPE (t))
3418 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3419 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3421 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3422 != TYPE_PRECISION (TREE_TYPE (t))))
3423 && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
3424 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3425 != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3426 return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3428 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3429 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3430 /* Detect assigning a bitfield. */
3431 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3432 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3434 /* Don't leave an assignment inside a conversion
3435 unless assigning a bitfield. */
3436 tree prev = TREE_OPERAND (t, 0);
3437 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3438 /* First do the assignment, then return converted constant. */
3439 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3445 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3448 return fold_convert (t, arg0);
3450 #if 0 /* This loses on &"foo"[0]. */
3455 /* Fold an expression like: "foo"[2] */
3456 if (TREE_CODE (arg0) == STRING_CST
3457 && TREE_CODE (arg1) == INTEGER_CST
3458 && !TREE_INT_CST_HIGH (arg1)
3459 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3461 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3462 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3463 force_fit_type (t, 0);
3470 TREE_CONSTANT (t) = wins;
3476 if (TREE_CODE (arg0) == INTEGER_CST)
3478 HOST_WIDE_INT low, high;
3479 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3480 TREE_INT_CST_HIGH (arg0),
3482 t = build_int_2 (low, high);
3483 TREE_TYPE (t) = type;
3485 = (TREE_OVERFLOW (arg0)
3486 | force_fit_type (t, overflow));
3487 TREE_CONSTANT_OVERFLOW (t)
3488 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3490 else if (TREE_CODE (arg0) == REAL_CST)
3491 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3492 TREE_TYPE (t) = type;
3494 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3495 return TREE_OPERAND (arg0, 0);
3497 /* Convert - (a - b) to (b - a) for non-floating-point. */
3498 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3499 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3500 TREE_OPERAND (arg0, 0));
3507 if (TREE_CODE (arg0) == INTEGER_CST)
3509 if (! TREE_UNSIGNED (type)
3510 && TREE_INT_CST_HIGH (arg0) < 0)
3512 HOST_WIDE_INT low, high;
3513 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3514 TREE_INT_CST_HIGH (arg0),
3516 t = build_int_2 (low, high);
3517 TREE_TYPE (t) = type;
3519 = (TREE_OVERFLOW (arg0)
3520 | force_fit_type (t, overflow));
3521 TREE_CONSTANT_OVERFLOW (t)
3522 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3525 else if (TREE_CODE (arg0) == REAL_CST)
3527 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3528 t = build_real (type,
3529 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3531 TREE_TYPE (t) = type;
3533 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3534 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3538 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
3540 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
3541 return build (COMPLEX_EXPR, TREE_TYPE (arg0),
3542 TREE_OPERAND (arg0, 0),
3543 fold (build1 (NEGATE_EXPR,
3544 TREE_TYPE (TREE_TYPE (arg0)),
3545 TREE_OPERAND (arg0, 1))));
3546 else if (TREE_CODE (arg0) == COMPLEX_CST)
3547 return build_complex (TREE_OPERAND (arg0, 0),
3548 fold (build1 (NEGATE_EXPR,
3549 TREE_TYPE (TREE_TYPE (arg0)),
3550 TREE_OPERAND (arg0, 1))));
3551 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
3552 return fold (build (TREE_CODE (arg0), type,
3553 fold (build1 (CONJ_EXPR, type,
3554 TREE_OPERAND (arg0, 0))),
3555 fold (build1 (CONJ_EXPR,
3556 type, TREE_OPERAND (arg0, 1)))));
3557 else if (TREE_CODE (arg0) == CONJ_EXPR)
3558 return TREE_OPERAND (arg0, 0);
3564 if (TREE_CODE (arg0) == INTEGER_CST)
3565 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3566 ~ TREE_INT_CST_HIGH (arg0));
3567 TREE_TYPE (t) = type;
3568 force_fit_type (t, 0);
3569 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
3570 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3572 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3573 return TREE_OPERAND (arg0, 0);
3577 /* A + (-B) -> A - B */
3578 if (TREE_CODE (arg1) == NEGATE_EXPR)
3579 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3580 else if (! FLOAT_TYPE_P (type))
3582 if (integer_zerop (arg1))
3583 return non_lvalue (convert (type, arg0));
3585 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3586 with a constant, and the two constants have no bits in common,
3587 we should treat this as a BIT_IOR_EXPR since this may produce more simplifications. */
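/* For instance, (a & 0xf0) + (b & 0x0f) can produce no carries, so it is
   equivalent to (a & 0xf0) | (b & 0x0f).  */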
3589 if (TREE_CODE (arg0) == BIT_AND_EXPR
3590 && TREE_CODE (arg1) == BIT_AND_EXPR
3591 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3592 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3593 && integer_zerop (const_binop (BIT_AND_EXPR,
3594 TREE_OPERAND (arg0, 1),
3595 TREE_OPERAND (arg1, 1), 0)))
3597 code = BIT_IOR_EXPR;
3601 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3602 about the case where C is a constant, just try one of the
3603 four possibilities. */
3605 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3606 && operand_equal_p (TREE_OPERAND (arg0, 1),
3607 TREE_OPERAND (arg1, 1), 0))
3608 return fold (build (MULT_EXPR, type,
3609 fold (build (PLUS_EXPR, type,
3610 TREE_OPERAND (arg0, 0),
3611 TREE_OPERAND (arg1, 0))),
3612 TREE_OPERAND (arg0, 1)));
3614 /* In IEEE floating point, x+0 may not equal x. */
3615 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3616 && real_zerop (arg1))
3617 return non_lvalue (convert (type, arg0));
3619 /* In most languages, can't associate operations on floats
3620 through parentheses. Rather than remember where the parentheses
3621 were, we don't associate floats at all. It shouldn't matter much. */
3622 if (FLOAT_TYPE_P (type))
3624 /* The varsign == -1 cases happen only for addition and subtraction.
3625 It says that the arg that was split was really CON minus VAR.
3626 The rest of the code applies to all associative operations. */
3632 if (split_tree (arg0, code, &var, &con, &varsign))
3636 /* EXPR is (CON-VAR) +- ARG1. */
3637 /* If it is + and VAR==ARG1, return just CONST. */
3638 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3639 return convert (TREE_TYPE (t), con);
3641 /* If ARG0 is a constant, don't change things around;
3642 instead keep all the constant computations together. */
3644 if (TREE_CONSTANT (arg0))
3647 /* Otherwise return (CON +- ARG1) - VAR. */
3648 TREE_SET_CODE (t, MINUS_EXPR);
3649 TREE_OPERAND (t, 1) = var;
3651 = fold (build (code, TREE_TYPE (t), con, arg1));
3655 /* EXPR is (VAR+CON) +- ARG1. */
3656 /* If it is - and VAR==ARG1, return just CONST. */
3657 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3658 return convert (TREE_TYPE (t), con);
3660 /* If ARG0 is a constant, don't change things around;
3661 instead keep all the constant computations together. */
3663 if (TREE_CONSTANT (arg0))
3666 /* Otherwise return VAR +- (ARG1 +- CON). */
3667 TREE_OPERAND (t, 1) = tem
3668 = fold (build (code, TREE_TYPE (t), arg1, con));
3669 TREE_OPERAND (t, 0) = var;
3670 if (integer_zerop (tem)
3671 && (code == PLUS_EXPR || code == MINUS_EXPR))
3672 return convert (type, var);
3673 /* If we have x +/- (c - d) [c an explicit integer]
3674 change it to x -/+ (d - c) since if d is relocatable
3675 then the latter can be a single immediate insn
3676 and the former cannot. */
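/* E.g. x + (4 - d) becomes x - (d - 4); when d is relocatable, d - 4 can be
   a single relocatable immediate while 4 - d cannot.  */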
3677 if (TREE_CODE (tem) == MINUS_EXPR
3678 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3680 tree tem1 = TREE_OPERAND (tem, 1);
3681 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3682 TREE_OPERAND (tem, 0) = tem1;
3684 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3690 if (split_tree (arg1, code, &var, &con, &varsign))
3692 /* EXPR is ARG0 +- (CON +- VAR). */
3693 if (TREE_CODE (t) == MINUS_EXPR
3694 && operand_equal_p (var, arg0, 0))
3696 /* If VAR and ARG0 cancel, return just CON or -CON. */
3697 if (code == PLUS_EXPR)
3698 return convert (TREE_TYPE (t), con);
3699 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3700 convert (TREE_TYPE (t), con)));
3702 if (TREE_CONSTANT (arg1))
3706 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3708 = fold (build (code, TREE_TYPE (t), arg0, con));
3709 TREE_OPERAND (t, 1) = var;
3710 if (integer_zerop (TREE_OPERAND (t, 0))
3711 && TREE_CODE (t) == PLUS_EXPR)
3712 return convert (TREE_TYPE (t), var);
3717 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3718 if (TREE_CODE (arg1) == REAL_CST)
3720 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3722 t1 = const_binop (code, arg0, arg1, 0);
3723 if (t1 != NULL_TREE)
3725 /* The return value should always have
3726 the same type as the original expression. */
3727 TREE_TYPE (t1) = TREE_TYPE (t);
3733 if (! FLOAT_TYPE_P (type))
3735 if (! wins && integer_zerop (arg0))
3736 return build1 (NEGATE_EXPR, type, arg1);
3737 if (integer_zerop (arg1))
3738 return non_lvalue (convert (type, arg0));
3740 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3741 about the case where C is a constant, just try one of the
3742 four possibilities. */
3744 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3745 && operand_equal_p (TREE_OPERAND (arg0, 1),
3746 TREE_OPERAND (arg1, 1), 0))
3747 return fold (build (MULT_EXPR, type,
3748 fold (build (MINUS_EXPR, type,
3749 TREE_OPERAND (arg0, 0),
3750 TREE_OPERAND (arg1, 0))),
3751 TREE_OPERAND (arg0, 1)));
3753 /* Convert A - (-B) to A + B. */
3754 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3755 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3756 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT)
3758 /* Except with IEEE floating point, 0-x equals -x. */
3759 if (! wins && real_zerop (arg0))
3760 return build1 (NEGATE_EXPR, type, arg1);
3761 /* Except with IEEE floating point, x-0 equals x. */
3762 if (real_zerop (arg1))
3763 return non_lvalue (convert (type, arg0));
3765 /* Fold &x - &x. This can happen from &x.foo - &x.
3766 This is unsafe for certain floats even in non-IEEE formats.
3767 In IEEE, it is unsafe because it does wrong for NaNs.
3768 Also note that operand_equal_p is always false if an operand is volatile. */
3771 if (operand_equal_p (arg0, arg1, FLOAT_TYPE_P (type)))
3772 return convert (type, integer_zero_node);
3777 if (! FLOAT_TYPE_P (type))
3779 if (integer_zerop (arg1))
3780 return omit_one_operand (type, arg1, arg0);
3781 if (integer_onep (arg1))
3782 return non_lvalue (convert (type, arg0));
3784 /* (a * (1 << b)) is (a << b) */
3785 if (TREE_CODE (arg1) == LSHIFT_EXPR
3786 && integer_onep (TREE_OPERAND (arg1, 0)))
3787 return fold (build (LSHIFT_EXPR, type, arg0,
3788 TREE_OPERAND (arg1, 1)));
3789 if (TREE_CODE (arg0) == LSHIFT_EXPR
3790 && integer_onep (TREE_OPERAND (arg0, 0)))
3791 return fold (build (LSHIFT_EXPR, type, arg1,
3792 TREE_OPERAND (arg0, 1)));
3796 /* x*0 is 0, except for IEEE floating point. */
3797 if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3798 && real_zerop (arg1))
3799 return omit_one_operand (type, arg1, arg0);
3800 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3801 However, ANSI says we can drop signals,
3802 so we can do this anyway. */
3803 if (real_onep (arg1))
3804 return non_lvalue (convert (type, arg0));
3806 if (! wins && real_twop (arg1))
3808 tree arg = save_expr (arg0);
3809 return build (PLUS_EXPR, type, arg, arg);
3816 if (integer_all_onesp (arg1))
3817 return omit_one_operand (type, arg1, arg0);
3818 if (integer_zerop (arg1))
3819 return non_lvalue (convert (type, arg0));
3820 t1 = distribute_bit_expr (code, type, arg0, arg1);
3821 if (t1 != NULL_TREE)
3824 /* If A is unsigned and C1+C2 is the size of A, then (a << C1) | (a >> C2)
3825 is a rotate of A by C1 bits. */
3827 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3828 || TREE_CODE (arg0) == LSHIFT_EXPR)
3829 && (TREE_CODE (arg1) == RSHIFT_EXPR
3830 || TREE_CODE (arg1) == LSHIFT_EXPR)
3831 && TREE_CODE (arg0) != TREE_CODE (arg1)
3832 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3833 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3834 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3835 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3836 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3837 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3838 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3839 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3840 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3841 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3842 TREE_CODE (arg0) == LSHIFT_EXPR
3843 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
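/* Illustration, assuming a 32-bit unsigned A: (A << 3) | (A >> 29) matches
   this pattern and folds to a left rotate of A by 3 bits.  */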
3848 if (integer_zerop (arg1))
3849 return non_lvalue (convert (type, arg0));
3850 if (integer_all_onesp (arg1))
3851 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3856 if (integer_all_onesp (arg1))
3857 return non_lvalue (convert (type, arg0));
3858 if (integer_zerop (arg1))
3859 return omit_one_operand (type, arg1, arg0);
3860 t1 = distribute_bit_expr (code, type, arg0, arg1);
3861 if (t1 != NULL_TREE)
3863 /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char. */
3864 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3865 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3867 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3868 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3869 && (~TREE_INT_CST_LOW (arg0)
3870 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3871 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3873 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3874 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3876 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3877 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3878 && (~TREE_INT_CST_LOW (arg1)
3879 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3880 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3884 case BIT_ANDTC_EXPR:
3885 if (integer_all_onesp (arg0))
3886 return non_lvalue (convert (type, arg1));
3887 if (integer_zerop (arg0))
3888 return omit_one_operand (type, arg0, arg1);
3889 if (TREE_CODE (arg1) == INTEGER_CST)
3891 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3892 code = BIT_AND_EXPR;
3897 case TRUNC_DIV_EXPR:
3898 case ROUND_DIV_EXPR:
3899 case FLOOR_DIV_EXPR:
3901 case EXACT_DIV_EXPR:
3903 if (integer_onep (arg1))
3904 return non_lvalue (convert (type, arg0));
3905 if (integer_zerop (arg1))
3908 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
3909 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
3910 expressions, which often appear in the offsets or sizes of
3911 objects with a varying size. Only deal with positive divisors
3914 Look for NOPs and SAVE_EXPRs inside. */
3916 if (TREE_CODE (arg1) == INTEGER_CST
3917 && tree_int_cst_lt (integer_zero_node, arg1))
3919 int have_save_expr = 0;
3920 tree c2 = integer_zero_node;
3923 if (TREE_CODE (xarg0) == SAVE_EXPR)
3924 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3928 if (TREE_CODE (xarg0) == PLUS_EXPR
3929 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3930 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
3931 else if (TREE_CODE (xarg0) == MINUS_EXPR
3932 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3934 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
3935 xarg0 = TREE_OPERAND (xarg0, 0);
3938 if (TREE_CODE (xarg0) == SAVE_EXPR)
3939 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3943 if (TREE_CODE (xarg0) == MULT_EXPR
3944 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
3945 && tree_int_cst_lt (integer_zero_node, TREE_OPERAND (xarg0, 1))
3946 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
3947 TREE_OPERAND (xarg0, 1), arg1, 1))
3948 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
3949 TREE_OPERAND (xarg0, 1), 1))))
3951 tree outer_div = integer_one_node;
3952 tree c1 = TREE_OPERAND (xarg0, 1);
3955 /* If C3 > C1, set them equal and do a divide by
3956 C3/C1 at the end of the operation. */
3957 if (tree_int_cst_lt (c1, c3))
3958 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
3960 /* The result is A * (C1/C3) + (C2/C3). */
3961 t = fold (build (PLUS_EXPR, type,
3962 fold (build (MULT_EXPR, type,
3963 TREE_OPERAND (xarg0, 0),
3964 const_binop (code, c1, c3, 1))),
3965 const_binop (code, c2, c3, 1)));
3967 if (! integer_onep (outer_div))
3968 t = fold (build (code, type, t, outer_div));
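/* Worked example: (a * 6 + 4) / 2 folds to a * 3 + 2, since 6 % 2 == 0.
   For (a * 2) / 6, where C3 > C1, the folded result is divided by
   C3/C1 == 3 at the end, giving a / 3.  */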
3977 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3978 #ifndef REAL_INFINITY
3979 if (TREE_CODE (arg1) == REAL_CST
3980 && real_zerop (arg1))
3983 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3988 case FLOOR_MOD_EXPR:
3989 case ROUND_MOD_EXPR:
3990 case TRUNC_MOD_EXPR:
3991 if (integer_onep (arg1))
3992 return omit_one_operand (type, integer_zero_node, arg0);
3993 if (integer_zerop (arg1))
3996 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
3997 where C1 % C3 == 0. Handle similarly to the division case,
3998 but don't bother with SAVE_EXPRs. */
4000 if (TREE_CODE (arg1) == INTEGER_CST
4001 && ! integer_zerop (arg1))
4003 tree c2 = integer_zero_node;
4006 if (TREE_CODE (xarg0) == PLUS_EXPR
4007 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4008 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4009 else if (TREE_CODE (xarg0) == MINUS_EXPR
4010 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4012 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4013 xarg0 = TREE_OPERAND (xarg0, 0);
4018 if (TREE_CODE (xarg0) == MULT_EXPR
4019 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4020 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
4021 TREE_OPERAND (xarg0, 1),
4023 /* The result is (C2%C3). */
4024 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
4025 TREE_OPERAND (xarg0, 0));
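/* Worked example: (a * 12 + 5) % 4 folds to the constant 1, since a * 12
   is always divisible by 4 and 5 % 4 == 1; omit_one_operand preserves any
   side effects of `a'.  */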
4034 if (integer_zerop (arg1))
4035 return non_lvalue (convert (type, arg0));
4036 /* Since negative shift count is not well-defined,
4037 don't try to compute it in the compiler. */
4038 if (tree_int_cst_lt (arg1, integer_zero_node))
4043 if (operand_equal_p (arg0, arg1, 0))
4045 if (INTEGRAL_TYPE_P (type)
4046 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
4047 return omit_one_operand (type, arg1, arg0);
4051 if (operand_equal_p (arg0, arg1, 0))
4053 if (INTEGRAL_TYPE_P (type)
4054 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
4055 return omit_one_operand (type, arg1, arg0);
4058 case TRUTH_NOT_EXPR:
4059 /* Note that the operand of this must be an int
4060 and its values must be 0 or 1.
4061 ("true" is a fixed value perhaps depending on the language,
4062 but we don't handle values other than 1 correctly yet.) */
4063 return invert_truthvalue (arg0);
4065 case TRUTH_ANDIF_EXPR:
4066 /* Note that the operands of this must be ints
4067 and their values must be 0 or 1.
4068 ("true" is a fixed value perhaps depending on the language.) */
4069 /* If first arg is constant zero, return it. */
4070 if (integer_zerop (arg0))
4072 case TRUTH_AND_EXPR:
4073 /* If either arg is constant true, drop it. */
4074 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4075 return non_lvalue (arg1);
4076 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4077 return non_lvalue (arg0);
4078 /* If second arg is constant zero, result is zero, but first arg
4079 must be evaluated. */
4080 if (integer_zerop (arg1))
4081 return omit_one_operand (type, arg1, arg0);
4084 /* Check for the possibility of merging component references. If our
4085 lhs is another similar operation, try to merge its rhs with our
4086 rhs. Then try to merge our lhs and rhs. */
4089 if (TREE_CODE (arg0) == code)
4091 tem = fold_truthop (code, type,
4092 TREE_OPERAND (arg0, 1), arg1);
4094 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
4097 tem = fold_truthop (code, type, arg0, arg1);
4103 case TRUTH_ORIF_EXPR:
4104 /* Note that the operands of this must be ints
4105 and their values must be 0 or 1.
4106 ("true" is a fixed value perhaps depending on the language.) */
4107 /* If first arg is constant true, return it. */
4108 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4111 /* If either arg is constant zero, drop it. */
4112 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
4113 return non_lvalue (arg1);
4114 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
4115 return non_lvalue (arg0);
4116 /* If second arg is constant true, result is true, but we must
4117 evaluate first arg. */
4118 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4119 return omit_one_operand (type, arg1, arg0);
4122 case TRUTH_XOR_EXPR:
4123 /* If either arg is constant zero, drop it. */
4124 if (integer_zerop (arg0))
4125 return non_lvalue (arg1);
4126 if (integer_zerop (arg1))
4127 return non_lvalue (arg0);
4128 /* If either arg is constant true, this is a logical inversion. */
4129 if (integer_onep (arg0))
4130 return non_lvalue (invert_truthvalue (arg1));
4131 if (integer_onep (arg1))
4132 return non_lvalue (invert_truthvalue (arg0));
4141 /* If one arg is a constant integer, put it last. */
4142 if (TREE_CODE (arg0) == INTEGER_CST
4143 && TREE_CODE (arg1) != INTEGER_CST)
4145 TREE_OPERAND (t, 0) = arg1;
4146 TREE_OPERAND (t, 1) = arg0;
4147 arg0 = TREE_OPERAND (t, 0);
4148 arg1 = TREE_OPERAND (t, 1);
4149 code = swap_tree_comparison (code);
4150 TREE_SET_CODE (t, code);
4153 /* Convert foo++ == CONST into ++foo == CONST + INCR.
4154 First, see if one arg is constant; find the constant arg
4155 and the other one. */
4157 tree constop = 0, varop;
4160 if (TREE_CONSTANT (arg1))
4161 constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
4162 if (TREE_CONSTANT (arg0))
4163 constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
4165 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
4167 /* This optimization is invalid for ordered comparisons
4168 if CONST+INCR overflows or if foo+incr might overflow.
4169 This optimization is invalid for floating point due to rounding.
4170 For pointer types we assume overflow doesn't happen. */
4171 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4172 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4173 && (code == EQ_EXPR || code == NE_EXPR)))
4176 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
4177 constop, TREE_OPERAND (varop, 1)));
4178 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
4179 *constoploc = newconst;
4183 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
4185 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4186 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4187 && (code == EQ_EXPR || code == NE_EXPR)))
4190 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
4191 constop, TREE_OPERAND (varop, 1)));
4192 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
4193 *constoploc = newconst;
4199 /* Change X >= CST to X > (CST - 1) if CST is positive. */
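/* E.g. `i >= 1' becomes `i > 0', and `i < 1' becomes `i <= 0'.  */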
4200 if (TREE_CODE (arg1) == INTEGER_CST
4201 && TREE_CODE (arg0) != INTEGER_CST
4202 && ! tree_int_cst_lt (arg1, integer_one_node))
4204 switch (TREE_CODE (t))
4208 TREE_SET_CODE (t, code);
4209 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4210 TREE_OPERAND (t, 1) = arg1;
4215 TREE_SET_CODE (t, code);
4216 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4217 TREE_OPERAND (t, 1) = arg1;
4221 /* If this is an EQ or NE comparison with zero and ARG0 is
4222 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
4223 two operations, but the latter can be done in one less insn
4224 on machines that have only two-operand insns or on which a
4225 constant cannot be the first operand. */
4226 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
4227 && TREE_CODE (arg0) == BIT_AND_EXPR)
4229 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
4230 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
4232 fold (build (code, type,
4233 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4235 TREE_TYPE (TREE_OPERAND (arg0, 0)),
4236 TREE_OPERAND (arg0, 1),
4237 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
4238 convert (TREE_TYPE (arg0),
4241 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
4242 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
4244 fold (build (code, type,
4245 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4247 TREE_TYPE (TREE_OPERAND (arg0, 1)),
4248 TREE_OPERAND (arg0, 0),
4249 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
4250 convert (TREE_TYPE (arg0),
4255 /* If this is an NE or EQ comparison of zero against the result of a
4256 signed MOD operation whose second operand is a power of 2, make
4257 the MOD operation unsigned since it is simpler and equivalent. */
4258 if ((code == NE_EXPR || code == EQ_EXPR)
4259 && integer_zerop (arg1)
4260 && ! TREE_UNSIGNED (TREE_TYPE (arg0))
4261 && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
4262 || TREE_CODE (arg0) == CEIL_MOD_EXPR
4263 || TREE_CODE (arg0) == FLOOR_MOD_EXPR
4264 || TREE_CODE (arg0) == ROUND_MOD_EXPR)
4265 && integer_pow2p (TREE_OPERAND (arg0, 1)))
4267 tree newtype = unsigned_type (TREE_TYPE (arg0));
4268 tree newmod = build (TREE_CODE (arg0), newtype,
4269 convert (newtype, TREE_OPERAND (arg0, 0)),
4270 convert (newtype, TREE_OPERAND (arg0, 1)));
4272 return build (code, type, newmod, convert (newtype, arg1));
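/* E.g. for signed x, `(x % 4) == 0' becomes `((unsigned) x % 4) == 0', and
   the unsigned MOD by a power of 2 is a simple bitwise AND.  */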
4275 /* If this is an NE comparison of zero with an AND of one, remove the
4276 comparison since the AND will give the correct value. */
4277 if (code == NE_EXPR && integer_zerop (arg1)
4278 && TREE_CODE (arg0) == BIT_AND_EXPR
4279 && integer_onep (TREE_OPERAND (arg0, 1)))
4280 return convert (type, arg0);
4282 /* If we have (A & C) == C where C is a power of 2, convert this into
4283 (A & C) != 0. Similarly for NE_EXPR. */
4284 if ((code == EQ_EXPR || code == NE_EXPR)
4285 && TREE_CODE (arg0) == BIT_AND_EXPR
4286 && integer_pow2p (TREE_OPERAND (arg0, 1))
4287 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
4288 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
4289 arg0, integer_zero_node);
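/* E.g. `(flags & 8) == 8' becomes `(flags & 8) != 0', avoiding the second
   use of the constant.  */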
      /* Simplify comparison of something with itself.  (For IEEE
	 floating-point, we can only do some of these simplifications.)  */
      if (operand_equal_p (arg0, arg1, 0))
	{
	  switch (code)
	    {
	    case EQ_EXPR:
	    case GE_EXPR:
	    case LE_EXPR:
	      if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
		{
		  t = build_int_2 (1, 0);
		  TREE_TYPE (t) = type;
		  return t;
		}
	      code = EQ_EXPR;
	      TREE_SET_CODE (t, code);
	      break;

	    case NE_EXPR:
	      /* For NE, we can only do this simplification if integer.  */
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
		break;
	      /* ... fall through ...  */
	    case GT_EXPR:
	    case LT_EXPR:
	      t = build_int_2 (0, 0);
	      TREE_TYPE (t) = type;
	      return t;
	    }
	}
      /* An unsigned comparison against 0 can be simplified.  */
      if (integer_zerop (arg1)
	  && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
	      || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
	  && TREE_UNSIGNED (TREE_TYPE (arg1)))
	{
	  switch (TREE_CODE (t))
	    {
	    case GT_EXPR:
	      code = NE_EXPR;
	      TREE_SET_CODE (t, NE_EXPR);
	      break;
	    case LE_EXPR:
	      code = EQ_EXPR;
	      TREE_SET_CODE (t, EQ_EXPR);
	      break;
	    case GE_EXPR:
	      return omit_one_operand (type,
				       convert (type, integer_one_node),
				       arg0);
	    case LT_EXPR:
	      return omit_one_operand (type,
				       convert (type, integer_zero_node),
				       arg0);
	    }
	}
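      /* Editorial example (not in the original source): for unsigned U,
	 `u > 0' becomes `u != 0', `u <= 0' becomes `u == 0', and
	 `u >= 0' and `u < 0' fold to the constants 1 and 0.  */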
      /* If we are comparing an expression that just has comparisons
	 of two integer values, arithmetic expressions of those comparisons,
	 and constants, we can simplify it.  There are only three cases
	 to check: the two values can either be equal, the first can be
	 greater, or the second can be greater.  Fold the expression for
	 those three values.  Since each value must be 0 or 1, we have
	 eight possibilities, each of which corresponds to the constant 0
	 or 1 or one of the six possible comparisons.

	 This handles common cases like (a > b) == 0 but also handles
	 expressions like ((x > y) - (y > x)) > 0, which supposedly
	 occur in macroized code.  */
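      /* Editorial worked example (not in the original source): folding
	 `((x > y) - (y > x)) > 0' with (x,y) set to (max,min), (max,max)
	 and (min,max) yields 1, 0 and 0, hence the mask value 4 below,
	 which selects the single comparison `x > y'.  */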
      if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
	{
	  tree cval1 = 0, cval2 = 0;
	  int save_p = 0;

	  if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
	      /* Don't handle degenerate cases here; they should already
		 have been handled anyway.  */
	      && cval1 != 0 && cval2 != 0
	      && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
	      && TREE_TYPE (cval1) == TREE_TYPE (cval2)
	      && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
	      && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
				    TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
	    {
	      tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
	      tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));

	      /* We can't just pass T to eval_subst in case cval1 or cval2
		 was the same as ARG1.  */

	      tree high_result
		= fold (build (code, type,
			       eval_subst (arg0, cval1, maxval, cval2, minval),
			       arg1));
	      tree equal_result
		= fold (build (code, type,
			       eval_subst (arg0, cval1, maxval, cval2, maxval),
			       arg1));
	      tree low_result
		= fold (build (code, type,
			       eval_subst (arg0, cval1, minval, cval2, maxval),
			       arg1));
	      /* All three of these results should be 0 or 1.  Confirm they
		 are.  Then use those values to select the proper code
		 to use.  */
	      if ((integer_zerop (high_result)
		   || integer_onep (high_result))
		  && (integer_zerop (equal_result)
		      || integer_onep (equal_result))
		  && (integer_zerop (low_result)
		      || integer_onep (low_result)))
		{
		  /* Make a 3-bit mask with the high-order bit being the
		     value for `>', the next for `=', and the low for `<'.  */
		  switch ((integer_onep (high_result) * 4)
			  + (integer_onep (equal_result) * 2)
			  + integer_onep (low_result))
		    {
		    case 0: /* Always false.  */
		      return omit_one_operand (type, integer_zero_node, arg0);
		    case 1: code = LT_EXPR; break;
		    case 2: code = EQ_EXPR; break;
		    case 3: code = LE_EXPR; break;
		    case 4: code = GT_EXPR; break;
		    case 5: code = NE_EXPR; break;
		    case 6: code = GE_EXPR; break;
		    case 7: /* Always true.  */
		      return omit_one_operand (type, integer_one_node, arg0);
		    }

		  t = build (code, type, cval1, cval2);
		  if (save_p)
		    return save_expr (t);
		  return fold (t);
		}
	    }
	}
      /* If this is a comparison of a field, we may be able to simplify it.  */
      if ((TREE_CODE (arg0) == COMPONENT_REF
	   || TREE_CODE (arg0) == BIT_FIELD_REF)
	  && (code == EQ_EXPR || code == NE_EXPR)
	  /* Handle the constant case even without -O
	     to make sure the warnings are given.  */
	  && (optimize || TREE_CODE (arg1) == INTEGER_CST))
	{
	  t1 = optimize_bit_field_compare (code, type, arg0, arg1);
	  return t1 ? t1 : t;
	}
      /* From here on, the only cases we handle are when the result is
	 known to be a constant.

	 To compute GT, swap the arguments and do LT.
	 To compute GE, do LT and invert the result.
	 To compute LE, swap the arguments, do LT and invert the result.
	 To compute NE, do EQ and invert the result.

	 Therefore, the code below must handle only EQ and LT.  */

      if (code == LE_EXPR || code == GT_EXPR)
	{
	  tem = arg0, arg0 = arg1, arg1 = tem;
	  code = swap_tree_comparison (code);
	}

      /* Note that it is safe to invert for real values here because we
	 will check below in the one case that it matters.  */

      invert = 0;
      if (code == NE_EXPR || code == GE_EXPR)
	{
	  invert = 1;
	  code = invert_tree_comparison (code);
	}
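      /* Editorial example (not in the original source): `3 >= 5' is
	 computed as `! (3 < 5)': GE becomes LT with INVERT set, the LT
	 test yields 1, and the final inversion gives 0.  */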
      /* Compute a result for LT or EQ if args permit;
	 otherwise return T.  */
      if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
	{
	  if (code == EQ_EXPR)
	    t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
			       == TREE_INT_CST_LOW (arg1))
			      && (TREE_INT_CST_HIGH (arg0)
				  == TREE_INT_CST_HIGH (arg1)),
			      0);
	  else
	    t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
			       ? INT_CST_LT_UNSIGNED (arg0, arg1)
			       : INT_CST_LT (arg0, arg1)),
			      0);
	}

      /* Assume a nonexplicit constant cannot equal an explicit one,
	 since such code would be undefined anyway.
	 Exception: on sysvr4, using #pragma weak,
	 a label can come out as 0.  */
      else if (TREE_CODE (arg1) == INTEGER_CST
	       && !integer_zerop (arg1)
	       && TREE_CONSTANT (arg0)
	       && TREE_CODE (arg0) == ADDR_EXPR
	       && code == EQ_EXPR)
	t1 = build_int_2 (0, 0);
      /* Two real constants can be compared explicitly.  */
      else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
	{
	  /* If either operand is a NaN, the result is false with two
	     exceptions: First, an NE_EXPR is true on NaNs, but that case
	     is already handled correctly since we will be inverting the
	     result for NE_EXPR.  Second, if we had inverted a LE_EXPR
	     or a GE_EXPR into a LT_EXPR, we must return true so that it
	     will be inverted into false.  */

	  if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
	      || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
	    t1 = build_int_2 (invert && code == LT_EXPR, 0);

	  else if (code == EQ_EXPR)
	    t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
						 TREE_REAL_CST (arg1)),
			      0);
	  else
	    t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
						TREE_REAL_CST (arg1)),
			      0);
	}

      if (t1 == NULL_TREE)
	return t;

      if (invert)
	TREE_INT_CST_LOW (t1) ^= 1;

      TREE_TYPE (t1) = type;
      return t1;
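      /* Editorial example (not in the original source): `NaN >= 1.0'
	 reaches here as LT with INVERT set; the NaN check forces the LT
	 result to 1 so the inversion yields 0, matching IEEE semantics
	 for ordered comparisons involving NaNs.  */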
    case COND_EXPR:
      /* Pedantic ANSI C says that a conditional expression is never an lvalue,
	 so all simple results must be passed through pedantic_non_lvalue.  */
      if (TREE_CODE (arg0) == INTEGER_CST)
	return pedantic_non_lvalue
	  (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
      else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
	return pedantic_non_lvalue (omit_one_operand (type, arg1, arg0));
      /* If the second operand is zero, invert the comparison and swap
	 the second and third operands.  Likewise if the second operand
	 is constant and the third is not or if the third operand is
	 equivalent to the first operand of the comparison.  */

      if (integer_zerop (arg1)
	  || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
	  || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
	      && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
						 TREE_OPERAND (t, 2),
						 TREE_OPERAND (arg0, 1))))
	{
	  /* See if this can be inverted.  If it can't, possibly because
	     it was a floating-point inequality comparison, don't do
	     anything.  */
	  tem = invert_truthvalue (arg0);

	  if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
	    {
	      arg0 = TREE_OPERAND (t, 0) = tem;
	      TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
	      TREE_OPERAND (t, 2) = arg1;
	      arg1 = TREE_OPERAND (t, 1);
	    }
	}
      /* If we have A op B ? A : C, we may be able to convert this to a
	 simpler expression, depending on the operation and the values
	 of B and C.  IEEE floating point prevents this though,
	 because A or B might be -0.0 or a NaN.  */

      if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
	  && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	      || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0))))
	  && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
					     arg1, TREE_OPERAND (arg0, 1)))
	{
	  tree arg2 = TREE_OPERAND (t, 2);
	  enum tree_code comp_code = TREE_CODE (arg0);

	  /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
	     depending on the comparison operation.  */
	  if (integer_zerop (TREE_OPERAND (arg0, 1))
	      && TREE_CODE (arg2) == NEGATE_EXPR
	      && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		return pedantic_non_lvalue
		  (fold (build1 (NEGATE_EXPR, type, arg1)));
	      case NE_EXPR:
		return pedantic_non_lvalue (convert (type, arg1));
	      case GE_EXPR:
	      case GT_EXPR:
		return pedantic_non_lvalue
		  (fold (build1 (ABS_EXPR, type, arg1)));
	      case LE_EXPR:
	      case LT_EXPR:
		return pedantic_non_lvalue
		  (fold (build1 (NEGATE_EXPR, type,
				 fold (build1 (ABS_EXPR, type, arg1)))));
	      }
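	  /* Editorial example (not in the original source): `x >= 0 ? x : -x'
	     folds to abs (x), and `x <= 0 ? x : -x' to -abs (x); IEEE
	     floating point is excluded above because of -0.0 and NaNs.  */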
	  /* If this is A != 0 ? A : 0, this is simply A.  For ==, it is
	     zero.  */
	  if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
	    {
	      if (comp_code == NE_EXPR)
		return pedantic_non_lvalue (convert (type, arg1));
	      else if (comp_code == EQ_EXPR)
		return pedantic_non_lvalue (convert (type, integer_zero_node));
	    }
	  /* If this is A op B ? A : B, this is either A, B, min (A, B),
	     or max (A, B), depending on the operation.  */

	  if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
					      arg2, TREE_OPERAND (arg0, 0)))
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		return pedantic_non_lvalue (convert (type, arg2));
	      case NE_EXPR:
		return pedantic_non_lvalue (convert (type, arg1));
	      case LE_EXPR:
	      case LT_EXPR:
		return pedantic_non_lvalue
		  (fold (build (MIN_EXPR, type, arg1, arg2)));
	      case GE_EXPR:
	      case GT_EXPR:
		return pedantic_non_lvalue
		  (fold (build (MAX_EXPR, type, arg1, arg2)));
	      }
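	  /* Editorial example (not in the original source): `a < b ? a : b'
	     becomes MIN_EXPR (a, b) and `a > b ? a : b' becomes
	     MAX_EXPR (a, b), which many targets can expand without
	     branches.  */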
	  /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
	     we might still be able to simplify this.  For example,
	     if C1 is one less or one more than C2, this might have started
	     out as a MIN or MAX and been transformed by this function.
	     Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE.  */

	  if (INTEGRAL_TYPE_P (type)
	      && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
	      && TREE_CODE (arg2) == INTEGER_CST)
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		/* We can replace A with C1 in this case.  */
		arg1 = TREE_OPERAND (t, 1)
		  = convert (type, TREE_OPERAND (arg0, 1));
		t = fold (t);
		break;

	      case LT_EXPR:
		/* If C1 is C2 + 1, this is min(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (PLUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MIN_EXPR, type, arg1, arg2)));
		break;

	      case LE_EXPR:
		/* If C1 is C2 - 1, this is min(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (MINUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MIN_EXPR, type, arg1, arg2)));
		break;

	      case GT_EXPR:
		/* If C1 is C2 - 1, this is max(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (MINUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MAX_EXPR, type, arg1, arg2)));
		break;

	      case GE_EXPR:
		/* If C1 is C2 + 1, this is max(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (PLUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MAX_EXPR, type, arg1, arg2)));
		break;
	      }
	}
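      /* Editorial example (not in the original source): with C1 == 5 and
	 C2 == 4, `a < 5 ? a : 4' is recognized as min (a, 4), recovering
	 a MIN that an earlier canonicalization obscured.  */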
      /* Convert A ? 1 : 0 to simply A.  */
      if (integer_onep (TREE_OPERAND (t, 1))
	  && integer_zerop (TREE_OPERAND (t, 2))
	  /* If we try to convert TREE_OPERAND (t, 0) to our type, the
	     call to fold will try to move the conversion inside
	     a COND, which will recurse.  In that case, the COND_EXPR
	     is probably the best choice, so leave it alone.  */
	  && type == TREE_TYPE (arg0))
	return pedantic_non_lvalue (arg0);
      /* Look for expressions of the form A & 2 ? 2 : 0.  The result of this
	 operation is simply A & 2.  */

      if (integer_zerop (TREE_OPERAND (t, 2))
	  && TREE_CODE (arg0) == NE_EXPR
	  && integer_zerop (TREE_OPERAND (arg0, 1))
	  && integer_pow2p (arg1)
	  && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
	  && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
			      arg1, 1))
	return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));

      return t;
    case COMPOUND_EXPR:
      /* When pedantic, a compound expression can be neither an lvalue
	 nor an integer constant expression.  */
      if (TREE_SIDE_EFFECTS (arg0) || pedantic)
	return t;
      /* Don't let (0, 0) be null pointer constant.  */
      if (integer_zerop (arg1))
	return non_lvalue (arg1);
      return arg1;

    case COMPLEX_EXPR:
      if (wins)
	return build_complex (arg0, arg1);
      return t;
    case REALPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
	return t;
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
	return omit_one_operand (type, TREE_OPERAND (arg0, 0),
				 TREE_OPERAND (arg0, 1));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
	return TREE_REALPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build1 (REALPART_EXPR, type,
					  TREE_OPERAND (arg0, 0))),
			    fold (build1 (REALPART_EXPR, type,
					  TREE_OPERAND (arg0, 1)))));
      return t;
    case IMAGPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
	return convert (type, integer_zero_node);
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
	return omit_one_operand (type, TREE_OPERAND (arg0, 1),
				 TREE_OPERAND (arg0, 0));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
	return TREE_IMAGPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build1 (IMAGPART_EXPR, type,
					  TREE_OPERAND (arg0, 0))),
			    fold (build1 (IMAGPART_EXPR, type,
					  TREE_OPERAND (arg0, 1)))));
      return t;
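      /* Editorial example (not in the original source): REALPART_EXPR and
	 IMAGPART_EXPR distribute over complex addition and subtraction,
	 e.g. the real part of `(a + b)' is `REALPART (a) + REALPART (b)',
	 because complex arithmetic acts componentwise.  */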
    default:
      return t;
    } /* switch (code) */