/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 1988, 1992, 1993 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
/*@@ Fix lossage on folding division of big integers.  */

/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision etc. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */
/* The entry points in this file are fold, size_int and size_binop.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.  */
/* Handle floating overflow for `const_binop'.  */
static jmp_buf float_error;

static void encode PROTO((short *, HOST_WIDE_INT, HOST_WIDE_INT));
static void decode PROTO((short *, HOST_WIDE_INT *, HOST_WIDE_INT *));
static int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
                                       HOST_WIDE_INT, HOST_WIDE_INT,
                                       HOST_WIDE_INT, HOST_WIDE_INT *,
                                       HOST_WIDE_INT *, HOST_WIDE_INT *,
                                       HOST_WIDE_INT *));
static int split_tree PROTO((tree, enum tree_code, tree *, tree *, int *));
static tree const_binop PROTO((enum tree_code, tree, tree, int));
static tree fold_convert PROTO((tree, tree));
static enum tree_code invert_tree_comparison PROTO((enum tree_code));
static enum tree_code swap_tree_comparison PROTO((enum tree_code));
static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
static int twoval_comparison_p PROTO((tree, tree *, tree *));
static tree eval_subst PROTO((tree, tree, tree, tree, tree));
static tree omit_one_operand PROTO((tree, tree, tree));
static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
                                              tree, tree));
static tree decode_field_reference PROTO((tree, int *, int *,
                                          enum machine_mode *, int *,
                                          int *, tree *));
static int all_ones_mask_p PROTO((tree, int));
static int simple_operand_p PROTO((tree));
static tree range_test PROTO((enum tree_code, tree, enum tree_code,
                              enum tree_code, tree, tree, tree));
static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
/* Yield nonzero if a signed left shift of A by B bits overflows.  */
#define left_shift_overflows(a, b)  ((a) != ((a) << (b)) >> (b))
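/* A worked example (reader's note, assuming 32-bit HOST_WIDE_INT):
   for a == 0x40000000 and b == 2, (a << 2) wraps to 0, and shifting
   back right gives 0 != a, so the macro yields nonzero; for
   a == 0x1000 the round trip restores a and it yields 0.  */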
/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
   Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
   Then this yields nonzero if overflow occurred during the addition.
   Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
   Use `^' to test whether signs differ, and `< 0' to isolate the sign.  */
#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
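/* A worked example (reader's note, assuming 32-bit HOST_WIDE_INT):
   a == 0x7fffffff and b == 1 wrap to sum == 0x80000000; a and b agree
   in sign but a and sum differ, so ~(a ^ b) & (a ^ sum) has its sign
   bit set and the macro yields nonzero.  For a == -1, b == 1, sum == 0
   the signs of a and b differ and no overflow is reported.  */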
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer as MAX_SHORTS shorts,
   with only 8 bits stored in each short, as a positive number.  */
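/* For example (reader's note, assuming 32-bit HOST_WIDE_INT, so that
   MAX_SHORTS is 8): the two-word value low == 0x12345678, hi == 0 is
   stored as the shorts {0x78, 0x56, 0x34, 0x12, 0, 0, 0, 0}, least
   significant byte first.  */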
/* Unpack a two-word integer into MAX_SHORTS shorts.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   SHORTS points to the array of shorts.  */

encode (shorts, low, hi)
     HOST_WIDE_INT low, hi;

  for (i = 0; i < MAX_SHORTS / 2; i++)
    {
      shorts[i] = (low >> (i * 8)) & 0xff;
      shorts[i + MAX_SHORTS / 2] = (hi >> (i * 8)) & 0xff;
    }
/* Pack an array of MAX_SHORTS shorts into a two-word integer.
   SHORTS points to the array of shorts.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

decode (shorts, low, hi)
     HOST_WIDE_INT *low, *hi;

  HOST_WIDE_INT lv = 0, hv = 0;

  for (i = 0; i < MAX_SHORTS / 2; i++)
    {
      lv |= (HOST_WIDE_INT) shorts[i] << (i * 8);
      hv |= (HOST_WIDE_INT) shorts[i + MAX_SHORTS / 2] << (i * 8);
    }
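/* A minimal round-trip sketch (illustrative only, not compiled in):
   encode followed by decode must reproduce the original two pieces.  */
#if 0
{
  short s[MAX_SHORTS];
  HOST_WIDE_INT lo, hi;

  encode (s, (HOST_WIDE_INT) 0x12345678, (HOST_WIDE_INT) -1);
  decode (s, &lo, &hi);
  /* Here lo == 0x12345678 and hi == -1 again.  */
}
#endif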
/* Make the integer constant T valid for its type
   by setting to 0 or 1 all the bits in the constant
   that don't belong in the type.
   Yield 1 if a signed overflow occurs, 0 otherwise.
   If OVERFLOW is nonzero, a signed overflow has already occurred
   in calculating T, so propagate it.  */

force_fit_type (t, overflow)
  HOST_WIDE_INT low, high;

  if (TREE_CODE (t) != INTEGER_CST)

  low = TREE_INT_CST_LOW (t);
  high = TREE_INT_CST_HIGH (t);

  if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)

  prec = TYPE_PRECISION (TREE_TYPE (t));

  /* First clear all bits that are beyond the type's precision.  */

  if (prec == 2 * HOST_BITS_PER_WIDE_INT)
  else if (prec > HOST_BITS_PER_WIDE_INT)
    TREE_INT_CST_HIGH (t)
      &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));

      TREE_INT_CST_HIGH (t) = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
        TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);

  /* Unsigned types do not suffer sign extension or overflow.  */
  if (TREE_UNSIGNED (TREE_TYPE (t)))

  /* If the value's sign bit is set, extend the sign.  */
  if (prec != 2 * HOST_BITS_PER_WIDE_INT
      && (prec > HOST_BITS_PER_WIDE_INT
          ? (TREE_INT_CST_HIGH (t)
             & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
          : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))

      /* Value is negative:
         set to 1 all the bits that are outside this type's precision.  */
      if (prec > HOST_BITS_PER_WIDE_INT)
        TREE_INT_CST_HIGH (t)
          |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));

      TREE_INT_CST_HIGH (t) = -1;
      if (prec < HOST_BITS_PER_WIDE_INT)
        TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);

  /* Yield nonzero if signed overflow occurred.  */
  ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
/* Add two doubleword integers with doubleword result.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

add_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;

  short arg1[MAX_SHORTS];
  short arg2[MAX_SHORTS];
  register int carry = 0;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  for (i = 0; i < MAX_SHORTS; i++)
    {
      carry += arg1[i] + arg2[i];
      arg1[i] = carry & 0xff;

  decode (arg1, lv, hv);
  return overflow_sum_sign (h1, h2, *hv);
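/* Usage sketch (illustrative only, not compiled in): adding the
   doubleword values 1 and -1 gives 0 with no signed overflow.  */
#if 0
{
  HOST_WIDE_INT lo, hi;
  int ovf = add_double ((HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                        (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, &lo, &hi);
  /* Here lo == 0, hi == 0 and ovf == 0.  */
}
#endif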
/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

neg_double (l1, h1, lv, hv)
     HOST_WIDE_INT l1, h1;
     HOST_WIDE_INT *lv, *hv;

      return (*hv & h1) < 0;
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */

mul_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;

  short arg1[MAX_SHORTS];
  short arg2[MAX_SHORTS];
  short prod[MAX_SHORTS * 2];
  register int carry = 0;
  register int i, j, k;
  HOST_WIDE_INT toplow, tophigh, neglow, neghigh;

  /* These cases are used extensively, arising from pointer combinations.  */

      int overflow = left_shift_overflows (h1, 1);
      unsigned HOST_WIDE_INT temp = l1 + l1;
      *hv = (h1 << 1) + (temp < l1);

      int overflow = left_shift_overflows (h1, 2);
      unsigned HOST_WIDE_INT temp = l1 + l1;
      h1 = (h1 << 2) + ((temp < l1) << 1);

      int overflow = left_shift_overflows (h1, 3);
      unsigned HOST_WIDE_INT temp = l1 + l1;
      h1 = (h1 << 3) + ((temp < l1) << 2);

      h1 += (temp < l1) << 1;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  bzero (prod, sizeof prod);

  for (i = 0; i < MAX_SHORTS; i++)
    for (j = 0; j < MAX_SHORTS; j++)

        carry = arg1[i] * arg2[j];

        prod[k] = carry & 0xff;

  decode (prod, lv, hv);  /* This ignores
                             prod[MAX_SHORTS] -> prod[MAX_SHORTS*2-1] */

  /* Check for overflow by calculating the top half of the answer in full;
     it should agree with the low half's sign bit.  */
  decode (prod + MAX_SHORTS, &toplow, &tophigh);

      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);

      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);

  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
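/* Usage sketch (illustrative only, not compiled in, assuming 32-bit
   HOST_WIDE_INT): 0x10000 * 0x10000 is 2**32 and needs the high word.  */
#if 0
{
  HOST_WIDE_INT lo, hi;
  int ovf = mul_double ((HOST_WIDE_INT) 0x10000, (HOST_WIDE_INT) 0,
                        (HOST_WIDE_INT) 0x10000, (HOST_WIDE_INT) 0,
                        &lo, &hi);
  /* Here lo == 0, hi == 1 and ovf == 0: the product still fits the
     signed doubleword.  */
}
#endif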
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

lshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     HOST_WIDE_INT *lv, *hv;

  short arg1[MAX_SHORTS];

      rshift_double (l1, h1, - count, prec, lv, hv, arith);

  encode (arg1, l1, h1);

  for (i = 0; i < MAX_SHORTS; i++)

      carry += arg1[i] << 1;
      arg1[i] = carry & 0xff;

  decode (arg1, lv, hv);
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

rshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     HOST_WIDE_INT *lv, *hv;

  short arg1[MAX_SHORTS];

  encode (arg1, l1, h1);

  carry = arith && arg1[MAX_SHORTS - 1] >> 7;
  for (i = MAX_SHORTS - 1; i >= 0; i--)

      arg1[i] = (carry >> 1) & 0xff;

  decode (arg1, lv, hv);
/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

lrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     HOST_WIDE_INT *lv, *hv;

  short arg1[MAX_SHORTS];

      rrotate_double (l1, h1, - count, prec, lv, hv);

  encode (arg1, l1, h1);

  carry = arg1[MAX_SHORTS - 1] >> 7;

  for (i = 0; i < MAX_SHORTS; i++)

      carry += arg1[i] << 1;
      arg1[i] = carry & 0xff;

  decode (arg1, lv, hv);
/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

rrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     HOST_WIDE_INT *lv, *hv;

  short arg1[MAX_SHORTS];

  encode (arg1, l1, h1);

  for (i = MAX_SHORTS - 1; i >= 0; i--)

      arg1[i] = (carry >> 1) & 0xff;

  decode (arg1, lv, hv);
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR

   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */

div_and_round_double (code, uns,
                      lnum_orig, hnum_orig, lden_orig, hden_orig,
                      lquo, hquo, lrem, hrem)
     HOST_WIDE_INT lnum_orig, hnum_orig;  /* num == numerator == dividend */
     HOST_WIDE_INT lden_orig, hden_orig;  /* den == denominator == divisor */
     HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;

  short num[MAX_SHORTS + 1];  /* extra element for scaling.  */
  short den[MAX_SHORTS], quo[MAX_SHORTS];
  register int i, j, work;
  register int carry = 0;
  HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  if ((hden == 0) && (lden == 0))

  /* Calculate quotient sign and convert operands to unsigned.  */

      /* (minimum integer) / (-1) is the only overflow case.  */
      if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)

      neg_double (lden, hden, &lden, &hden);

  if (hnum == 0 && hden == 0)
    {                           /* single precision */

      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / (unsigned HOST_WIDE_INT) lden;

    {                           /* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */

  bzero (quo, sizeof quo);

  bzero (num, sizeof num);      /* to zero 9th element */
  bzero (den, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);
  /* This code requires more than just hden == 0.
     We also have to require that we don't need more than three bytes
     to hold CARRY.  If we ever did need four bytes to hold it, we
     would lose part of it when computing WORK on the next round.  */
  if (hden == 0 && (((unsigned HOST_WIDE_INT) lden << 8) >> 8) == lden)
    {                           /* simpler algorithm */
      /* hnum != 0 already checked.  */
      for (i = MAX_SHORTS - 1; i >= 0; i--)

          work = num[i] + (carry << 8);
          quo[i] = work / (unsigned HOST_WIDE_INT) lden;
          carry = work % (unsigned HOST_WIDE_INT) lden;

  else {                        /* full double precision,
                                   with thanks to Don Knuth's
                                   "Seminumerical Algorithms".  */

    int quo_est, scale, num_hi_sig, den_hi_sig, quo_hi_sig;
    /* Find the highest non-zero divisor digit.  */
    for (i = MAX_SHORTS - 1; ; i--)

    for (i = MAX_SHORTS - 1; ; i--)

    quo_hi_sig = num_hi_sig - den_hi_sig + 1;

    /* Ensure that the first digit of the divisor is at least BASE/2.
       This is required by the quotient digit estimation algorithm.  */

    scale = BASE / (den[den_hi_sig] + 1);
    if (scale > 1) {            /* scale divisor and dividend */

      for (i = 0; i <= MAX_SHORTS - 1; i++) {
        work = (num[i] * scale) + carry;
        num[i] = work & 0xff;

        if (num[i] != 0) num_hi_sig = i;

      for (i = 0; i <= MAX_SHORTS - 1; i++) {
        work = (den[i] * scale) + carry;
        den[i] = work & 0xff;

        if (den[i] != 0) den_hi_sig = i;
    for (i = quo_hi_sig; i > 0; i--) {
      /* Guess the next quotient digit, quo_est, by dividing the first
         two remaining dividend digits by the high order divisor digit.
         quo_est is never low and is at most 2 high.  */

      int num_hi;               /* index of highest remaining dividend digit */

      num_hi = i + den_hi_sig;

      work = (num[num_hi] * BASE) + (num_hi > 0 ? num[num_hi - 1] : 0);
      if (num[num_hi] != den[den_hi_sig]) {
        quo_est = work / den[den_hi_sig];

      /* Refine quo_est so it's usually correct, and at most one high.  */
      while ((den[den_hi_sig - 1] * quo_est)
             > (((work - (quo_est * den[den_hi_sig])) * BASE)
                + ((num_hi - 1) > 0 ? num[num_hi - 2] : 0)))

      /* Try QUO_EST as the quotient digit, by multiplying the
         divisor by QUO_EST and subtracting from the remaining dividend.
         Keep in mind that QUO_EST is the I - 1st digit.  */

      for (j = 0; j <= den_hi_sig; j++)

          work = num[i + j - 1] - (quo_est * den[j]) + carry;

          num[i + j - 1] = digit;

      /* If quo_est was high by one, then num[i] went negative and
         we need to correct things.  */

          carry = 0;            /* add divisor back in */
          for (j = 0; j <= den_hi_sig; j++)

              work = num[i + j - 1] + den[j] + carry;

              num[i + j - 1] = work;

          num[num_hi] += carry;

      /* Store the quotient digit.  */
      quo[i - 1] = quo_est;
  decode (quo, lquo, hquo);

  /* If the result should be negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* Compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
    case TRUNC_MOD_EXPR:        /* round toward zero */
    case EXACT_DIV_EXPR:        /* for this one, it shouldn't matter */

    case FLOOR_MOD_EXPR:        /* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */

          add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,

      else return overflow;

    case CEIL_MOD_EXPR:         /* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */

          add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,

      else return overflow;

    case ROUND_MOD_EXPR:        /* round to closest integer */

        HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
        HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;

        /* Get absolute values.  */
        if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
        if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);

        /* if (2 * abs (lrem) >= abs (lden)) */
        mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
                    labs_rem, habs_rem, &ltwice, &htwice);
        if (((unsigned HOST_WIDE_INT) habs_den
             < (unsigned HOST_WIDE_INT) htwice)
            || (((unsigned HOST_WIDE_INT) habs_den
                 == (unsigned HOST_WIDE_INT) htwice)
                && ((unsigned HOST_WIDE_INT) labs_den
                    < (unsigned HOST_WIDE_INT) ltwice)))

            add_double (*lquo, *hquo,
                        (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);

            add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,

        else return overflow;
  /* Compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
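/* A worked example of the rounding variants (reader's note):
   dividing num = 7 by den = 2 gives quo = 3, rem = 1 under TRUNC and
   FLOOR, but quo = 4, rem = -1 under CEIL and ROUND; dividing -7 by 2
   gives quo = -3, rem = -1 under TRUNC and CEIL, and quo = -4, rem = 1
   under FLOOR and ROUND, since ROUND adjusts whenever
   2 * abs (rem) >= abs (den), rounding halves away from zero.  */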
#ifndef REAL_ARITHMETIC
/* Effectively truncate a real value to represent
   the nearest possible value in a narrower mode.
   The result is actually represented in the same data type as the argument,
   but its value is usually different.  */

real_value_truncate (mode, arg)
     enum machine_mode mode;

  /* Make sure the value is actually stored in memory before we turn off
     the handler.  */

  REAL_VALUE_TYPE value;
  jmp_buf handler, old_handler;

  if (setjmp (handler))

      error ("floating overflow");

  handled = push_float_handler (handler, old_handler);
  value = REAL_VALUE_TRUNCATE (mode, arg);
  pop_float_handler (handled, old_handler);
#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT

/* Check for infinity in an IEEE double precision number.  */

    /* The IEEE 64-bit double format.  */

        unsigned exponent : 11;
        unsigned mantissa1 : 20;

        unsigned mantissa1 : 20;
        unsigned exponent : 11;

  if (u.big_endian.sign == 1)

      return (u.big_endian.exponent == 2047
              && u.big_endian.mantissa1 == 0
              && u.big_endian.mantissa2 == 0);

      return (u.little_endian.exponent == 2047
              && u.little_endian.mantissa1 == 0
              && u.little_endian.mantissa2 == 0);

/* Check whether an IEEE double precision number is a NaN.  */

    /* The IEEE 64-bit double format.  */

        unsigned exponent : 11;
        unsigned mantissa1 : 20;

        unsigned mantissa1 : 20;
        unsigned exponent : 11;

  if (u.big_endian.sign == 1)

      return (u.big_endian.exponent == 2047
              && (u.big_endian.mantissa1 != 0
                  || u.big_endian.mantissa2 != 0));

      return (u.little_endian.exponent == 2047
              && (u.little_endian.mantissa1 != 0
                  || u.little_endian.mantissa2 != 0));

/* Check for a negative IEEE double precision number.  */

    /* The IEEE 64-bit double format.  */

        unsigned exponent : 11;
        unsigned mantissa1 : 20;

        unsigned mantissa1 : 20;
        unsigned exponent : 11;

  if (u.big_endian.sign == 1)

      return u.big_endian.sign;

      return u.little_endian.sign;
#else /* Target not IEEE */

/* Let's assume other float formats don't have infinity.
   (This can be overridden by redefining REAL_VALUE_ISINF.)  */

/* Let's assume other float formats don't have NaNs.
   (This can be overridden by redefining REAL_VALUE_ISNAN.)  */

/* Let's assume other float formats don't have minus zero.
   (This can be overridden by redefining REAL_VALUE_NEGATIVE.)  */

#endif /* Target not IEEE */
#endif /* no REAL_ARITHMETIC */
/* Split a tree IN into a constant and a variable part
   that could be combined with CODE to make IN.
   CODE must be a commutative arithmetic operation.
   Store the constant part into *CONP and the variable in *VARP.
   Return 1 if this was done; zero means the tree IN did not decompose
   this way.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
   Therefore, we must tell the caller whether the variable part
   was subtracted.  We do this by storing 1 or -1 into *VARSIGNP.
   The value stored is the coefficient for the variable term.
   The constant term we return should always be added;
   we negate it if necessary.  */
split_tree (in, code, varp, conp, varsignp)
     enum tree_code code;

  register tree outtype = TREE_TYPE (in);

  /* Strip any conversions that don't change the machine mode.  */
  while ((TREE_CODE (in) == NOP_EXPR
          || TREE_CODE (in) == CONVERT_EXPR)
         && (TYPE_MODE (TREE_TYPE (in))
             == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
    in = TREE_OPERAND (in, 0);

  if (TREE_CODE (in) == code
      || (! FLOAT_TYPE_P (TREE_TYPE (in))
          /* We can associate addition and subtraction together
             (even though the C standard doesn't say so)
             for integers because the value is not affected.
             For reals, the value might be affected, so we can't.  */
          && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
              || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))

      enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
      if (code == INTEGER_CST)

          *conp = TREE_OPERAND (in, 0);
          *varp = TREE_OPERAND (in, 1);
          if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
              && TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;

      if (TREE_CONSTANT (TREE_OPERAND (in, 1)))

          *conp = TREE_OPERAND (in, 1);
          *varp = TREE_OPERAND (in, 0);

          if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
              && TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          if (TREE_CODE (in) == MINUS_EXPR)

              /* If operation is subtraction and constant is second,
                 must negate it to get an additive constant.
                 And this cannot be done unless it is a manifest constant.
                 It could also be the address of a static variable.
                 We cannot negate that, so give up.  */
              if (TREE_CODE (*conp) == INTEGER_CST)
                /* Subtracting from integer_zero_node loses for long long.  */
                *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));

      if (TREE_CONSTANT (TREE_OPERAND (in, 0)))

          *conp = TREE_OPERAND (in, 0);
          *varp = TREE_OPERAND (in, 1);
          if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
              && TREE_TYPE (*varp) != outtype)
            *varp = convert (outtype, *varp);
          *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
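/* For example (reader's note): splitting the tree for `x + 5' with
   CODE == PLUS_EXPR stores 5 in *CONP, x in *VARP and 1 in *VARSIGNP;
   splitting `5 - x' stores 5, x and -1, the constant still to be
   added by the caller.  */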
/* Combine two constants ARG1 and ARG2 under operation CODE
   to produce a new constant.
   We assume ARG1 and ARG2 have the same data type,
   or at least are the same kind of constant and the same machine mode.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */
const_binop (code, arg1, arg2, notrunc)
     enum tree_code code;
     register tree arg1, arg2;

  if (TREE_CODE (arg1) == INTEGER_CST)

      register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
      register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
      HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
      HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
      HOST_WIDE_INT low, hi;
      HOST_WIDE_INT garbagel, garbageh;

      int uns = TREE_UNSIGNED (TREE_TYPE (arg1));

          t = build_int_2 (int1l | int2l, int1h | int2h);

          t = build_int_2 (int1l ^ int2l, int1h ^ int2h);

          t = build_int_2 (int1l & int2l, int1h & int2h);

        case BIT_ANDTC_EXPR:
          t = build_int_2 (int1l & ~int2l, int1h & ~int2h);

          /* It's unclear from the C standard whether shifts can overflow.
             The following code ignores overflow; perhaps a C standard
             interpretation ruling is needed.  */
          lshift_double (int1l, int1h, int2l,
                         TYPE_PRECISION (TREE_TYPE (arg1)),

          t = build_int_2 (low, hi);
          TREE_TYPE (t) = TREE_TYPE (arg1);

          force_fit_type (t, 0);
          TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);

          lrotate_double (int1l, int1h, int2l,
                          TYPE_PRECISION (TREE_TYPE (arg1)),

          t = build_int_2 (low, hi);

          if ((unsigned HOST_WIDE_INT) int2l < int1l)

              overflow = int2h < hi;

          t = build_int_2 (int2l, int2h);

          if ((unsigned HOST_WIDE_INT) int1l < int2l)

              overflow = int1h < hi;

          t = build_int_2 (int1l, int1h);

          overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
          t = build_int_2 (low, hi);

          if (int2h == 0 && int2l == 0)

              t = build_int_2 (int1l, int1h);

          neg_double (int2l, int2h, &low, &hi);
          add_double (int1l, int1h, low, hi, &low, &hi);
          overflow = overflow_sum_sign (hi, int2h, int1h);
          t = build_int_2 (low, hi);

          /* Optimize simple cases.  */

              unsigned HOST_WIDE_INT temp;

                  t = build_int_2 (0, 0);

                  t = build_int_2 (int2l, int2h);

                  overflow = left_shift_overflows (int2h, 1);
                  temp = int2l + int2l;
                  int2h = (int2h << 1) + (temp < int2l);
                  t = build_int_2 (temp, int2h);

#if 0 /* This code can lose carries.  */
                  temp = int2l + int2l + int2l;
                  int2h = int2h * 3 + (temp < int2l);
                  t = build_int_2 (temp, int2h);

                  overflow = left_shift_overflows (int2h, 2);
                  temp = int2l + int2l;
                  int2h = (int2h << 2) + ((temp < int2l) << 1);

                  int2h += (temp < int2l);
                  t = build_int_2 (temp, int2h);

                  overflow = left_shift_overflows (int2h, 3);
                  temp = int2l + int2l;
                  int2h = (int2h << 3) + ((temp < int2l) << 2);

                  int2h += (temp < int2l) << 1;

                  int2h += (temp < int2l);
                  t = build_int_2 (temp, int2h);

              t = build_int_2 (0, 0);

              t = build_int_2 (int1l, int1h);

          overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
          t = build_int_2 (low, hi);

        case TRUNC_DIV_EXPR:
        case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
        case EXACT_DIV_EXPR:
          /* This is a shortcut for a common special case.
             It reduces the number of tree nodes generated
             and saves time.  */
          if (int2h == 0 && int2l > 0
              && TREE_TYPE (arg1) == sizetype
              && int1h == 0 && int1l >= 0)
              if (code == CEIL_DIV_EXPR)
                int1l += int2l - 1;
              return size_int (int1l / int2l);
        case ROUND_DIV_EXPR:
          if (int2h == 0 && int2l == 1)

              t = build_int_2 (int1l, int1h);

          if (int1l == int2l && int1h == int2h)

              if ((int1l | int1h) == 0)

              t = build_int_2 (1, 0);

          overflow = div_and_round_double (code, uns,
                                           int1l, int1h, int2l, int2h,
                                           &low, &hi, &garbagel, &garbageh);
          t = build_int_2 (low, hi);

        case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
        case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
          overflow = div_and_round_double (code, uns,
                                           int1l, int1h, int2l, int2h,
                                           &garbagel, &garbageh, &low, &hi);
          t = build_int_2 (low, hi);

              low = (((unsigned HOST_WIDE_INT) int1h
                      < (unsigned HOST_WIDE_INT) int2h)
                     || (((unsigned HOST_WIDE_INT) int1h
                          == (unsigned HOST_WIDE_INT) int2h)
                         && ((unsigned HOST_WIDE_INT) int1l
                             < (unsigned HOST_WIDE_INT) int2l)));

              low = ((int1h < int2h)
                     || ((int1h == int2h)
                         && ((unsigned HOST_WIDE_INT) int1l
                             < (unsigned HOST_WIDE_INT) int2l)));

          if (low == (code == MIN_EXPR))
            t = build_int_2 (int1l, int1h);

            t = build_int_2 (int2l, int2h);

      TREE_TYPE (t) = TREE_TYPE (arg1);

        = ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
           | TREE_OVERFLOW (arg1)
           | TREE_OVERFLOW (arg2));
      TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
                                    | TREE_CONSTANT_OVERFLOW (arg1)
                                    | TREE_CONSTANT_OVERFLOW (arg2));
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (TREE_CODE (arg1) == REAL_CST)

      REAL_VALUE_TYPE value;

      d1 = TREE_REAL_CST (arg1);
      d2 = TREE_REAL_CST (arg2);
      if (setjmp (float_error))

          pedwarn ("floating overflow in constant expression");
          return build (code, TREE_TYPE (arg1), arg1, arg2);

      set_float_handler (float_error);

#ifdef REAL_ARITHMETIC
      REAL_ARITHMETIC (value, code, d1, d2);

#ifndef REAL_INFINITY

          value = MIN (d1, d2);

          value = MAX (d1, d2);

#endif /* no REAL_ARITHMETIC */
      t = build_real (TREE_TYPE (arg1),
                      real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)),
                                           value));
      set_float_handler (NULL_PTR);

#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  if (TREE_CODE (arg1) == COMPLEX_CST)

      register tree r1 = TREE_REALPART (arg1);
      register tree i1 = TREE_IMAGPART (arg1);
      register tree r2 = TREE_REALPART (arg2);
      register tree i2 = TREE_IMAGPART (arg2);

          t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
                             const_binop (PLUS_EXPR, i1, i2, notrunc));

          t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
                             const_binop (MINUS_EXPR, i1, i2, notrunc));

          t = build_complex (const_binop (MINUS_EXPR,
                                          const_binop (MULT_EXPR,

                                          const_binop (MULT_EXPR,

                             const_binop (PLUS_EXPR,
                                          const_binop (MULT_EXPR,

                                          const_binop (MULT_EXPR,

              register tree magsquared
                = const_binop (PLUS_EXPR,
                               const_binop (MULT_EXPR, r2, r2, notrunc),
                               const_binop (MULT_EXPR, i2, i2, notrunc),

              t = build_complex
                (const_binop (RDIV_EXPR,
                              const_binop (PLUS_EXPR,
                                           const_binop (MULT_EXPR,
                                                        r1, r2, notrunc),
                                           const_binop (MULT_EXPR,
                                                        i1, i2, notrunc),

                              magsquared, notrunc),
                 const_binop (RDIV_EXPR,
                              const_binop (MINUS_EXPR,
                                           const_binop (MULT_EXPR,
                                                        i1, r2, notrunc),
                                           const_binop (MULT_EXPR,
                                                        r1, i2, notrunc),

                              magsquared, notrunc));

      TREE_TYPE (t) = TREE_TYPE (arg1);
/* Return an INTEGER_CST with value V and type from `sizetype'.  */

     unsigned int number;

  /* Type-size nodes already made for small sizes.  */
  static tree size_table[2 * HOST_BITS_PER_WIDE_INT + 1];

  if (number < 2 * HOST_BITS_PER_WIDE_INT + 1
      && size_table[number] != 0)
    return size_table[number];
  if (number < 2 * HOST_BITS_PER_WIDE_INT + 1)

      push_obstacks_nochange ();
      /* Make this a permanent node.  */
      end_temporary_allocation ();
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
      size_table[number] = t;

  t = build_int_2 (number, 0);
  TREE_TYPE (t) = sizetype;
/* Combine operands ARG0 and ARG1 with arithmetic operation CODE.
   CODE is a tree code.  The data type is taken from `sizetype'.
   If the operands are constant, so is the result.  */
size_binop (code, arg0, arg1)
     enum tree_code code;

  /* Handle the special case of two integer constants faster.  */
  if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)

      /* And some specific cases even faster than that.  */
      if (code == PLUS_EXPR
          && TREE_INT_CST_LOW (arg0) == 0
          && TREE_INT_CST_HIGH (arg0) == 0)

      if (code == MINUS_EXPR
          && TREE_INT_CST_LOW (arg1) == 0
          && TREE_INT_CST_HIGH (arg1) == 0)

      if (code == MULT_EXPR
          && TREE_INT_CST_LOW (arg0) == 1
          && TREE_INT_CST_HIGH (arg0) == 0)

      /* Handle general case of two integer constants.  */
      return const_binop (code, arg0, arg1, 1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return error_mark_node;

  return fold (build (code, sizetype, arg0, arg1));
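/* Usage sketch (illustrative only, not compiled in): the size helpers
   fold eagerly, so combining two constants yields another constant.  */
#if 0
{
  tree t = size_binop (PLUS_EXPR, size_int (4), size_int (8));
  /* t is an INTEGER_CST of type sizetype with value 12; small values
     like these are cached permanently by size_int.  */
}
#endif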
/* Given T, a tree representing type conversion of ARG1, a constant,
   return a constant tree representing the result of conversion.  */

fold_convert (t, arg1)

  register tree type = TREE_TYPE (t);

  if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))

      if (TREE_CODE (arg1) == INTEGER_CST)

          /* Given an integer constant, make a new constant with the new
             type, appropriately sign-extended or truncated.  */
          t = build_int_2 (TREE_INT_CST_LOW (arg1),
                           TREE_INT_CST_HIGH (arg1));
          TREE_TYPE (t) = type;
          /* Indicate an overflow if (1) ARG1 already overflowed,
             or (2) force_fit_type indicates an overflow.
             Tell force_fit_type that an overflow has already occurred
             if ARG1 is a too-large unsigned value and T is signed.  */

            = (TREE_OVERFLOW (arg1)
               | force_fit_type (t,
                                 (TREE_INT_CST_HIGH (arg1) < 0
                                  & (TREE_UNSIGNED (type)
                                     < TREE_UNSIGNED (TREE_TYPE (arg1))))));
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      else if (TREE_CODE (arg1) == REAL_CST)

          REAL_VALUE_TYPE l, x, u;

          l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
          x = TREE_REAL_CST (arg1);
          u = real_value_from_int_cst (TYPE_MAX_VALUE (type));

          /* See if X will be in range after truncation towards 0.
             To compensate for truncation, move the bounds away from 0,
             but reject if X exactly equals the adjusted bounds.  */
#ifdef REAL_ARITHMETIC
          REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
          REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);

          if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))

              pedwarn ("real constant out of range for integer conversion");

#ifndef REAL_ARITHMETIC

              HOST_WIDE_INT low, high;
              HOST_WIDE_INT half_word
                = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);

              d = TREE_REAL_CST (arg1);

              high = (HOST_WIDE_INT) (d / half_word / half_word);
              d -= (REAL_VALUE_TYPE) high * half_word * half_word;
              if (d >= (REAL_VALUE_TYPE) half_word * half_word / 2)

                  low = d - (REAL_VALUE_TYPE) half_word * half_word / 2;
                  low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);

                low = (HOST_WIDE_INT) d;
              if (TREE_REAL_CST (arg1) < 0)
                neg_double (low, high, &low, &high);
              t = build_int_2 (low, high);

              HOST_WIDE_INT low, high;
              REAL_VALUE_TO_INT (&low, &high, (TREE_REAL_CST (arg1)));
              t = build_int_2 (low, high);

          TREE_TYPE (t) = type;
          force_fit_type (t, 0);

#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      TREE_TYPE (t) = type;

  else if (TREE_CODE (type) == REAL_TYPE)

#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      if (TREE_CODE (arg1) == INTEGER_CST)
        return build_real_from_int_cst (type, arg1);
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      if (TREE_CODE (arg1) == REAL_CST)

          if (setjmp (float_error))

              pedwarn ("floating overflow in constant expression");

          set_float_handler (float_error);

          t = build_real (type, real_value_truncate (TYPE_MODE (type),
                                                     TREE_REAL_CST (arg1)));
          set_float_handler (NULL_PTR);

  TREE_CONSTANT (t) = 1;
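/* For example (reader's note): converting the REAL_CST 3.7 to a
   32-bit signed integer type truncates toward zero and yields the
   INTEGER_CST 3, while converting 1e30 trips the range check above
   and draws the "real constant out of range" pedwarn.  */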
/* Return an expr equal to X but certainly not valid as an lvalue.
   Also make sure it is not valid as a null pointer constant.  */

  /* These things are certainly not lvalues.  */
  if (TREE_CODE (x) == NON_LVALUE_EXPR
      || TREE_CODE (x) == INTEGER_CST
      || TREE_CODE (x) == REAL_CST
      || TREE_CODE (x) == STRING_CST
      || TREE_CODE (x) == ADDR_EXPR)

      if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))

          /* Use NOP_EXPR instead of NON_LVALUE_EXPR
             so convert_for_assignment won't strip it.
             This is so this 0 won't be treated as a null pointer constant.  */
          result = build1 (NOP_EXPR, TREE_TYPE (x), x);
          TREE_CONSTANT (result) = TREE_CONSTANT (x);

  result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
  TREE_CONSTANT (result) = TREE_CONSTANT (x);
/* Given a tree comparison code, return the code that is the logical inverse
   of the given code.  It is not safe to do this for floating-point
   comparisons, except for NE_EXPR and EQ_EXPR.  */

static enum tree_code
invert_tree_comparison (code)
     enum tree_code code;

/* Similar, but return the comparison that results if the operands are
   swapped.  This is safe for floating-point.  */

static enum tree_code
swap_tree_comparison (code)
     enum tree_code code;
/* Return nonzero if two operands are necessarily equal.
   If ONLY_CONST is non-zero, only return non-zero for constants.
   This function tests whether the operands are indistinguishable;
   it does not test whether they are equal using C's == operation.
   The distinction is important for IEEE floating point, because
   (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
   (2) two NaNs may be indistinguishable, but NaN!=NaN.  */

operand_equal_p (arg0, arg1, only_const)

  /* If both types don't have the same signedness, then we can't consider
     them equal.  We must check this before the STRIP_NOPS calls
     because they may change the signedness of the arguments.  */
  if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))

  /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
     We don't care about side effects in that case because the SAVE_EXPR
     takes care of that for us.  */
  if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
    return ! only_const;

  if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == ADDR_EXPR
      && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == INTEGER_CST
      && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
      && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))

  /* Detect when real constants are equal.  */
  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == REAL_CST)
    return !bcmp (&TREE_REAL_CST (arg0), &TREE_REAL_CST (arg1),
                  sizeof (REAL_VALUE_TYPE));

  if (TREE_CODE (arg0) != TREE_CODE (arg1))

  /* This is needed for conversions and for COMPONENT_REF.
     Might as well play it safe and always test this.  */
  if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))

  switch (TREE_CODE_CLASS (TREE_CODE (arg0)))

      /* Two conversions are equal only if signedness and modes match.  */
      if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
          && (TREE_UNSIGNED (TREE_TYPE (arg0))
              != TREE_UNSIGNED (TREE_TYPE (arg1))))

      return operand_equal_p (TREE_OPERAND (arg0, 0),
                              TREE_OPERAND (arg1, 0), 0);

      return (operand_equal_p (TREE_OPERAND (arg0, 0),
                               TREE_OPERAND (arg1, 0), 0)
              && operand_equal_p (TREE_OPERAND (arg0, 1),
                                  TREE_OPERAND (arg1, 1), 0));

      switch (TREE_CODE (arg0))

          return operand_equal_p (TREE_OPERAND (arg0, 0),
                                  TREE_OPERAND (arg1, 0), 0);

          return (operand_equal_p (TREE_OPERAND (arg0, 0),
                                   TREE_OPERAND (arg1, 0), 0)
                  && operand_equal_p (TREE_OPERAND (arg0, 1),
                                      TREE_OPERAND (arg1, 1), 0));

          return (operand_equal_p (TREE_OPERAND (arg0, 0),
                                   TREE_OPERAND (arg1, 0), 0)
                  && operand_equal_p (TREE_OPERAND (arg0, 1),
                                      TREE_OPERAND (arg1, 1), 0)
                  && operand_equal_p (TREE_OPERAND (arg0, 2),
                                      TREE_OPERAND (arg1, 2), 0));
/* Similar to operand_equal_p, but see if ARG0 might have been made by
   shorten_compare from ARG1 when ARG1 was being compared with OTHER.

   When in doubt, return 0.  */

operand_equal_for_comparison_p (arg0, arg1, other)

  int unsignedp1, unsignedpo;
  tree primarg1, primother;

  if (operand_equal_p (arg0, arg1, 0))

  if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))

  /* Duplicate what shorten_compare does to ARG1 and see if that gives the
     actual comparison operand, ARG0.

     First throw away any conversions to wider types
     already present in the operands.  */

  primarg1 = get_narrower (arg1, &unsignedp1);
  primother = get_narrower (other, &unsignedpo);

  correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
  if (unsignedp1 == unsignedpo
      && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
      && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)

      tree type = TREE_TYPE (arg0);

      /* Make sure the shorter operand is extended the right way
         to match the longer operand.  */
      primarg1 = convert (signed_or_unsigned_type (unsignedp1,
                                                   TREE_TYPE (primarg1)),

      if (operand_equal_p (arg0, convert (type, primarg1), 0))
/* See if ARG is an expression that is either a comparison or is performing
   arithmetic on comparisons.  The comparisons must only be comparing
   two different values, which will be stored in *CVAL1 and *CVAL2; if
   they are non-zero it means that some operands have already been found.
   No variables may be used anywhere else in the expression except in the
   comparisons.

   If this is true, return 1.  Otherwise, return zero.  */
twoval_comparison_p (arg, cval1, cval2)
     tree *cval1, *cval2;

  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
      && (code == TRUTH_NOT_EXPR
          || (code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)))
  else if (class == 'e'
           && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
               || code == COMPOUND_EXPR))

      return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2);

      return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2)
              && twoval_comparison_p (TREE_OPERAND (arg, 1), cval1, cval2));

      if (code == COND_EXPR)
        return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2)
                && twoval_comparison_p (TREE_OPERAND (arg, 1), cval1, cval2)
                && twoval_comparison_p (TREE_OPERAND (arg, 2),
      /* First see if we can handle the first operand, then the second.  For
         the second operand, we know *CVAL1 can't be zero.  It must be that
         one side of the comparison is each of the values; test for the
         case where this isn't true by failing if the two operands
         are the same.  */
      if (operand_equal_p (TREE_OPERAND (arg, 0),
                           TREE_OPERAND (arg, 1), 0))

        *cval1 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))

      else if (*cval2 == 0)
        *cval2 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))

      if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))

      else if (*cval2 == 0)
        *cval2 = TREE_OPERAND (arg, 1);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
/* ARG is a tree that is known to contain just arithmetic operations and
   comparisons.  Evaluate the operations in the tree substituting NEW0 for
   any occurrence of OLD0 as an operand of a comparison and likewise
   NEW1 for OLD1.  */
eval_subst (arg, old0, new0, old1, new1)
     tree old0, new0, old1, new1;

  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
  else if (class == 'e'
           && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))

      return fold (build1 (code, type,
                           eval_subst (TREE_OPERAND (arg, 0),
                                       old0, new0, old1, new1)));

      return fold (build (code, type,
                          eval_subst (TREE_OPERAND (arg, 0),
                                      old0, new0, old1, new1),
                          eval_subst (TREE_OPERAND (arg, 1),
                                      old0, new0, old1, new1)));

      return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);

      return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);

      return fold (build (code, type,
                          eval_subst (TREE_OPERAND (arg, 0),
                                      old0, new0, old1, new1),
                          eval_subst (TREE_OPERAND (arg, 1),
                                      old0, new0, old1, new1),
                          eval_subst (TREE_OPERAND (arg, 2),
                                      old0, new0, old1, new1)));

        tree arg0 = TREE_OPERAND (arg, 0);
        tree arg1 = TREE_OPERAND (arg, 1);

        /* We need to check both for exact equality and tree equality.  The
           former will be true if the operand has a side-effect.  In that
           case, we know the operand occurred exactly once.  */

        if (arg0 == old0 || operand_equal_p (arg0, old0, 0))

        else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))

        if (arg1 == old0 || operand_equal_p (arg1, old0, 0))

        else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))

        return fold (build (code, type, arg0, arg1));
/* Return a tree for the case when the result of an expression is RESULT
   converted to TYPE and OMITTED was previously an operand of the expression
   but is now not needed (e.g., we folded OMITTED * 0).

   If OMITTED has side effects, we must evaluate it.  Otherwise, just do
   the conversion of RESULT to TYPE.  */

omit_one_operand (type, result, omitted)
     tree type, result, omitted;

  tree t = convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build (COMPOUND_EXPR, type, omitted, t);

  return non_lvalue (t);
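/* For example (reader's note): when fold reduces `f () * 0' it cannot
   simply drop the call, so omit_one_operand yields
   (COMPOUND_EXPR f () 0), evaluating f for its side effects and then
   producing 0; for a side-effect-free omitted operand the result is
   just the converted RESULT, wrapped as a non-lvalue.  */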
/* Return a simplified tree node for the truth-negation of ARG.  This
   never alters ARG itself.  We assume that ARG is an operation that
   returns a truth value (0 or 1).  */

invert_truthvalue (arg)

  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);

  /* If this is a comparison, we can simply invert it, except for
     floating-point non-equality comparisons, in which case we just
     enclose a TRUTH_NOT_EXPR around what we have.  */

  if (TREE_CODE_CLASS (code) == '<')

      if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
          && code != NE_EXPR && code != EQ_EXPR)
        return build1 (TRUTH_NOT_EXPR, type, arg);

        return build (invert_tree_comparison (code), type,
                      TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));

      return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
                                         && TREE_INT_CST_HIGH (arg) == 0, 0));

    case TRUTH_AND_EXPR:
      return build (TRUTH_OR_EXPR, type,
                    invert_truthvalue (TREE_OPERAND (arg, 0)),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

      return build (TRUTH_AND_EXPR, type,
                    invert_truthvalue (TREE_OPERAND (arg, 0)),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_XOR_EXPR:
      /* Here we can invert either operand.  We invert the first operand
         unless the second operand is a TRUTH_NOT_EXPR in which case our
         result is the XOR of the first operand with the inside of the
         negation of the second operand.  */

      if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
        return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
                      TREE_OPERAND (TREE_OPERAND (arg, 1), 0));

        return build (TRUTH_XOR_EXPR, type,
                      invert_truthvalue (TREE_OPERAND (arg, 0)),
                      TREE_OPERAND (arg, 1));

    case TRUTH_ANDIF_EXPR:
      return build (TRUTH_ORIF_EXPR, type,
                    invert_truthvalue (TREE_OPERAND (arg, 0)),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_ORIF_EXPR:
      return build (TRUTH_ANDIF_EXPR, type,
                    invert_truthvalue (TREE_OPERAND (arg, 0)),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_NOT_EXPR:
      return TREE_OPERAND (arg, 0);

      return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
                    invert_truthvalue (TREE_OPERAND (arg, 1)),
                    invert_truthvalue (TREE_OPERAND (arg, 2)));

      return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
                    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case NON_LVALUE_EXPR:
      return invert_truthvalue (TREE_OPERAND (arg, 0));

      return build1 (TREE_CODE (arg), type,
                     invert_truthvalue (TREE_OPERAND (arg, 0)));

      if (! integer_onep (TREE_OPERAND (arg, 1)))

  return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
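/* For example (reader's note): inverting `a < b' yields `a >= b' for
   integer operands, but for IEEE floating point the result is built
   as (TRUTH_NOT_EXPR (a < b)), since `a >= b' and `! (a < b)' differ
   when either operand is a NaN.  */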
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
        (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.

   If this optimization cannot be done, 0 will be returned.  */

distribute_bit_expr (code, type, arg0, arg1)
     enum tree_code code;

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      || TREE_CODE (arg0) == code
      || (TREE_CODE (arg0) != BIT_AND_EXPR
          && TREE_CODE (arg0) != BIT_IOR_EXPR))

  if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))

      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 1);

  else if (operand_equal_p (TREE_OPERAND (arg0, 0),
                            TREE_OPERAND (arg1, 1), 0))

      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 0);

  else if (operand_equal_p (TREE_OPERAND (arg0, 1),
                            TREE_OPERAND (arg1, 0), 0))

      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 1);

  else if (operand_equal_p (TREE_OPERAND (arg0, 1),
                            TREE_OPERAND (arg1, 1), 0))

      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 0);

  return fold (build (TREE_CODE (arg0), type, common,
                      fold (build (code, type, left, right))));
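/* For example (reader's note): (x | 3) & (x | 5) becomes
   x | (3 & 5), and the inner fold reduces the constant part so the
   result is x | 1, one bit operation instead of three.  */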
/* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
   starting at BITPOS.  The field is unsigned if UNSIGNEDP is non-zero.  */

make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
     int bitsize, bitpos;

  tree result = build (BIT_FIELD_REF, type, inner,
                       size_int (bitsize), size_int (bitpos));

  TREE_UNSIGNED (result) = unsignedp;
/* Optimize a bit-field compare.

   There are two cases:  First is a compare against a constant and the
   second is a comparison of two items where the fields are at the same
   bit position relative to the start of a chunk (byte, halfword, word)
   large enough to contain it.  In these cases we can avoid the shift
   implicit in bitfield extractions.

   For constants, we emit a compare of the shifted constant with the
   BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
   compared.  For two fields at the same position, we do the ANDs with the
   similar mask and compare the result of the ANDs.

   CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
   COMPARE_TYPE is the type of the comparison, and LHS and RHS
   are the left and right operands of the comparison, respectively.

   If the optimization described above can be done, we return the resulting
   tree.  Otherwise we return zero.  */

optimize_bit_field_compare (code, compare_type, lhs, rhs)
     enum tree_code code;

  int lbitpos, lbitsize, rbitpos, rbitsize;
  int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
  tree type = TREE_TYPE (lhs);
  tree signed_type, unsigned_type;
  int const_p = TREE_CODE (rhs) == INTEGER_CST;
  enum machine_mode lmode, rmode, lnmode, rnmode;
  int lunsignedp, runsignedp;
  int lvolatilep = 0, rvolatilep = 0;
  tree linner, rinner;
  /* Get all the information about the extractions being done.  If the bit
     size is the same as the size of the underlying object, we aren't doing
     an extraction at all and so can do nothing.  */
  linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
                                &lunsignedp, &lvolatilep);
  if (lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0

  /* If this is not a constant, we can only do something if bit positions,
     sizes, and signedness are the same.  */
  rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
                                &rmode, &runsignedp, &rvolatilep);

  if (lbitpos != rbitpos || lbitsize != rbitsize
      || lunsignedp != runsignedp || offset != 0)

  /* See if we can find a mode to refer to this field.  We should be able to,
     but fail if we can't.  */
  lnmode = get_best_mode (lbitsize, lbitpos,
                          TYPE_ALIGN (TREE_TYPE (linner)), word_mode,

  if (lnmode == VOIDmode)
  /* Set signed and unsigned types of the precision of this mode for the
     shifts below.  */
  signed_type = type_for_mode (lnmode, 0);
  unsigned_type = type_for_mode (lnmode, 1);

      rnmode = get_best_mode (rbitsize, rbitpos,
                              TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,

      if (rnmode == VOIDmode)

  /* Compute the bit position and size for the new reference and our offset
     within it.  If the new reference is the same size as the original, we
     won't optimize anything, so return zero.  */
  lnbitsize = GET_MODE_BITSIZE (lnmode);
  lnbitpos = lbitpos & ~ (lnbitsize - 1);
  lbitpos -= lnbitpos;
  if (lnbitsize == lbitsize)

      rnbitsize = GET_MODE_BITSIZE (rnmode);
      rnbitpos = rbitpos & ~ (rnbitsize - 1);
      rbitpos -= rnbitpos;
      if (rnbitsize == rbitsize)

#if BYTES_BIG_ENDIAN
  lbitpos = lnbitsize - lbitsize - lbitpos;

  /* Make the mask to be used against the extracted field.  */
  mask = build_int_2 (~0, ~0);
  TREE_TYPE (mask) = unsigned_type;
  force_fit_type (mask, 0);
  mask = convert (unsigned_type, mask);
  mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
  mask = const_binop (RSHIFT_EXPR, mask,
                      size_int (lnbitsize - lbitsize - lbitpos), 0);
  /* If not comparing with constant, just rework the comparison
     and return.  */

    return build (code, compare_type,
                  build (BIT_AND_EXPR, unsigned_type,
                         make_bit_field_ref (linner, unsigned_type,
                                             lnbitsize, lnbitpos, 1),

                  build (BIT_AND_EXPR, unsigned_type,
                         make_bit_field_ref (rinner, unsigned_type,
                                             rnbitsize, rnbitpos, 1),
  /* Otherwise, we are handling the constant case.  See if the constant is too
     big for the field.  Warn and return a constant tree for the known result
     if so.  We do this not only for its own sake, but to avoid having to test
     for this error case below.  If we didn't, we might generate wrong code.

     For unsigned fields, the constant shifted right by the field length
     should be all zero.  For signed fields, the high-order bits should agree
     with the sign bit.  */
2373 convert (unsigned_type, rhs),
2374 size_int (lbitsize), 0)))
2376 warning ("comparison is always %s due to width of bitfield",
2377 code == NE_EXPR ? "one" : "zero");
2378 return convert (compare_type,
2380 ? integer_one_node : integer_zero_node));
2385 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2386 size_int (lbitsize - 1), 0);
2387 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2389 warning ("comparison is always %s due to width of bitfield",
2390 code == NE_EXPR ? "one" : "zero");
2391 return convert (compare_type,
2393 ? integer_one_node : integer_zero_node));
2397 /* Single-bit compares should always be against zero. */
2398 if (lbitsize == 1 && ! integer_zerop (rhs))
2400 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2401 rhs = convert (type, integer_zero_node);
2404 /* Make a new bitfield reference, shift the constant over the
2405 appropriate number of bits and mask it with the computed mask
2406 (in case this was a signed field). If we changed it, make a new one. */
2407 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2409 rhs = fold (const_binop (BIT_AND_EXPR,
2410 const_binop (LSHIFT_EXPR,
2411 convert (unsigned_type, rhs),
2412 size_int (lbitpos), 0),
2415 return build (code, compare_type,
2416 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
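/* For example (reader's note, assuming a little-endian target):
   given `struct s { unsigned f : 3; } x;', the test `x.f == 5' is
   rewritten as roughly (w & (7 << pos)) == (5 << pos), where w is a
   mode-sized load containing the field, avoiding the shift implicit
   in extracting x.f.  */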
2420 /* Subroutine for fold_truthop: decode a field reference.
2422 If EXP is a comparison reference, we return the innermost reference.
2424 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2425 set to the starting bit number.
2427 If the innermost field can be completely contained in a mode-sized
2428 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2430 *PVOLATILEP is set to 1 if any expression encountered is volatile;
2431 otherwise it is not changed.
2433 *PUNSIGNEDP is set to the signedness of the field.
2435 *PMASK is set to the mask used. This is either contained in a
2436 BIT_AND_EXPR or derived from the width of the field.
2438 Return 0 if this is not a component reference or is one that we can't
2439 do anything with. */
2442 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2445 int *pbitsize, *pbitpos;
2446 enum machine_mode *pmode;
2447 int *punsignedp, *pvolatilep;
2454 /* All the optimizations using this function assume integer fields.
2455 There are problems with FP fields since the type_for_size call
2456 below can fail for, e.g., XFmode. */
2457 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
2462 if (TREE_CODE (exp) == BIT_AND_EXPR)
2464 mask = TREE_OPERAND (exp, 1);
2465 exp = TREE_OPERAND (exp, 0);
2466 STRIP_NOPS (exp); STRIP_NOPS (mask);
2467 if (TREE_CODE (mask) != INTEGER_CST)
2471 if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
2472 && TREE_CODE (exp) != BIT_FIELD_REF)
2475 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2476 punsignedp, pvolatilep);
2477 if (*pbitsize < 0 || offset != 0)
2482 tree unsigned_type = type_for_size (*pbitsize, 1);
2483 int precision = TYPE_PRECISION (unsigned_type);
2485 mask = build_int_2 (~0, ~0);
2486 TREE_TYPE (mask) = unsigned_type;
2487 force_fit_type (mask, 0);
2488 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2489 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2496 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order bits. */
2500 all_ones_mask_p (mask, size)
2504 tree type = TREE_TYPE (mask);
2505 int precision = TYPE_PRECISION (type);
2508 tmask = build_int_2 (~0, ~0);
2509 TREE_TYPE (tmask) = signed_type (type);
2510 force_fit_type (tmask, 0);
2512 operand_equal_p (mask,
2513 const_binop (RSHIFT_EXPR,
2514 const_binop (LSHIFT_EXPR, tmask,
2515 size_int (precision - size), 0),
2516 size_int (precision - size), 0),
2520 /* Subroutine for fold_truthop: determine if an operand is simple enough
2521 to be evaluated unconditionally. */
2524 simple_operand_p (exp)
2527 /* Strip any conversions that don't change the machine mode. */
2528 while ((TREE_CODE (exp) == NOP_EXPR
2529 || TREE_CODE (exp) == CONVERT_EXPR)
2530 && (TYPE_MODE (TREE_TYPE (exp))
2531 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2532 exp = TREE_OPERAND (exp, 0);
2534 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2535 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2536 && ! TREE_ADDRESSABLE (exp)
2537 && ! TREE_THIS_VOLATILE (exp)
2538 && ! DECL_NONLOCAL (exp)
2539 /* Don't regard global variables as simple. They may be
2540 allocated in ways unknown to the compiler (shared memory,
2541 #pragma weak, etc). */
2542 && ! TREE_PUBLIC (exp)
2543 && ! DECL_EXTERNAL (exp)
2544 /* Loading a static variable is unduly expensive, but global
2545 registers aren't expensive. */
2546 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2549 /* Subroutine for fold_truthop: try to optimize a range test.
2551 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2553 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2554 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2555 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of the result.
2558 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2559 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2560 larger than HI_CST (they may be equal).
2562 We return the simplified tree or 0 if no optimization is possible. */
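/* The dual case also arises: for TRUTH_OR_EXPR, "i < 2 || i > 9"
   becomes "(unsigned) (i - 2) > 7" (i is an illustrative variable).  */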
2565 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2566 enum tree_code jcode, lo_code, hi_code;
2567 tree type, var, lo_cst, hi_cst;
2570 enum tree_code rcode;
2572 /* See if this is a range test and normalize the constant terms. */
2574 if (jcode == TRUTH_AND_EXPR)
2579 /* See if we have VAR != CST && VAR != CST+1. */
2580 if (! (hi_code == NE_EXPR
2581 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2582 && tree_int_cst_equal (integer_one_node,
2583 const_binop (MINUS_EXPR,
2584 hi_cst, lo_cst, 0))))
2592 if (hi_code == LT_EXPR)
2593 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2594 else if (hi_code != LE_EXPR)
2597 if (lo_code == GT_EXPR)
2598 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2600 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
2613 /* See if we have VAR == CST || VAR == CST+1. */
2614 if (! (hi_code == EQ_EXPR
2615 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2616 && tree_int_cst_equal (integer_one_node,
2617 const_binop (MINUS_EXPR,
2618 hi_cst, lo_cst, 0))))
2626 if (hi_code == GE_EXPR)
2627 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2628 else if (hi_code != GT_EXPR)
2631 if (lo_code == LE_EXPR)
2632 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2634 /* We now have VAR < LO_CST || VAR > HI_CST. */
2643 /* When normalizing, it is possible to both increment the smaller constant
2644 and decrement the larger constant. See if they are still ordered. */
2645 if (tree_int_cst_lt (hi_cst, lo_cst))
2648 /* Fail if VAR isn't an integer. */
2649 utype = TREE_TYPE (var);
2650 if (! INTEGRAL_TYPE_P (utype))
2653 /* The range test is invalid if subtracting the two constants results
2654 in overflow. This can happen in traditional mode. */
2655 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2656 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2659 if (! TREE_UNSIGNED (utype))
2661 utype = unsigned_type (utype);
2662 var = convert (utype, var);
2663 lo_cst = convert (utype, lo_cst);
2664 hi_cst = convert (utype, hi_cst);
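/* The conversion to an unsigned type is what makes one comparison
   sufficient: after subtracting LO_CST, any value below LO_CST wraps
   around past HI_CST - LO_CST, so a single unsigned comparison checks
   both bounds at once.  */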
2667 return fold (convert (type,
2668 build (rcode, utype,
2669 build (MINUS_EXPR, utype, var, lo_cst),
2670 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2673 /* Find ways of folding logical expressions of LHS and RHS:
2674 Try to merge two comparisons to the same innermost item.
2675 Look for range tests like "ch >= '0' && ch <= '9'".
2676 Look for combinations of simple terms on machines with expensive branches
2677 and evaluate the RHS unconditionally.
2679 For example, if we have p->a == 2 && p->b == 4 and we can make an
2680 object large enough to span both A and B, we can do this with a comparison
2681 against the object ANDed with a mask.
2683 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2684 operations to do this with one comparison.
2686 We check for both normal comparisons and the BIT_AND_EXPRs made by
2687 this function and the one above.
2689 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2690 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2692 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its two operands.
2695 We return the simplified tree or 0 if no optimization is possible. */
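/* As a sketch (the field layout is target-dependent): if p->a occupies
   bits 0..7 and p->b bits 8..15 of the same word on a little-endian
   machine, "p->a == 2 && p->b == 4" can become one comparison of those
   16 bits against the constant 0x0402.  */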
2698 fold_truthop (code, truth_type, lhs, rhs)
2699 enum tree_code code;
2700 tree truth_type, lhs, rhs;
2702 /* If this is the "or" of two comparisons, we can do something if
2703 the comparisons are NE_EXPR. If this is the "and", we can do something
2704 if the comparisons are EQ_EXPR. I.e.,
2705 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2707 WANTED_CODE is this operation code. For single bit fields, we can
2708 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2709 comparison for one-bit fields. */
2711 enum tree_code wanted_code;
2712 enum tree_code lcode, rcode;
2713 tree ll_arg, lr_arg, rl_arg, rr_arg;
2714 tree ll_inner, lr_inner, rl_inner, rr_inner;
2715 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2716 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2717 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2718 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2719 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2720 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2721 enum machine_mode lnmode, rnmode;
2722 tree ll_mask, lr_mask, rl_mask, rr_mask;
2723 tree l_const, r_const;
2725 int first_bit, end_bit;
2728 /* Start by getting the comparison codes and seeing if this looks like
2729 a range test. Fail if anything is volatile. If one operand is a
2730 BIT_AND_EXPR with the constant one, treat it as if it were surrounded by a comparison against zero. */
2733 if (TREE_SIDE_EFFECTS (lhs)
2734 || TREE_SIDE_EFFECTS (rhs))
2737 lcode = TREE_CODE (lhs);
2738 rcode = TREE_CODE (rhs);
2740 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2741 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2743 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2744 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
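/* E.g. an operand written "x & 1" is treated here as "(x & 1) != 0",
   so tests of the low bit participate in the merging below.  */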
2746 if (TREE_CODE_CLASS (lcode) != '<'
2747 || TREE_CODE_CLASS (rcode) != '<')
2750 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2751 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2753 ll_arg = TREE_OPERAND (lhs, 0);
2754 lr_arg = TREE_OPERAND (lhs, 1);
2755 rl_arg = TREE_OPERAND (rhs, 0);
2756 rr_arg = TREE_OPERAND (rhs, 1);
2758 if (TREE_CODE (lr_arg) == INTEGER_CST
2759 && TREE_CODE (rr_arg) == INTEGER_CST
2760 && operand_equal_p (ll_arg, rl_arg, 0))
2762 if (tree_int_cst_lt (lr_arg, rr_arg))
2763 result = range_test (code, truth_type, lcode, rcode,
2764 ll_arg, lr_arg, rr_arg);
2766 result = range_test (code, truth_type, rcode, lcode,
2767 ll_arg, rr_arg, lr_arg);
2769 /* If this isn't a range test, it also isn't a comparison that
2770 can be merged. However, it wins to evaluate the RHS unconditionally
2771 on machines with expensive branches. */
2773 if (result == 0 && BRANCH_COST >= 2)
2775 if (TREE_CODE (ll_arg) != VAR_DECL
2776 && TREE_CODE (ll_arg) != PARM_DECL)
2778 /* Avoid evaluating the variable part twice. */
2779 ll_arg = save_expr (ll_arg);
2780 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2781 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2783 return build (code, truth_type, lhs, rhs);
2788 /* If the RHS can be evaluated unconditionally and its operands are
2789 simple, it wins to evaluate the RHS unconditionally on machines
2790 with expensive branches. In this case, this isn't a comparison
2791 that can be merged. */
2793 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2794 are with zero (tmw). */
2796 if (BRANCH_COST >= 2
2797 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2798 && simple_operand_p (rl_arg)
2799 && simple_operand_p (rr_arg))
2800 return build (code, truth_type, lhs, rhs);
2802 /* See if the comparisons can be merged. Then get all the parameters for
2805 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2806 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2810 ll_inner = decode_field_reference (ll_arg,
2811 &ll_bitsize, &ll_bitpos, &ll_mode,
2812 &ll_unsignedp, &volatilep, &ll_mask);
2813 lr_inner = decode_field_reference (lr_arg,
2814 &lr_bitsize, &lr_bitpos, &lr_mode,
2815 &lr_unsignedp, &volatilep, &lr_mask);
2816 rl_inner = decode_field_reference (rl_arg,
2817 &rl_bitsize, &rl_bitpos, &rl_mode,
2818 &rl_unsignedp, &volatilep, &rl_mask);
2819 rr_inner = decode_field_reference (rr_arg,
2820 &rr_bitsize, &rr_bitpos, &rr_mode,
2821 &rr_unsignedp, &volatilep, &rr_mask);
2823 /* The inner operation on the lhs of each comparison must be the
2824 same if we are to be able to do anything. Then see if we have
2825 constants. If not, the same must be true for the rhs operands. */
2827 if (volatilep || ll_inner == 0 || rl_inner == 0
2828 || ! operand_equal_p (ll_inner, rl_inner, 0))
2831 if (TREE_CODE (lr_arg) == INTEGER_CST
2832 && TREE_CODE (rr_arg) == INTEGER_CST)
2833 l_const = lr_arg, r_const = rr_arg;
2834 else if (lr_inner == 0 || rr_inner == 0
2835 || ! operand_equal_p (lr_inner, rr_inner, 0))
2838 l_const = r_const = 0;
2840 /* If either comparison code is not correct for our logical operation,
2841 fail. However, we can convert a one-bit comparison against zero into
2842 the opposite comparison against that bit being set in the field. */
2844 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2845 if (lcode != wanted_code)
2847 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2853 if (rcode != wanted_code)
2855 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2861 /* See if we can find a mode that contains both fields being compared on
2862 the left. If we can't, fail. Otherwise, update all constants and masks
2863 to be relative to a field of that size. */
2864 first_bit = MIN (ll_bitpos, rl_bitpos);
2865 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2866 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2867 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2869 if (lnmode == VOIDmode)
2872 lnbitsize = GET_MODE_BITSIZE (lnmode);
2873 lnbitpos = first_bit & ~ (lnbitsize - 1);
2874 type = type_for_size (lnbitsize, 1);
2875 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2877 #if BYTES_BIG_ENDIAN
2878 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2879 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2880 #endif
2882 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2883 size_int (xll_bitpos), 0);
2884 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2885 size_int (xrl_bitpos), 0);
2887 /* Make sure the constants are interpreted as unsigned, so we
2888 don't have sign bits outside the range of their type. */
2892 l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
2893 l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
2894 size_int (xll_bitpos), 0);
2898 r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
2899 r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
2900 size_int (xrl_bitpos), 0);
2903 /* If the right sides are not constant, do the same for them. Also,
2904 disallow this optimization if a size or signedness mismatch occurs
2905 between the left and right sides. */
2908 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2909 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2910 /* Make sure the two fields on the right
2911 correspond to the left without being swapped. */
2912 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2915 first_bit = MIN (lr_bitpos, rr_bitpos);
2916 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2917 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2918 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2920 if (rnmode == VOIDmode)
2923 rnbitsize = GET_MODE_BITSIZE (rnmode);
2924 rnbitpos = first_bit & ~ (rnbitsize - 1);
2925 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2927 #if BYTES_BIG_ENDIAN
2928 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2929 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2930 #endif
2932 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2933 size_int (xlr_bitpos), 0);
2934 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2935 size_int (xrr_bitpos), 0);
2937 /* Make a mask that corresponds to both fields being compared.
2938 Do this for both items being compared. If the masks agree,
2939 we can do this by masking both and comparing the masked results. */
2941 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2942 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2943 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2945 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2946 ll_unsignedp || rl_unsignedp);
2947 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2948 lr_unsignedp || rr_unsignedp);
2949 if (! all_ones_mask_p (ll_mask, lnbitsize))
2951 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2952 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2954 return build (wanted_code, truth_type, lhs, rhs);
2957 /* There is still another way we can do something: If both pairs of
2958 fields being compared are adjacent, we may be able to make a wider
2959 field containing them both. */
2960 if ((ll_bitsize + ll_bitpos == rl_bitpos
2961 && lr_bitsize + lr_bitpos == rr_bitpos)
2962 || (ll_bitpos == rl_bitpos + rl_bitsize
2963 && lr_bitpos == rr_bitpos + rr_bitsize))
2964 return build (wanted_code, truth_type,
2965 make_bit_field_ref (ll_inner, type,
2966 ll_bitsize + rl_bitsize,
2967 MIN (ll_bitpos, rl_bitpos),
2969 make_bit_field_ref (lr_inner, type,
2970 lr_bitsize + rr_bitsize,
2971 MIN (lr_bitpos, rr_bitpos),
2977 /* Handle the case of comparisons with constants. If there is something in
2978 common between the masks, those bits of the constants must be the same.
2979 If not, the condition is always false. Test for this to avoid generating
2980 incorrect code below. */
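/* E.g. if both tests examine the same field f, "f == 1 && f == 2" can
   only be false, and dually "f != 1 || f != 2" can only be true.  */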
2981 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2982 if (! integer_zerop (result)
2983 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2984 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2986 if (wanted_code == NE_EXPR)
2988 warning ("`or' of unmatched not-equal tests is always 1");
2989 return convert (truth_type, integer_one_node);
2993 warning ("`and' of mutually exclusive equal-tests is always zero");
2994 return convert (truth_type, integer_zero_node);
2998 /* Construct the expression we will return. First get the component
2999 reference we will make. Unless the mask is all ones the width of
3000 that field, perform the mask operation. Then compare with the merged constant. */
3002 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
3003 ll_unsignedp || rl_unsignedp);
3005 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
3006 if (! all_ones_mask_p (ll_mask, lnbitsize))
3007 result = build (BIT_AND_EXPR, type, result, ll_mask);
3009 return build (wanted_code, truth_type, result,
3010 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
3013 /* Perform constant folding and related simplification of EXPR.
3014 The related simplifications include x*1 => x, x*0 => 0, etc.,
3015 and application of the associative law.
3016 NOP_EXPR conversions may be removed freely (as long as we
3017 are careful not to change the C type of the overall expression).
3018 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3019 but we can constant-fold them if they have constant operands. */
3025 register tree t = expr;
3026 tree t1 = NULL_TREE;
3028 tree type = TREE_TYPE (expr);
3029 register tree arg0, arg1;
3030 register enum tree_code code = TREE_CODE (t);
3034 /* WINS will be nonzero when the switch is done
3035 if all operands are constant. */
3039 /* Return right away if already constant. */
3040 if (TREE_CONSTANT (t))
3042 if (code == CONST_DECL)
3043 return DECL_INITIAL (t);
3047 kind = TREE_CODE_CLASS (code);
3048 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3052 /* Special case for conversion ops that can have fixed point args. */
3053 arg0 = TREE_OPERAND (t, 0);
3055 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3057 STRIP_TYPE_NOPS (arg0);
3059 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3060 subop = TREE_REALPART (arg0);
3064 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3065 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3066 && TREE_CODE (subop) != REAL_CST
3067 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3069 /* Note that TREE_CONSTANT isn't enough:
3070 static var addresses are constant but we can't
3071 do arithmetic on them. */
3074 else if (kind == 'e' || kind == '<'
3075 || kind == '1' || kind == '2' || kind == 'r')
3077 register int len = tree_code_length[(int) code];
3079 for (i = 0; i < len; i++)
3081 tree op = TREE_OPERAND (t, i);
3085 continue; /* Valid for CALL_EXPR, at least. */
3087 if (kind == '<' || code == RSHIFT_EXPR)
3089 /* Signedness matters here. Perhaps we can refine this later. */
3091 STRIP_TYPE_NOPS (op);
3095 /* Strip any conversions that don't change the mode. */
3099 if (TREE_CODE (op) == COMPLEX_CST)
3100 subop = TREE_REALPART (op);
3104 if (TREE_CODE (subop) != INTEGER_CST
3105 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3106 && TREE_CODE (subop) != REAL_CST
3107 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3109 /* Note that TREE_CONSTANT isn't enough:
3110 static var addresses are constant but we can't
3111 do arithmetic on them. */
3121 /* If this is a commutative operation, and ARG0 is a constant, move it
3122 to ARG1 to reduce the number of tests below. */
3123 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3124 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3125 || code == BIT_AND_EXPR)
3126 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3128 tem = arg0; arg0 = arg1; arg1 = tem;
3130 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3131 TREE_OPERAND (t, 1) = tem;
3134 /* Now WINS is set as described above,
3135 ARG0 is the first operand of EXPR,
3136 and ARG1 is the second operand (if it has more than one operand).
3138 First check for cases where an arithmetic operation is applied to a
3139 compound, conditional, or comparison operation. Push the arithmetic
3140 operation inside the compound or conditional to see if any folding
3141 can then be done. Convert comparison to conditional for this purpose.
3142 This also optimizes non-constant cases that used to be done in expand_expr.
3145 Before we do that, see if this is a BIT_AND_EXPR or a BIT_OR_EXPR,
3146 one of the operands is a comparison and the other is either a comparison
3147 or a BIT_AND_EXPR with the constant 1. In that case, the code below
3148 would make the expression more complex. Change it to a
3149 TRUTH_{AND,OR}_EXPR. */
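/* E.g. "(a < b) & (c < d)" is rewritten as the TRUTH_AND_EXPR form of
   "(a < b) && (c < d)"; since each comparison yields 0 or 1, the
   bitwise and logical forms agree.  */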
3151 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR)
3152 && ((TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
3153 && (TREE_CODE_CLASS (TREE_CODE (arg1)) == '<'
3154 || (TREE_CODE (arg1) == BIT_AND_EXPR
3155 && integer_onep (TREE_OPERAND (arg1, 1)))))
3156 || (TREE_CODE_CLASS (TREE_CODE (arg1)) == '<'
3157 && (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
3158 || (TREE_CODE (arg0) == BIT_AND_EXPR
3159 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3160 return fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
3163 if (TREE_CODE_CLASS (code) == '1')
3165 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3166 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3167 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3168 else if (TREE_CODE (arg0) == COND_EXPR)
3170 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3171 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3172 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3174 /* If this was a conversion, and all we did was to move it
3175 inside the COND_EXPR, bring it back out. Then return so we
3176 don't get into an infinite recursion loop taking the conversion
3177 out and then back in. */
3179 if ((code == NOP_EXPR || code == CONVERT_EXPR
3180 || code == NON_LVALUE_EXPR)
3181 && TREE_CODE (t) == COND_EXPR
3182 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3183 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3184 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3185 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0))))
3186 t = build1 (code, type,
3188 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3189 TREE_OPERAND (t, 0),
3190 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3191 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3194 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3195 return fold (build (COND_EXPR, type, arg0,
3196 fold (build1 (code, type, integer_one_node)),
3197 fold (build1 (code, type, integer_zero_node))));
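/* E.g. a unary operation such as "-(p ? b : c)" is tried as
   "p ? -b : -c", and a conversion "(type) (a < b)" as
   "(a < b) ? (type) 1 : (type) 0", either of which may fold further.  */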
3199 else if (TREE_CODE_CLASS (code) == '2'
3200 || TREE_CODE_CLASS (code) == '<')
3202 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3203 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3204 fold (build (code, type,
3205 arg0, TREE_OPERAND (arg1, 1))));
3206 else if (TREE_CODE (arg1) == COND_EXPR
3207 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3209 tree test, true_value, false_value;
3211 if (TREE_CODE (arg1) == COND_EXPR)
3213 test = TREE_OPERAND (arg1, 0);
3214 true_value = TREE_OPERAND (arg1, 1);
3215 false_value = TREE_OPERAND (arg1, 2);
3220 true_value = integer_one_node;
3221 false_value = integer_zero_node;
3224 /* If ARG0 is complex we want to make sure we only evaluate
3225 it once. Though this is only required if it is volatile, it
3226 might be more efficient even if it is not. However, if we
3227 succeed in folding one part to a constant, we do not need
3228 to make this SAVE_EXPR. Since we do this optimization
3229 primarily to see if we do end up with a constant and this
3230 SAVE_EXPR interferes with later optimizations, suppressing
3231 it when we can is important. */
3233 if ((TREE_CODE (arg0) != VAR_DECL && TREE_CODE (arg0) != PARM_DECL)
3234 || TREE_SIDE_EFFECTS (arg0))
3236 tree lhs = fold (build (code, type, arg0, true_value));
3237 tree rhs = fold (build (code, type, arg0, false_value));
3239 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3240 return fold (build (COND_EXPR, type, test, lhs, rhs));
3242 arg0 = save_expr (arg0);
3245 test = fold (build (COND_EXPR, type, test,
3246 fold (build (code, type, arg0, true_value)),
3247 fold (build (code, type, arg0, false_value))));
3248 if (TREE_CODE (arg0) == SAVE_EXPR)
3249 return build (COMPOUND_EXPR, type,
3250 convert (void_type_node, arg0), test);
3252 return convert (type, test);
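/* E.g. "x + (p ? 1 : 2)" is tried as "p ? x + 1 : x + 2"; when neither
   arm folds to a constant, x is first wrapped in a SAVE_EXPR so it is
   evaluated only once.  */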
3255 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3256 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3257 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3258 else if (TREE_CODE (arg0) == COND_EXPR
3259 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3261 tree test, true_value, false_value;
3263 if (TREE_CODE (arg0) == COND_EXPR)
3265 test = TREE_OPERAND (arg0, 0);
3266 true_value = TREE_OPERAND (arg0, 1);
3267 false_value = TREE_OPERAND (arg0, 2);
3272 true_value = integer_one_node;
3273 false_value = integer_zero_node;
3276 if ((TREE_CODE (arg1) != VAR_DECL && TREE_CODE (arg1) != PARM_DECL)
3277 || TREE_SIDE_EFFECTS (arg1))
3279 tree lhs = fold (build (code, type, true_value, arg1));
3280 tree rhs = fold (build (code, type, false_value, arg1));
3282 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3283 return fold (build (COND_EXPR, type, test, lhs, rhs));
3285 arg1 = save_expr (arg1);
3288 test = fold (build (COND_EXPR, type, test,
3289 fold (build (code, type, true_value, arg1)),
3290 fold (build (code, type, false_value, arg1))));
3291 if (TREE_CODE (arg1) == SAVE_EXPR)
3292 return build (COMPOUND_EXPR, type,
3293 convert (void_type_node, arg1), test);
3295 return convert (type, test);
3298 else if (TREE_CODE_CLASS (code) == '<'
3299 && TREE_CODE (arg0) == COMPOUND_EXPR)
3300 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3301 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3302 else if (TREE_CODE_CLASS (code) == '<'
3303 && TREE_CODE (arg1) == COMPOUND_EXPR)
3304 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3305 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3317 return fold (DECL_INITIAL (t));
3322 case FIX_TRUNC_EXPR:
3323 /* Other kinds of FIX are not handled properly by fold_convert. */
3324 /* Two conversions in a row are not needed unless:
3325 - the intermediate type is narrower than both initial and final, or
3326 - the intermediate type and innermost type differ in signedness,
3327 and the outermost type is wider than the intermediate, or
3328 - the initial type is a pointer type and the precisions of the
3329 intermediate and final types differ, or
3330 - the final type is a pointer type and the precisions of the
3331 initial and intermediate types differ. */
3332 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3333 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3334 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3335 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3337 TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3338 > TYPE_PRECISION (TREE_TYPE (t)))
3339 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3341 && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
3343 && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3344 != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3345 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3346 < TYPE_PRECISION (TREE_TYPE (t))))
3347 && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3348 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3349 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
3351 (TREE_UNSIGNED (TREE_TYPE (t))
3352 && (TYPE_PRECISION (TREE_TYPE (t))
3353 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3354 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3356 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3357 != TYPE_PRECISION (TREE_TYPE (t))))
3358 && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
3359 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3360 != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3361 return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3363 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3364 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3365 /* Detect assigning a bitfield. */
3366 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3367 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3369 /* Don't leave an assignment inside a conversion
3370 unless assigning a bitfield. */
3371 tree prev = TREE_OPERAND (t, 0);
3372 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3373 /* First do the assignment, then return converted constant. */
3374 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3380 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3383 return fold_convert (t, arg0);
3385 #if 0 /* This loses on &"foo"[0]. */
3390 /* Fold an expression like: "foo"[2] */
3391 if (TREE_CODE (arg0) == STRING_CST
3392 && TREE_CODE (arg1) == INTEGER_CST
3393 && !TREE_INT_CST_HIGH (arg1)
3394 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3396 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3397 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3398 force_fit_type (t, 0);
3405 TREE_CONSTANT (t) = wins;
3411 if (TREE_CODE (arg0) == INTEGER_CST)
3413 HOST_WIDE_INT low, high;
3414 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3415 TREE_INT_CST_HIGH (arg0),
3417 t = build_int_2 (low, high);
3418 TREE_TYPE (t) = type;
3420 = (TREE_OVERFLOW (arg0)
3421 | force_fit_type (t, overflow));
3422 TREE_CONSTANT_OVERFLOW (t)
3423 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3425 else if (TREE_CODE (arg0) == REAL_CST)
3426 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3427 TREE_TYPE (t) = type;
3429 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3430 return TREE_OPERAND (arg0, 0);
3432 /* Convert - (a - b) to (b - a) for non-floating-point. */
3433 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3434 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3435 TREE_OPERAND (arg0, 0));
3442 if (TREE_CODE (arg0) == INTEGER_CST)
3444 if (! TREE_UNSIGNED (type)
3445 && TREE_INT_CST_HIGH (arg0) < 0)
3447 HOST_WIDE_INT low, high;
3448 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3449 TREE_INT_CST_HIGH (arg0),
3451 t = build_int_2 (low, high);
3452 TREE_TYPE (t) = type;
3454 = (TREE_OVERFLOW (arg0)
3455 | force_fit_type (t, overflow));
3456 TREE_CONSTANT_OVERFLOW (t)
3457 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3460 else if (TREE_CODE (arg0) == REAL_CST)
3462 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3463 t = build_real (type,
3464 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3466 TREE_TYPE (t) = type;
3468 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3469 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3475 if (TREE_CODE (arg0) == INTEGER_CST)
3476 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3477 ~ TREE_INT_CST_HIGH (arg0));
3478 TREE_TYPE (t) = type;
3479 force_fit_type (t, 0);
3480 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
3481 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3483 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3484 return TREE_OPERAND (arg0, 0);
3488 /* A + (-B) -> A - B */
3489 if (TREE_CODE (arg1) == NEGATE_EXPR)
3490 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3491 else if (! FLOAT_TYPE_P (type))
3493 if (integer_zerop (arg1))
3494 return non_lvalue (convert (type, arg0));
3496 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3497 with a constant, and the two constants have no bits in common,
3498 we should treat this as a BIT_IOR_EXPR since this may produce more
3500 if (TREE_CODE (arg0) == BIT_AND_EXPR
3501 && TREE_CODE (arg1) == BIT_AND_EXPR
3502 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3503 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3504 && integer_zerop (const_binop (BIT_AND_EXPR,
3505 TREE_OPERAND (arg0, 1),
3506 TREE_OPERAND (arg1, 1), 0)))
3508 code = BIT_IOR_EXPR;
3512 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3513 about the case where C is a constant, just try one of the
3514 four possibilities. */
3516 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3517 && operand_equal_p (TREE_OPERAND (arg0, 1),
3518 TREE_OPERAND (arg1, 1), 0))
3519 return fold (build (MULT_EXPR, type,
3520 fold (build (PLUS_EXPR, type,
3521 TREE_OPERAND (arg0, 0),
3522 TREE_OPERAND (arg1, 0))),
3523 TREE_OPERAND (arg0, 1)));
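/* E.g. "a * 4 + b * 4" folds to "(a + b) * 4".  */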
3525 /* In IEEE floating point, x+0 may not equal x. */
3526 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3527 && real_zerop (arg1))
3528 return non_lvalue (convert (type, arg0));
3530 /* In most languages, we can't associate operations on floats
3531 through parentheses. Rather than remember where the parentheses
3532 were, we don't associate floats at all. It shouldn't matter much. */
3533 if (FLOAT_TYPE_P (type))
3535 /* The varsign == -1 cases happen only for addition and subtraction.
3536 It says that the arg that was split was really CON minus VAR.
3537 The rest of the code applies to all associative operations. */
3543 if (split_tree (arg0, code, &var, &con, &varsign))
3547 /* EXPR is (CON-VAR) +- ARG1. */
3548 /* If it is + and VAR==ARG1, return just CONST. */
3549 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3550 return convert (TREE_TYPE (t), con);
3552 /* If ARG0 is a constant, don't change things around;
3553 instead keep all the constant computations together. */
3555 if (TREE_CONSTANT (arg0))
3558 /* Otherwise return (CON +- ARG1) - VAR. */
3559 TREE_SET_CODE (t, MINUS_EXPR);
3560 TREE_OPERAND (t, 1) = var;
3562 = fold (build (code, TREE_TYPE (t), con, arg1));
3566 /* EXPR is (VAR+CON) +- ARG1. */
3567 /* If it is - and VAR==ARG1, return just CONST. */
3568 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3569 return convert (TREE_TYPE (t), con);
3571 /* If ARG0 is a constant, don't change things around;
3572 instead keep all the constant computations together. */
3574 if (TREE_CONSTANT (arg0))
3577 /* Otherwise return VAR +- (ARG1 +- CON). */
3578 TREE_OPERAND (t, 1) = tem
3579 = fold (build (code, TREE_TYPE (t), arg1, con));
3580 TREE_OPERAND (t, 0) = var;
3581 if (integer_zerop (tem)
3582 && (code == PLUS_EXPR || code == MINUS_EXPR))
3583 return convert (type, var);
3584 /* If we have x +/- (c - d) [c an explicit integer]
3585 change it to x -/+ (d - c) since if d is relocatable
3586 then the latter can be a single immediate insn
3587 and the former cannot. */
3588 if (TREE_CODE (tem) == MINUS_EXPR
3589 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3591 tree tem1 = TREE_OPERAND (tem, 1);
3592 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3593 TREE_OPERAND (tem, 0) = tem1;
3595 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
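/* E.g. "x + (4 - d)" becomes "x - (d - 4)"; when d is relocatable the
   rewritten form can be a single immediate-operand insn.  */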
3601 if (split_tree (arg1, code, &var, &con, &varsign))
3603 /* EXPR is ARG0 +- (CON +- VAR). */
3604 if (TREE_CODE (t) == MINUS_EXPR
3605 && operand_equal_p (var, arg0, 0))
3607 /* If VAR and ARG0 cancel, return just CON or -CON. */
3608 if (code == PLUS_EXPR)
3609 return convert (TREE_TYPE (t), con);
3610 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3611 convert (TREE_TYPE (t), con)));
3613 if (TREE_CONSTANT (arg1))
3617 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3619 = fold (build (code, TREE_TYPE (t), arg0, con));
3620 TREE_OPERAND (t, 1) = var;
3621 if (integer_zerop (TREE_OPERAND (t, 0))
3622 && TREE_CODE (t) == PLUS_EXPR)
3623 return convert (TREE_TYPE (t), var);
3628 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3629 if (TREE_CODE (arg1) == REAL_CST)
3631 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3633 t1 = const_binop (code, arg0, arg1, 0);
3634 if (t1 != NULL_TREE)
3636 /* The return value should always have
3637 the same type as the original expression. */
3638 TREE_TYPE (t1) = TREE_TYPE (t);
3644 if (! FLOAT_TYPE_P (type))
3646 if (! wins && integer_zerop (arg0))
3647 return build1 (NEGATE_EXPR, type, arg1);
3648 if (integer_zerop (arg1))
3649 return non_lvalue (convert (type, arg0));
3651 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3652 about the case where C is a constant, just try one of the
3653 four possibilities. */
3655 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3656 && operand_equal_p (TREE_OPERAND (arg0, 1),
3657 TREE_OPERAND (arg1, 1), 0))
3658 return fold (build (MULT_EXPR, type,
3659 fold (build (MINUS_EXPR, type,
3660 TREE_OPERAND (arg0, 0),
3661 TREE_OPERAND (arg1, 0))),
3662 TREE_OPERAND (arg0, 1)));
3664 /* Convert A - (-B) to A + B. */
3665 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3666 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3667 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT)
3669 /* Except with IEEE floating point, 0-x equals -x. */
3670 if (! wins && real_zerop (arg0))
3671 return build1 (NEGATE_EXPR, type, arg1);
3672 /* Except with IEEE floating point, x-0 equals x. */
3673 if (real_zerop (arg1))
3674 return non_lvalue (convert (type, arg0));
3676 /* Fold &x - &x. This can happen from &x.foo - &x.
3677 This is unsafe for certain floats even in non-IEEE formats.
3678 In IEEE, it is unsafe because it does wrong for NaNs.
3679 Also note that operand_equal_p is always false if an operand is volatile. */
3682 if (operand_equal_p (arg0, arg1, FLOAT_TYPE_P (type)))
3683 return convert (type, integer_zero_node);
3688 if (! FLOAT_TYPE_P (type))
3690 if (integer_zerop (arg1))
3691 return omit_one_operand (type, arg1, arg0);
3692 if (integer_onep (arg1))
3693 return non_lvalue (convert (type, arg0));
3695 /* (a * (1 << b)) is (a << b) */
3696 if (TREE_CODE (arg1) == LSHIFT_EXPR
3697 && integer_onep (TREE_OPERAND (arg1, 0)))
3698 return fold (build (LSHIFT_EXPR, type, arg0,
3699 TREE_OPERAND (arg1, 1)));
3700 if (TREE_CODE (arg0) == LSHIFT_EXPR
3701 && integer_onep (TREE_OPERAND (arg0, 0)))
3702 return fold (build (LSHIFT_EXPR, type, arg1,
3703 TREE_OPERAND (arg0, 1)));
3707 /* x*0 is 0, except for IEEE floating point. */
3708 if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3709 && real_zerop (arg1))
3710 return omit_one_operand (type, arg1, arg0);
3711 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3712 However, ANSI says we can drop signals,
3713 so we can do this anyway. */
3714 if (real_onep (arg1))
3715 return non_lvalue (convert (type, arg0));
3717 if (! wins && real_twop (arg1))
3719 tree arg = save_expr (arg0);
3720 return build (PLUS_EXPR, type, arg, arg);
3727 if (integer_all_onesp (arg1))
3728 return omit_one_operand (type, arg1, arg0);
3729 if (integer_zerop (arg1))
3730 return non_lvalue (convert (type, arg0));
3731 t1 = distribute_bit_expr (code, type, arg0, arg1);
3732 if (t1 != NULL_TREE)
3735 /* (a << C1) | (a >> C2) is a rotate of A by C1 bits
3736 if A is unsigned and C1 + C2 is the size of A. */
3738 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3739 || TREE_CODE (arg0) == LSHIFT_EXPR)
3740 && (TREE_CODE (arg1) == RSHIFT_EXPR
3741 || TREE_CODE (arg1) == LSHIFT_EXPR)
3742 && TREE_CODE (arg0) != TREE_CODE (arg1)
3743 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3744 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3745 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3746 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3747 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3748 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3749 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3750 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3751 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3752 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3753 TREE_CODE (arg0) == LSHIFT_EXPR
3754 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
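/* E.g. for a 32-bit unsigned x, "(x << 3) | (x >> 29)" becomes a left
   rotate of x by 3 bits.  */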
3759 if (integer_zerop (arg1))
3760 return non_lvalue (convert (type, arg0));
3761 if (integer_all_onesp (arg1))
3762 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3767 if (integer_all_onesp (arg1))
3768 return non_lvalue (convert (type, arg0));
3769 if (integer_zerop (arg1))
3770 return omit_one_operand (type, arg1, arg0);
3771 t1 = distribute_bit_expr (code, type, arg0, arg1);
3772 if (t1 != NULL_TREE)
3774 /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char. */
3775 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3776 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3778 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3779 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3780 && (~TREE_INT_CST_LOW (arg0)
3781 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3782 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3784 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3785 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3787 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3788 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3789 && (~TREE_INT_CST_LOW (arg1)
3790 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3791 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3795 case BIT_ANDTC_EXPR:
3796 if (integer_all_onesp (arg0))
3797 return non_lvalue (convert (type, arg1));
3798 if (integer_zerop (arg0))
3799 return omit_one_operand (type, arg0, arg1);
3800 if (TREE_CODE (arg1) == INTEGER_CST)
3802 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3803 code = BIT_AND_EXPR;
3808 case TRUNC_DIV_EXPR:
3809 case ROUND_DIV_EXPR:
3810 case FLOOR_DIV_EXPR:
3812 case EXACT_DIV_EXPR:
3814 if (integer_onep (arg1))
3815 return non_lvalue (convert (type, arg0));
3816 if (integer_zerop (arg1))
3819 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
3820 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
3821 expressions, which often appear in the offsets or sizes of
3822 objects with a varying size. Only deal with positive divisors and multiplicands.
3825 Look for NOPs and SAVE_EXPRs inside. */
3827 if (TREE_CODE (arg1) == INTEGER_CST
3828 && tree_int_cst_lt (integer_zero_node, arg1))
3830 int have_save_expr = 0;
3831 tree c2 = integer_zero_node;
3834 if (TREE_CODE (xarg0) == SAVE_EXPR)
3835 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3839 if (TREE_CODE (xarg0) == PLUS_EXPR
3840 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3841 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
3842 else if (TREE_CODE (xarg0) == MINUS_EXPR
3843 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3845 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
3846 xarg0 = TREE_OPERAND (xarg0, 0);
3849 if (TREE_CODE (xarg0) == SAVE_EXPR)
3850 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3854 if (TREE_CODE (xarg0) == MULT_EXPR
3855 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
3856 && tree_int_cst_lt (integer_zero_node, TREE_OPERAND (xarg0, 1))
3857 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
3858 TREE_OPERAND (xarg0, 1), arg1, 1))
3859 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
3860 TREE_OPERAND (xarg0, 1), 1))))
3862 tree outer_div = integer_one_node;
3863 tree c1 = TREE_OPERAND (xarg0, 1);
3866 /* If C3 > C1, set them equal and do a divide by
3867 C3/C1 at the end of the operation. */
3868 if (tree_int_cst_lt (c1, c3))
3869 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
3871 /* The result is A * (C1/C3) + (C2/C3). */
3872 t = fold (build (PLUS_EXPR, type,
3873 fold (build (MULT_EXPR, type,
3874 TREE_OPERAND (xarg0, 0),
3875 const_binop (code, c1, c3, 1))),
3876 const_binop (code, c2, c3, 1)));
3878 if (! integer_onep (outer_div))
3879 t = fold (build (code, type, t, outer_div));
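/* E.g. with C1 == 8, C2 == 4 and C3 == 2, "((a * 8) + 4) / 2" folds to
   "a * 4 + 2"; when C3 > C1, the residual division by C3/C1 is applied
   to the whole result last.  */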
3888 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3889 #ifndef REAL_INFINITY
3890 if (TREE_CODE (arg1) == REAL_CST
3891 && real_zerop (arg1))
3892 return t;
3893 #endif
3894 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3899 case FLOOR_MOD_EXPR:
3900 case ROUND_MOD_EXPR:
3901 case TRUNC_MOD_EXPR:
3902 if (integer_onep (arg1))
3903 return omit_one_operand (type, integer_zero_node, arg0);
3904 if (integer_zerop (arg1))
3907 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
3908 where C1 % C3 == 0. Handle similarly to the division case,
3909 but don't bother with SAVE_EXPRs. */
3911 if (TREE_CODE (arg1) == INTEGER_CST
3912 && ! integer_zerop (arg1))
3914 tree c2 = integer_zero_node;
3917 if (TREE_CODE (xarg0) == PLUS_EXPR
3918 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3919 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
3920 else if (TREE_CODE (xarg0) == MINUS_EXPR
3921 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3923 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
3924 xarg0 = TREE_OPERAND (xarg0, 0);
3929 if (TREE_CODE (xarg0) == MULT_EXPR
3930 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
3931 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
3932 TREE_OPERAND (xarg0, 1),
3934 /* The result is (C2%C3). */
3935 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
3936 TREE_OPERAND (xarg0, 0));
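/* E.g. "((a * 6) + 4) % 3" folds to the constant 1 (i.e. 4 % 3), with
   a retained only for any side effects.  */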
3945 if (integer_zerop (arg1))
3946 return non_lvalue (convert (type, arg0));
3947 /* Since a negative shift count is not well-defined,
3948 don't try to compute it in the compiler. */
3949 if (tree_int_cst_lt (arg1, integer_zero_node))
3954 if (operand_equal_p (arg0, arg1, 0))
3956 if (INTEGRAL_TYPE_P (type)
3957 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
3958 return omit_one_operand (type, arg1, arg0);
3962 if (operand_equal_p (arg0, arg1, 0))
3964 if (INTEGRAL_TYPE_P (type)
3965 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
3966 return omit_one_operand (type, arg1, arg0);
3969 case TRUTH_NOT_EXPR:
3970 /* Note that the operand of this must be an int
3971 and its value must be 0 or 1.
3972 ("true" is a fixed value perhaps depending on the language,
3973 but we don't handle values other than 1 correctly yet.) */
3974 return invert_truthvalue (arg0);
3976 case TRUTH_ANDIF_EXPR:
3977 /* Note that the operands of this must be ints
3978 and their values must be 0 or 1.
3979 ("true" is a fixed value perhaps depending on the language.) */
3980 /* If first arg is constant zero, return it. */
3981 if (integer_zerop (arg0))
3983 case TRUTH_AND_EXPR:
3984 /* If either arg is constant true, drop it. */
3985 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
3986 return non_lvalue (arg1);
3987 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
3988 return non_lvalue (arg0);
3989 /* If second arg is constant zero, result is zero, but first arg
3990 must be evaluated. */
3991 if (integer_zerop (arg1))
3992 return omit_one_operand (type, arg1, arg0);
3995 /* Check for the possibility of merging component references. If our
3996 lhs is another similar operation, try to merge its rhs with our
3997 rhs. Then try to merge our lhs and rhs. */
4000 if (TREE_CODE (arg0) == code)
4002 tem = fold_truthop (code, type,
4003 TREE_OPERAND (arg0, 1), arg1);
4005 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
4008 tem = fold_truthop (code, type, arg0, arg1);
4014 case TRUTH_ORIF_EXPR:
4015 /* Note that the operands of this must be ints
4016 and their values must be 0 or true.
4017 ("true" is a fixed value perhaps depending on the language.) */
4018 /* If first arg is constant true, return it. */
4019 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4022 /* If either arg is constant zero, drop it. */
4023 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
4024 return non_lvalue (arg1);
4025 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
4026 return non_lvalue (arg0);
4027 /* If second arg is constant true, result is true, but we must
4028 evaluate first arg. */
4029 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4030 return omit_one_operand (type, arg1, arg0);
4033 case TRUTH_XOR_EXPR:
4034 /* If either arg is constant zero, drop it. */
4035 if (integer_zerop (arg0))
4036 return non_lvalue (arg1);
4037 if (integer_zerop (arg1))
4038 return non_lvalue (arg0);
4039 /* If either arg is constant true, this is a logical inversion. */
4040 if (integer_onep (arg0))
4041 return non_lvalue (invert_truthvalue (arg1));
4042 if (integer_onep (arg1))
4043 return non_lvalue (invert_truthvalue (arg0));
4052 /* If one arg is a constant integer, put it last. */
4053 if (TREE_CODE (arg0) == INTEGER_CST
4054 && TREE_CODE (arg1) != INTEGER_CST)
4056 TREE_OPERAND (t, 0) = arg1;
4057 TREE_OPERAND (t, 1) = arg0;
4058 arg0 = TREE_OPERAND (t, 0);
4059 arg1 = TREE_OPERAND (t, 1);
4060 code = swap_tree_comparison (code);
4061 TREE_SET_CODE (t, code);
4064 /* Convert foo++ == CONST into ++foo == CONST + INCR.
4065 First, see if one arg is constant; find the constant arg
4066 and the other one. */
4068 tree constop = 0, varop;
4071 if (TREE_CONSTANT (arg1))
4072 constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
4073 if (TREE_CONSTANT (arg0))
4074 constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
4076 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
4078 /* This optimization is invalid for ordered comparisons
4079 if CONST+INCR overflows or if foo+incr might overflow.
4080 This optimization is invalid for floating point due to rounding.
4081 For pointer types we assume overflow doesn't happen. */
4082 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4083 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4084 && (code == EQ_EXPR || code == NE_EXPR)))
4087 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
4088 constop, TREE_OPERAND (varop, 1)));
4089 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
4090 *constoploc = newconst;
4094 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
4096 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4097 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4098 && (code == EQ_EXPR || code == NE_EXPR)))
4101 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
4102 constop, TREE_OPERAND (varop, 1)));
4103 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
4104 *constoploc = newconst;
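/* E.g. "i++ == 5" becomes "++i == 6" and "i-- == 5" becomes
   "--i == 4"; per the conditions above, this is only done for EQ/NE
   comparisons or for pointer types.  */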
4110 /* Change X >= CST to X > (CST - 1) if CST is positive. */
4111 if (TREE_CODE (arg1) == INTEGER_CST
4112 && TREE_CODE (arg0) != INTEGER_CST
4113 && ! tree_int_cst_lt (arg1, integer_one_node))
4115 switch (TREE_CODE (t))
4119 TREE_SET_CODE (t, code);
4120 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4121 TREE_OPERAND (t, 1) = arg1;
4126 TREE_SET_CODE (t, code);
4127 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4128 TREE_OPERAND (t, 1) = arg1;
4132 /* If this is an EQ or NE comparison with zero and ARG0 is
4133 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
4134 two operations, but the latter can be done in one less insn
4135 on machines that have only two-operand insns or on which a
4136 constant cannot be the first operand. */
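/* E.g. "((1 << n) & flags) != 0" is rewritten as
   "((flags >> n) & 1) != 0" (n and flags are illustrative names).  */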
4137 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
4138 && TREE_CODE (arg0) == BIT_AND_EXPR)
4140 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
4141 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
4143 fold (build (code, type,
4144 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4146 TREE_TYPE (TREE_OPERAND (arg0, 0)),
4147 TREE_OPERAND (arg0, 1),
4148 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
4149 convert (TREE_TYPE (arg0),
4152 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
4153 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
4155 fold (build (code, type,
4156 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4158 TREE_TYPE (TREE_OPERAND (arg0, 1)),
4159 TREE_OPERAND (arg0, 0),
4160 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
4161 convert (TREE_TYPE (arg0),
4166 /* If this is an NE comparison of zero with an AND of one, remove the
4167 comparison since the AND will give the correct value. */
4168 if (code == NE_EXPR && integer_zerop (arg1)
4169 && TREE_CODE (arg0) == BIT_AND_EXPR
4170 && integer_onep (TREE_OPERAND (arg0, 1)))
4171 return convert (type, arg0);
4173 /* If we have (A & C) == C where C is a power of 2, convert this into
4174 (A & C) != 0. Similarly for NE_EXPR. */
4175 if ((code == EQ_EXPR || code == NE_EXPR)
4176 && TREE_CODE (arg0) == BIT_AND_EXPR
4177 && integer_pow2p (TREE_OPERAND (arg0, 1))
4178 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
4179 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
4180 arg0, integer_zero_node);
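/* E.g. "(x & 8) == 8" becomes "(x & 8) != 0", turning the test into a
   comparison against zero.  */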
4182 /* Simplify comparison of something with itself. (For IEEE
4183 floating-point, we can only do some of these simplifications.) */
4184 if (operand_equal_p (arg0, arg1, 0))
4191 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4193 t = build_int_2 (1, 0);
4194 TREE_TYPE (t) = type;
4198 TREE_SET_CODE (t, code);
4202 /* For NE, we can only do this simplification if integer. */
4203 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4205 /* ... fall through ... */
4208 t = build_int_2 (0, 0);
4209 TREE_TYPE (t) = type;
4214 /* An unsigned comparison against 0 can be simplified. */
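/* E.g. for unsigned u, "u > 0" becomes "u != 0" and "u <= 0" becomes
   "u == 0", while "u >= 0" is always 1 and "u < 0" is always 0.  */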
4215 if (integer_zerop (arg1)
4216 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
4217 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
4218 && TREE_UNSIGNED (TREE_TYPE (arg1)))
4220 switch (TREE_CODE (t))
4224 TREE_SET_CODE (t, NE_EXPR);
4228 TREE_SET_CODE (t, EQ_EXPR);
4231 return omit_one_operand (type,
4232 convert (type, integer_one_node),
4235 return omit_one_operand (type,
4236 convert (type, integer_zero_node),
4241 /* If we are comparing an expression that just has comparisons
4242 of two integer values, arithmetic expressions of those comparisons,
4243 and constants, we can simplify it. There are only three cases
4244 to check: the two values can either be equal, the first can be
4245 greater, or the second can be greater. Fold the expression for
4246 those three values. Since each value must be 0 or 1, we have
4247 eight possibilities, each of which corresponds to the constant 0
4248 or 1 or one of the six possible comparisons.
4250 This handles common cases like (a > b) == 0 but also handles
4251 expressions like ((x > y) - (y > x)) > 0, which supposedly
4252 occur in macroized code. */
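/* E.g. "((x > y) - (y > x)) > 0" yields 1, 0 and 0 in the three cases,
   so it folds to plain "x > y".  */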
4254 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
4256 tree cval1 = 0, cval2 = 0;
4258 if (twoval_comparison_p (arg0, &cval1, &cval2)
4259 /* Don't handle degenerate cases here; they should already
4260 have been handled anyway. */
4261 && cval1 != 0 && cval2 != 0
4262 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
4263 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
4264 && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
4265 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
4266 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
4268 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
4269 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
4271 /* We can't just pass T to eval_subst in case cval1 or cval2
4272 was the same as ARG1. */
4275 = fold (build (code, type,
4276 eval_subst (arg0, cval1, maxval, cval2, minval),
4279 = fold (build (code, type,
4280 eval_subst (arg0, cval1, maxval, cval2, maxval),
4283 = fold (build (code, type,
4284 eval_subst (arg0, cval1, minval, cval2, maxval),
4287 /* All three of these results should be 0 or 1. Confirm they
4288 are. Then use those values to select the proper code to use. */
4291 if ((integer_zerop (high_result)
4292 || integer_onep (high_result))
4293 && (integer_zerop (equal_result)
4294 || integer_onep (equal_result))
4295 && (integer_zerop (low_result)
4296 || integer_onep (low_result)))
4298 /* Make a 3-bit mask with the high-order bit being the
4299 value for `>', the next for '=', and the low for '<'. */
4300 switch ((integer_onep (high_result) * 4)
4301 + (integer_onep (equal_result) * 2)
4302 + integer_onep (low_result))
4306 return omit_one_operand (type, integer_zero_node, arg0);
4327 return omit_one_operand (type, integer_one_node, arg0);
4330 return fold (build (code, type, cval1, cval2));
      /* If this is a comparison of a field, we may be able to simplify it.  */
      if ((TREE_CODE (arg0) == COMPONENT_REF
	   || TREE_CODE (arg0) == BIT_FIELD_REF)
	  && (code == EQ_EXPR || code == NE_EXPR)
	  /* Handle the constant case even without -O
	     to make sure the warnings are given.  */
	  && (optimize || TREE_CODE (arg1) == INTEGER_CST))
	{
	  t1 = optimize_bit_field_compare (code, type, arg0, arg1);
	  return t1 ? t1 : t;
	}
      /* From here on, the only cases we handle are when the result is
	 known to be a constant.

	 To compute GT, swap the arguments and do LT.
	 To compute GE, do LT and invert the result.
	 To compute LE, swap the arguments, do LT and invert the result.
	 To compute NE, do EQ and invert the result.

	 Therefore, the code below must handle only EQ and LT.  */
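      /* For instance, `a >= b' is computed as `a < b' with INVERT set;
	 constant operands then yield the value of `a < b', and the
	 result is flipped just before being returned below.  */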
      if (code == LE_EXPR || code == GT_EXPR)
	{
	  tem = arg0, arg0 = arg1, arg1 = tem;
	  code = swap_tree_comparison (code);
	}

      /* Note that it is safe to invert for real values here because we
	 will check below in the one case that it matters.  */

      invert = 0;
      if (code == NE_EXPR || code == GE_EXPR)
	{
	  invert = 1;
	  code = invert_tree_comparison (code);
	}
      /* Compute a result for LT or EQ if args permit;
	 otherwise return T.  */
      if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
	{
	  if (code == EQ_EXPR)
	    t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
			       == TREE_INT_CST_LOW (arg1))
			      && (TREE_INT_CST_HIGH (arg0)
				  == TREE_INT_CST_HIGH (arg1)),
			      0);
	  else
	    t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
			       ? INT_CST_LT_UNSIGNED (arg0, arg1)
			       : INT_CST_LT (arg0, arg1)),
			      0);
	}
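      /* An INTEGER_CST is stored as two HOST_WIDE_INT words, so equality
	 must compare both the low and the high word; the INT_CST_LT
	 macros likewise compare full double-word values.  */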
      /* Assume a nonexplicit constant cannot equal an explicit one,
	 since such code would be undefined anyway.
	 Exception: on sysvr4, using #pragma weak,
	 a label can come out as 0.  */
      else if (TREE_CODE (arg1) == INTEGER_CST
	       && !integer_zerop (arg1)
	       && TREE_CONSTANT (arg0)
	       && TREE_CODE (arg0) == ADDR_EXPR
	       && code == EQ_EXPR)
	t1 = build_int_2 (0, 0);
      /* Two real constants can be compared explicitly.  */
      else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
	{
	  /* If either operand is a NaN, the result is false with two
	     exceptions: First, an NE_EXPR is true on NaNs, but that case
	     is already handled correctly since we will be inverting the
	     result for NE_EXPR.  Second, if we had inverted a LE_EXPR
	     or a GE_EXPR into a LT_EXPR, we must return true so that it
	     will be inverted into false.  */
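	  /* E.g. `x <= y' arrives here as the inverse of `y < x'; if
	     either constant is a NaN, `y < x' must yield 1 here so that
	     the inversion below produces the required false result.  */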
	  if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
	      || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
	    t1 = build_int_2 (invert && code == LT_EXPR, 0);

	  else if (code == EQ_EXPR)
	    t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
						 TREE_REAL_CST (arg1)),
			      0);
	  else
	    t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
						TREE_REAL_CST (arg1)),
			      0);
	}
      if (t1 == NULL_TREE)
	return t;

      if (invert)
	TREE_INT_CST_LOW (t1) ^= 1;

      TREE_TYPE (t1) = type;
      return t1;
    case COND_EXPR:
      if (TREE_CODE (arg0) == INTEGER_CST)
	return TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1));
      else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
	return omit_one_operand (type, arg1, arg0);
      /* If the second operand is zero, invert the comparison and swap
	 the second and third operands.  Likewise if the second operand
	 is constant and the third is not or if the third operand is
	 equivalent to the first operand of the comparison.  */

      if (integer_zerop (arg1)
	  || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
	  || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
	      && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
						 TREE_OPERAND (t, 2),
						 TREE_OPERAND (arg0, 1))))
	{
	  /* See if this can be inverted.  If it can't, possibly because
	     it was a floating-point inequality comparison, don't do
	     anything.  */
	  tem = invert_truthvalue (arg0);

	  if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
	    {
	      arg0 = TREE_OPERAND (t, 0) = tem;
	      TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
	      TREE_OPERAND (t, 2) = arg1;
	      arg1 = TREE_OPERAND (t, 1);
	    }
	}
      /* If we have A op B ? A : C, we may be able to convert this to a
	 simpler expression, depending on the operation and the values
	 of B and C.  IEEE floating point prevents this though,
	 because A or B might be -0.0 or a NaN.  */
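      /* E.g. with IEEE arithmetic, folding `a >= 0 ? a : -a' to abs (a)
	 would be wrong for a == -0.0, where the conditional yields -0.0
	 but ABS_EXPR yields +0.0; hence the float-format check below.  */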
      if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
	  && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	      || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0))))
	  && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
					     arg1, TREE_OPERAND (arg0, 1)))
	{
	  tree arg2 = TREE_OPERAND (t, 2);
	  enum tree_code comp_code = TREE_CODE (arg0);

	  /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
	     depending on the comparison operation.  */
	  if (integer_zerop (TREE_OPERAND (arg0, 1))
	      && TREE_CODE (arg2) == NEGATE_EXPR
	      && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		return fold (build1 (NEGATE_EXPR, type, arg1));
	      case NE_EXPR:
		return convert (type, arg1);
	      case GE_EXPR:
	      case GT_EXPR:
		return fold (build1 (ABS_EXPR, type, arg1));
	      case LE_EXPR:
	      case LT_EXPR:
		return fold (build1 (NEGATE_EXPR, type,
				     fold (build1 (ABS_EXPR, type, arg1))));
	      }
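	  /* So, informally: a == 0 ? a : -a  is -a,
			     a != 0 ? a : -a  is a,
			     a >= 0 ? a : -a  is abs (a), and
			     a <  0 ? a : -a  is -abs (a).  */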
	  /* If this is A != 0 ? A : 0, this is simply A.  For ==, it is
	     simply 0.  */

	  if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
	    {
	      if (comp_code == NE_EXPR)
		return convert (type, arg1);
	      else if (comp_code == EQ_EXPR)
		return convert (type, integer_zero_node);
	    }
	  /* If this is A op B ? A : B, this is either A, B, min (A, B),
	     or max (A, B), depending on the operation.  */

	  if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
					      arg2, TREE_OPERAND (arg0, 0)))
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		return convert (type, arg2);
	      case NE_EXPR:
		return convert (type, arg1);
	      case LE_EXPR:
	      case LT_EXPR:
		return fold (build (MIN_EXPR, type, arg1, arg2));
	      case GE_EXPR:
	      case GT_EXPR:
		return fold (build (MAX_EXPR, type, arg1, arg2));
	      }
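	  /* E.g. `x < y ? x : y' becomes MIN_EXPR (x, y) and
	     `x > y ? x : y' becomes MAX_EXPR (x, y); for == and != the
	     two arms are interchangeable, so the result is just B or A.  */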
	  /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
	     we might still be able to simplify this.  For example,
	     if C1 is one less or one more than C2, this might have started
	     out as a MIN or MAX and been transformed by this function.
	     Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE.  */
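	  /* For instance, `x > 3 ? x : 3' may reach here canonicalized as
	     `x >= 4 ? x : 3'; since C1 (4) is C2 (3) + 1, the GE case
	     below rebuilds it as MAX_EXPR (x, 3).  */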
	  if (INTEGRAL_TYPE_P (type)
	      && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
	      && TREE_CODE (arg2) == INTEGER_CST)
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		/* We can replace A with C1 in this case.  */
		arg1 = TREE_OPERAND (t, 1)
		  = convert (type, TREE_OPERAND (arg0, 1));
		break;

	      case LT_EXPR:
		/* If C1 is C2 + 1, this is min(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (PLUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return fold (build (MIN_EXPR, type, arg1, arg2));
		break;

	      case LE_EXPR:
		/* If C1 is C2 - 1, this is min(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (MINUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return fold (build (MIN_EXPR, type, arg1, arg2));
		break;

	      case GT_EXPR:
		/* If C1 is C2 - 1, this is max(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (MINUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return fold (build (MAX_EXPR, type, arg1, arg2));
		break;

	      case GE_EXPR:
		/* If C1 is C2 + 1, this is max(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (PLUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return fold (build (MAX_EXPR, type, arg1, arg2));
		break;
	      }
	}
      /* Convert A ? 1 : 0 to simply A.  */
      if (integer_onep (TREE_OPERAND (t, 1))
	  && integer_zerop (TREE_OPERAND (t, 2))
	  /* If we try to convert TREE_OPERAND (t, 0) to our type, the
	     call to fold will try to move the conversion inside
	     a COND, which will recurse.  In that case, the COND_EXPR
	     is probably the best choice, so leave it alone.  */
	  && type == TREE_TYPE (arg0))
	return arg0;
      /* Look for expressions of the form A & 2 ? 2 : 0.  The result of this
	 operation is simply A & 2.  */
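      /* The power-of-two test matters: for `(a & 3) != 0 ? 3 : 0' the
	 bit-and can evaluate to 1 or 2, not just 0 or 3, so that form
	 must not be rewritten to `a & 3'.  */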
      if (integer_zerop (TREE_OPERAND (t, 2))
	  && TREE_CODE (arg0) == NE_EXPR
	  && integer_zerop (TREE_OPERAND (arg0, 1))
	  && integer_pow2p (arg1)
	  && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
	  && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
			      arg1, 1))
	return convert (type, TREE_OPERAND (arg0, 0));

      return t;
    case COMPOUND_EXPR:
      /* When pedantic, a compound expression can be neither an lvalue
	 nor an integer constant expression.  */
      if (TREE_SIDE_EFFECTS (arg0) || pedantic)
	return t;
      /* Don't let (0, 0) be null pointer constant.  */
      if (integer_zerop (arg1))
	return non_lvalue (arg1);
      return arg1;

    case COMPLEX_EXPR:
      return build_complex (arg0, arg1);
    case REALPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
	return t;
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
	return omit_one_operand (type, TREE_OPERAND (arg0, 0),
				 TREE_OPERAND (arg0, 1));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
	return TREE_REALPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build1 (REALPART_EXPR, type,
					  TREE_OPERAND (arg0, 0))),
			    fold (build1 (REALPART_EXPR, type,
					  TREE_OPERAND (arg0, 1)))));
      return t;
    case IMAGPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
	return convert (type, integer_zero_node);
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
	return omit_one_operand (type, TREE_OPERAND (arg0, 1),
				 TREE_OPERAND (arg0, 0));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
	return TREE_IMAGPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build1 (IMAGPART_EXPR, type,
					  TREE_OPERAND (arg0, 0))),
			    fold (build1 (IMAGPART_EXPR, type,
					  TREE_OPERAND (arg0, 1)))));
      return t;
    default:
      return t;
    } /* switch (code) */