1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 1988, 1992 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
20 /*@@ Fix lossage on folding division of big integers. */
22 /*@@ This file should be rewritten to use an arbitrary precision
23 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
24 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
25 @@ The routines that translate from the ap rep should
26 @@ warn if precision et al. is lost.
27 @@ This would also make life easier when this technology is used
28 @@ for cross-compilers. */
31 /* The entry points in this file are fold, size_int and size_binop.
33 fold takes a tree as argument and returns a simplified tree.
35 size_binop takes a tree code for an arithmetic operation
36 and two operands that are trees, and produces a tree for the
37 result, assuming the type comes from `sizetype'.
39 size_int takes an integer value, and creates a tree constant
40 with type from `sizetype'. */
48 /* Handle floating overflow for `const_binop'. */
49 static jmp_buf float_error;
51 void lshift_double ();
52 void rshift_double ();
53 void lrotate_double ();
54 void rrotate_double ();
55 static tree const_binop ();
61 /* Yield nonzero if a signed left shift of A by B bits overflows. */
62 #define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))
64 /* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
65 Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
66 Then this yields nonzero if overflow occurred during the addition.
67 Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
68 Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
69 #define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
71 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
72 We do that by representing the two-word integer as MAX_SHORTS shorts,
73 with only 8 bits stored in each short, as a positive number. */
75 /* Unpack a two-word integer into MAX_SHORTS shorts.
76 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
77 SHORTS points to the array of shorts. */
80 encode (shorts, low, hi)
82 HOST_WIDE_INT low, hi;
/* Split each HOST_WIDE_INT into 8-bit digits, least significant first:
   LOW fills the first half of SHORTS, HI the second half.  */
86 for (i = 0; i < MAX_SHORTS / 2; i++)
88 shorts[i] = (low >> (i * 8)) & 0xff;
89 shorts[i + MAX_SHORTS / 2] = (hi >> (i * 8) & 0xff);
93 /* Pack an array of MAX_SHORTS shorts into a two-word integer.
94 SHORTS points to the array of shorts.
95 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
98 decode (shorts, low, hi)
100 HOST_WIDE_INT *low, *hi;
103 HOST_WIDE_INT lv = 0, hv = 0;
/* Reassemble the 8-bit digits: the first half of SHORTS forms the low
   word, the second half the high word, least significant digit first.  */
105 for (i = 0; i < MAX_SHORTS / 2; i++)
107 lv |= (HOST_WIDE_INT) shorts[i] << (i * 8);
108 hv |= (HOST_WIDE_INT) shorts[i + MAX_SHORTS / 2] << (i * 8);
114 /* Make the integer constant T valid for its type
115 by setting to 0 or 1 all the bits in the constant
116 that don't belong in the type.
117 Yield 1 if a signed overflow occurs, 0 otherwise.
118 If OVERFLOW is nonzero, a signed overflow has already occurred
119 in calculating T, so propagate it. */
122 force_fit_type (t, overflow)
/* LOW/HIGH save the incoming value so the final test can detect whether
   normalization changed any bits (i.e. the value did not fit).  */
126 HOST_WIDE_INT low = TREE_INT_CST_LOW (t), high = TREE_INT_CST_HIGH (t);
127 register int prec = TYPE_PRECISION (TREE_TYPE (t));
/* Only INTEGER_CST nodes can be normalized here; pointers are handled
   specially (the action for these guards is not visible in this view).  */
129 if (TREE_CODE (t) != INTEGER_CST)
132 if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
135 /* First clear all bits that are beyond the type's precision. */
137 if (prec == 2 * HOST_BITS_PER_WIDE_INT)
139 else if (prec > HOST_BITS_PER_WIDE_INT)
141 TREE_INT_CST_HIGH (t)
142 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
146 TREE_INT_CST_HIGH (t) = 0;
147 if (prec < HOST_BITS_PER_WIDE_INT)
148 TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
151 /* Unsigned types do not suffer sign extension or overflow. */
152 if (TREE_UNSIGNED (TREE_TYPE (t)))
155 /* If the value's sign bit is set, extend the sign. */
/* The sign bit is the highest bit within PREC: test it in the high word
   when PREC spans both words, otherwise in the low word.  */
156 if (prec != 2 * HOST_BITS_PER_WIDE_INT
157 && (prec > HOST_BITS_PER_WIDE_INT
158 ? (TREE_INT_CST_HIGH (t)
159 & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
160 : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
162 /* Value is negative:
163 set to 1 all the bits that are outside this type's precision. */
164 if (prec > HOST_BITS_PER_WIDE_INT)
166 TREE_INT_CST_HIGH (t)
167 |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
171 TREE_INT_CST_HIGH (t) = -1;
172 if (prec < HOST_BITS_PER_WIDE_INT)
173 TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
177 /* Yield nonzero if signed overflow occurred. */
/* Overflow is propagated from the caller (OVERFLOW) or detected by any
   difference between the saved and normalized value.  */
179 ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
183 /* Add two doubleword integers with doubleword result.
184 Each argument is given as two `HOST_WIDE_INT' pieces.
185 One argument is L1 and H1; the other, L2 and H2.
186 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
187 We use the 8-shorts representation internally. */
190 add_double (l1, h1, l2, h2, lv, hv)
191 HOST_WIDE_INT l1, h1, l2, h2;
192 HOST_WIDE_INT *lv, *hv;
194 short arg1[MAX_SHORTS];
195 short arg2[MAX_SHORTS];
196 register int carry = 0;
199 encode (arg1, l1, h1);
200 encode (arg2, l2, h2);
/* Ripple-carry addition, one 8-bit digit at a time; the low byte of
   CARRY is the digit, the rest carries into the next iteration.  */
202 for (i = 0; i < MAX_SHORTS; i++)
204 carry += arg1[i] + arg2[i];
205 arg1[i] = carry & 0xff;
209 decode (arg1, lv, hv);
/* Signed overflow occurred iff the operands agree in sign but the sum's
   sign differs (see overflow_sum_sign above).  */
210 return overflow_sum_sign (h1, h2, *hv);
213 /* Negate a doubleword integer with doubleword result.
214 Return nonzero if the operation overflows, assuming it's signed.
215 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
216 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
217 We use the 8-shorts representation internally. */
220 neg_double (l1, h1, lv, hv)
221 HOST_WIDE_INT l1, h1;
222 HOST_WIDE_INT *lv, *hv;
/* Negation overflows only for the most negative value, the one number
   whose negation keeps the sign: both high words are then negative,
   making the AND of their sign bits negative.  */
228 return (*hv & h1) < 0;
238 /* Multiply two doubleword integers with doubleword result.
239 Return nonzero if the operation overflows, assuming it's signed.
240 Each argument is given as two `HOST_WIDE_INT' pieces.
241 One argument is L1 and H1; the other, L2 and H2.
242 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
243 We use the 8-shorts representation internally. */
246 mul_double (l1, h1, l2, h2, lv, hv)
247 HOST_WIDE_INT l1, h1, l2, h2;
248 HOST_WIDE_INT *lv, *hv;
250 short arg1[MAX_SHORTS];
251 short arg2[MAX_SHORTS];
252 short prod[MAX_SHORTS * 2];
253 register int carry = 0;
254 register int i, j, k;
255 HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
257 /* These cases are used extensively, arising from pointer combinations. */
/* Fast paths multiplying by small powers of two via shift-and-carry on
   the two-word pair; the selecting switch is not visible here --
   TODO(review): confirm which multiplier each arm handles.  */
262 int overflow = left_shift_overflows (h1, 1);
263 unsigned HOST_WIDE_INT temp = l1 + l1;
264 *hv = (h1 << 1) + (temp < l1);
270 int overflow = left_shift_overflows (h1, 2);
271 unsigned HOST_WIDE_INT temp = l1 + l1;
272 h1 = (h1 << 2) + ((temp < l1) << 1);
282 int overflow = left_shift_overflows (h1, 3);
283 unsigned HOST_WIDE_INT temp = l1 + l1;
284 h1 = (h1 << 3) + ((temp < l1) << 2);
287 h1 += (temp < l1) << 1;
/* General case: schoolbook multiplication on 8-bit digits.  */
297 encode (arg1, l1, h1);
298 encode (arg2, l2, h2);
300 bzero (prod, sizeof prod);
302 for (i = 0; i < MAX_SHORTS; i++)
303 for (j = 0; j < MAX_SHORTS; j++)
306 carry = arg1[i] * arg2[j];
310 prod[k] = carry & 0xff;
316 decode (prod, lv, hv); /* This ignores
317 prod[MAX_SHORTS] -> prod[MAX_SHORTS*2-1] */
319 /* Check for overflow by calculating the top half of the answer in full;
320 it should agree with the low half's sign bit. */
321 decode (prod+MAX_SHORTS, &toplow, &tophigh);
/* Correct the unsigned top half for negative operands (2's complement
   adjustment); presumably each call is guarded by a sign test on the
   corresponding operand -- TODO confirm against full source.  */
324 neg_double (l2, h2, &neglow, &neghigh);
325 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
329 neg_double (l1, h1, &neglow, &neghigh);
330 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
/* A negative result requires the discarded top half to be all one bits;
   a non-negative result requires it to be all zero.  */
332 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
335 /* Shift the doubleword integer in L1, H1 left by COUNT places
336 keeping only PREC bits of result.
337 Shift right if COUNT is negative.
338 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
339 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
342 lshift_double (l1, h1, count, prec, lv, hv, arith)
343 HOST_WIDE_INT l1, h1;
345 HOST_WIDE_INT *lv, *hv;
348 short arg1[MAX_SHORTS];
/* A negative COUNT means shift the other way.  */
354 rshift_double (l1, h1, - count, prec, lv, hv, arith);
358 encode (arg1, l1, h1);
/* Shift left one bit at a time across the digit array; CARRY moves the
   bit shifted out of each digit into the next.  */
366 for (i = 0; i < MAX_SHORTS; i++)
368 carry += arg1[i] << 1;
369 arg1[i] = carry & 0xff;
375 decode (arg1, lv, hv);
378 /* Shift the doubleword integer in L1, H1 right by COUNT places
379 keeping only PREC bits of result. COUNT must be positive.
380 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
381 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
384 rshift_double (l1, h1, count, prec, lv, hv, arith)
385 HOST_WIDE_INT l1, h1, count, prec;
386 HOST_WIDE_INT *lv, *hv;
389 short arg1[MAX_SHORTS];
393 encode (arg1, l1, h1);
/* For arithmetic shifts, seed the incoming bit with the sign bit (the
   top bit of arg1[7] -- presumably the most significant digit here);
   logical shifts bring in zero.  */
400 carry = arith && arg1[7] >> 7;
/* Shift right one bit at a time, walking from the most significant
   digit down so each digit's low bit carries into the next one.  */
401 for (i = MAX_SHORTS - 1; i >= 0; i--)
405 arg1[i] = (carry >> 1) & 0xff;
410 decode (arg1, lv, hv);
413 /* Rotate the doubleword integer in L1, H1 left by COUNT places
414 keeping only PREC bits of result.
415 Rotate right if COUNT is negative.
416 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
419 lrotate_double (l1, h1, count, prec, lv, hv)
420 HOST_WIDE_INT l1, h1, count, prec;
421 HOST_WIDE_INT *lv, *hv;
423 short arg1[MAX_SHORTS];
/* A negative COUNT means rotate the other way.  */
429 rrotate_double (l1, h1, - count, prec, lv, hv);
433 encode (arg1, l1, h1);
/* Capture the top bit so it can be rotated back in at the bottom.  */
438 carry = arg1[MAX_SHORTS - 1] >> 7;
/* Rotate left one bit at a time across the digit array.  */
441 for (i = 0; i < MAX_SHORTS; i++)
443 carry += arg1[i] << 1;
444 arg1[i] = carry & 0xff;
450 decode (arg1, lv, hv);
453 /* Rotate the doubleword integer in L1, H1 right by COUNT places
454 keeping only PREC bits of result. COUNT must be positive.
455 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
458 rrotate_double (l1, h1, count, prec, lv, hv)
459 HOST_WIDE_INT l1, h1, count, prec;
460 HOST_WIDE_INT *lv, *hv;
462 short arg1[MAX_SHORTS];
466 encode (arg1, l1, h1);
/* Rotate right one bit at a time, from the most significant digit down;
   the bit falling off the bottom re-enters at the top (the carry seeding
   before this loop is not visible in this view).  */
474 for (i = MAX_SHORTS - 1; i >= 0; i--)
478 arg1[i] = (carry >> 1) & 0xff;
483 decode (arg1, lv, hv);
486 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
487 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
488 CODE is a tree code for a kind of division, one of
489 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
491 It controls how the quotient is rounded to an integer.
492 Return nonzero if the operation overflows.
493 UNS nonzero says do unsigned division. */
496 div_and_round_double (code, uns,
497 lnum_orig, hnum_orig, lden_orig, hden_orig,
498 lquo, hquo, lrem, hrem)
501 HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
502 HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
503 HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
506 short num[MAX_SHORTS + 1]; /* extra element for scaling. */
507 short den[MAX_SHORTS], quo[MAX_SHORTS];
508 register int i, j, work;
509 register int carry = 0;
510 unsigned HOST_WIDE_INT lnum = lnum_orig;
511 HOST_WIDE_INT hnum = hnum_orig;
512 unsigned HOST_WIDE_INT lden = lden_orig;
513 HOST_WIDE_INT hden = hden_orig;
/* Division by zero check (the action taken is not visible here).  */
516 if ((hden == 0) && (lden == 0))
519 /* calculate quotient sign and convert operands to unsigned. */
525 /* (minimum integer) / (-1) is the only overflow case. */
526 if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
532 neg_double (lden, hden, &lden, &hden);
/* Both values fit in one word: use the host's division directly.  */
536 if (hnum == 0 && hden == 0)
537 { /* single precision */
539 *lquo = lnum / lden; /* rounds toward zero since positive args */
544 { /* trivial case: dividend < divisor */
545 /* hden != 0 already checked. */
552 bzero (quo, sizeof quo);
554 bzero (num, sizeof num); /* to zero 9th element */
555 bzero (den, sizeof den);
557 encode (num, lnum, hnum);
558 encode (den, lden, hden);
560 /* This code requires more than just hden == 0.
561 We also have to require that we don't need more than three bytes
562 to hold CARRY. If we ever did need four bytes to hold it, we
563 would lose part of it when computing WORK on the next round. */
564 if (hden == 0 && ((lden << 8) >> 8) == lden)
565 { /* simpler algorithm */
566 /* hnum != 0 already checked. */
567 for (i = MAX_SHORTS - 1; i >= 0; i--)
569 work = num[i] + (carry << 8);
570 quo[i] = work / lden;
574 else { /* full double precision,
575 with thanks to Don Knuth's
576 "Seminumerical Algorithms". */
578 int quo_est, scale, num_hi_sig, den_hi_sig, quo_hi_sig;
580 /* Find the highest non-zero divisor digit. */
581 for (i = MAX_SHORTS - 1; ; i--)
586 for (i = MAX_SHORTS - 1; ; i--)
591 quo_hi_sig = num_hi_sig - den_hi_sig + 1;
593 /* Insure that the first digit of the divisor is at least BASE/2.
594 This is required by the quotient digit estimation algorithm. */
596 scale = BASE / (den[den_hi_sig] + 1);
597 if (scale > 1) { /* scale divisor and dividend */
599 for (i = 0; i <= MAX_SHORTS - 1; i++) {
600 work = (num[i] * scale) + carry;
601 num[i] = work & 0xff;
603 if (num[i] != 0) num_hi_sig = i;
606 for (i = 0; i <= MAX_SHORTS - 1; i++) {
607 work = (den[i] * scale) + carry;
608 den[i] = work & 0xff;
610 if (den[i] != 0) den_hi_sig = i;
/* Main loop of Knuth's Algorithm D: produce one quotient digit per
   iteration, highest digit first.  */
615 for (i = quo_hi_sig; i > 0; i--) {
616 /* guess the next quotient digit, quo_est, by dividing the first
617 two remaining dividend digits by the high order quotient digit.
618 quo_est is never low and is at most 2 high. */
620 int num_hi; /* index of highest remaining dividend digit */
622 num_hi = i + den_hi_sig;
624 work = (num[num_hi] * BASE) + (num_hi > 0 ? num[num_hi - 1] : 0);
625 if (num[num_hi] != den[den_hi_sig]) {
626 quo_est = work / den[den_hi_sig];
632 /* refine quo_est so it's usually correct, and at most one high. */
633 while ((den[den_hi_sig - 1] * quo_est)
634 > (((work - (quo_est * den[den_hi_sig])) * BASE)
635 + ((num_hi - 1) > 0 ? num[num_hi - 2] : 0)))
638 /* Try QUO_EST as the quotient digit, by multiplying the
639 divisor by QUO_EST and subtracting from the remaining dividend.
640 Keep in mind that QUO_EST is the I - 1st digit. */
644 for (j = 0; j <= den_hi_sig; j++)
648 work = num[i + j - 1] - (quo_est * den[j]) + carry;
656 num[i + j - 1] = digit;
659 /* if quo_est was high by one, then num[i] went negative and
660 we need to correct things. */
665 carry = 0; /* add divisor back in */
666 for (j = 0; j <= den_hi_sig; j++)
668 work = num[i + j - 1] + den[j] + carry;
678 num[i + j - 1] = work;
680 num [num_hi] += carry;
683 /* store the quotient digit. */
684 quo[i - 1] = quo_est;
688 decode (quo, lquo, hquo);
691 /* if result is negative, make it so. */
693 neg_double (*lquo, *hquo, lquo, hquo);
695 /* compute trial remainder: rem = num - (quo * den) */
696 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
697 neg_double (*lrem, *hrem, lrem, hrem);
698 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
/* Adjust the truncated quotient according to the requested rounding
   mode (the enclosing switch head is not visible in this view).  */
703 case TRUNC_MOD_EXPR: /* round toward zero */
704 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
708 case FLOOR_MOD_EXPR: /* round toward negative infinity */
709 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
712 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
715 else return overflow;
719 case CEIL_MOD_EXPR: /* round toward positive infinity */
720 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
722 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
725 else return overflow;
729 case ROUND_MOD_EXPR: /* round to closest integer */
731 HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
732 HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
734 /* get absolute values */
735 if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
736 if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
738 /* if (2 * abs (lrem) >= abs (lden)) */
/* NOTE(review): "<wice" below appears to be a mojibake of "&ltwice"
   ("&lt" decoded as "<") -- the argument should be the address of
   LTWICE; restore it when editing this line.  */
739 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
740 labs_rem, habs_rem, <wice, &htwice);
741 if (((unsigned HOST_WIDE_INT) habs_den
742 < (unsigned HOST_WIDE_INT) htwice)
743 || (((unsigned HOST_WIDE_INT) habs_den
744 == (unsigned HOST_WIDE_INT) htwice)
/* NOTE(review): "(HOST_WIDE_INT unsigned)" is spelled inconsistently
   with the surrounding "(unsigned HOST_WIDE_INT)" casts; legal only if
   HOST_WIDE_INT is a plain type keyword -- normalize when editing.  */
745 && ((HOST_WIDE_INT unsigned) labs_den
746 < (unsigned HOST_WIDE_INT) ltwice)))
750 add_double (*lquo, *hquo,
751 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
754 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
757 else return overflow;
765 /* compute true remainder: rem = num - (quo * den) */
766 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
767 neg_double (*lrem, *hrem, lrem, hrem);
768 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
772 /* Effectively truncate a real value to represent
773 the nearest possible value in a narrower mode.
774 The result is actually represented in the same data type as the argument,
775 but its value is usually different. */
778 real_value_truncate (mode, arg)
779 enum machine_mode mode;
783 /* Make sure the value is actually stored in memory before we turn off
787 REAL_VALUE_TYPE value;
788 jmp_buf handler, old_handler;
/* setjmp returns nonzero when a floating trap longjmps back here, in
   which case the truncation overflowed.  */
791 if (setjmp (handler))
793 error ("floating overflow");
/* Install the trap handler, perform the truncation, then restore the
   previous handler.  */
796 handled = push_float_handler (handler, old_handler);
797 value = REAL_VALUE_TRUNCATE (mode, arg);
798 pop_float_handler (handled, old_handler);
802 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
804 /* Check for infinity in an IEEE double precision number. */
810 /* The IEEE 64-bit double format. */
815 unsigned exponent : 11;
816 unsigned mantissa1 : 20;
821 unsigned mantissa1 : 20;
822 unsigned exponent : 11;
828 if (u.big_endian.sign == 1)
831 return (u.big_endian.exponent == 2047
832 && u.big_endian.mantissa1 == 0
833 && u.big_endian.mantissa2 == 0);
838 return (u.little_endian.exponent == 2047
839 && u.little_endian.mantissa1 == 0
840 && u.little_endian.mantissa2 == 0);
844 /* Check whether an IEEE double precision number is a NaN. */
850 /* The IEEE 64-bit double format. */
855 unsigned exponent : 11;
856 unsigned mantissa1 : 20;
861 unsigned mantissa1 : 20;
862 unsigned exponent : 11;
868 if (u.big_endian.sign == 1)
871 return (u.big_endian.exponent == 2047
872 && (u.big_endian.mantissa1 != 0
873 || u.big_endian.mantissa2 != 0));
878 return (u.little_endian.exponent == 2047
879 && (u.little_endian.mantissa1 != 0
880 || u.little_endian.mantissa2 != 0));
884 /* Check for a negative IEEE double precision number. */
890 /* The IEEE 64-bit double format. */
895 unsigned exponent : 11;
896 unsigned mantissa1 : 20;
901 unsigned mantissa1 : 20;
902 unsigned exponent : 11;
908 if (u.big_endian.sign == 1)
911 return u.big_endian.sign;
916 return u.little_endian.sign;
919 #else /* Target not IEEE */
921 /* Let's assume other float formats don't have infinity.
922 (This can be overridden by redefining REAL_VALUE_ISINF.) */
930 /* Let's assume other float formats don't have NaNs.
931 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
939 /* Let's assume other float formats don't have minus zero.
940 (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
947 #endif /* Target not IEEE */
949 /* Split a tree IN into a constant and a variable part
950 that could be combined with CODE to make IN.
951 CODE must be a commutative arithmetic operation.
952 Store the constant part into *CONP and the variable in &VARP.
953 Return 1 if this was done; zero means the tree IN did not decompose
956 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
957 Therefore, we must tell the caller whether the variable part
958 was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
959 The value stored is the coefficient for the variable term.
960 The constant term we return should always be added;
961 we negate it if necessary. */
964 split_tree (in, code, varp, conp, varsignp)
970 register tree outtype = TREE_TYPE (in);
974 /* Strip any conversions that don't change the machine mode. */
975 while ((TREE_CODE (in) == NOP_EXPR
976 || TREE_CODE (in) == CONVERT_EXPR)
977 && (TYPE_MODE (TREE_TYPE (in))
978 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
979 in = TREE_OPERAND (in, 0);
981 if (TREE_CODE (in) == code
982 || (TREE_CODE (TREE_TYPE (in)) != REAL_TYPE
983 /* We can associate addition and subtraction together
984 (even though the C standard doesn't say so)
985 for integers because the value is not affected.
986 For reals, the value might be affected, so we can't. */
988 ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
989 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
/* NOTE(review): this local CODE shadows the parameter CODE for the
   rest of this scope; it holds the tree code of operand 0, not the
   requested operation.  */
991 enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
/* Case 1: the constant is operand 0 (CONST op VAR).  */
992 if (code == INTEGER_CST)
994 *conp = TREE_OPERAND (in, 0);
995 *varp = TREE_OPERAND (in, 1);
996 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
997 && TREE_TYPE (*varp) != outtype)
998 *varp = convert (outtype, *varp);
999 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
/* Case 2: the constant is operand 1 (VAR op CONST).  */
1002 if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
1004 *conp = TREE_OPERAND (in, 1);
1005 *varp = TREE_OPERAND (in, 0);
1007 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
1008 && TREE_TYPE (*varp) != outtype)
1009 *varp = convert (outtype, *varp);
1010 if (TREE_CODE (in) == MINUS_EXPR)
1012 /* If operation is subtraction and constant is second,
1013 must negate it to get an additive constant.
1014 And this cannot be done unless it is a manifest constant.
1015 It could also be the address of a static variable.
1016 We cannot negate that, so give up. */
1017 if (TREE_CODE (*conp) == INTEGER_CST)
1018 /* Subtracting from integer_zero_node loses for long long. */
1019 *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
/* Case 3: a non-INTEGER_CST constant as operand 0.  */
1025 if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
1027 *conp = TREE_OPERAND (in, 0);
1028 *varp = TREE_OPERAND (in, 1);
1029 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
1030 && TREE_TYPE (*varp) != outtype)
1031 *varp = convert (outtype, *varp);
1032 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
1039 /* Combine two constants NUM and ARG2 under operation CODE
1040 to produce a new constant.
1041 We assume ARG1 and ARG2 have the same data type,
1042 or at least are the same kind of constant and the same machine mode.
1044 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1047 const_binop (code, arg1, arg2, notrunc)
1048 enum tree_code code;
1049 register tree arg1, arg2;
/* --- Integer constants: two-word arithmetic on (low, high) pairs.
   The dispatching switch head on CODE is not visible in this view.  */
1052 if (TREE_CODE (arg1) == INTEGER_CST)
1054 register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
1055 register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
1056 HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
1057 HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
1058 HOST_WIDE_INT low, hi;
1059 HOST_WIDE_INT garbagel, garbageh;
1061 int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
/* Bitwise operations apply independently to each word.  */
1067 t = build_int_2 (int1l | int2l, int1h | int2h);
1071 t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
1075 t = build_int_2 (int1l & int2l, int1h & int2h);
1078 case BIT_ANDTC_EXPR:
1079 t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
1085 /* It's unclear from the C standard whether shifts can overflow.
1086 The following code ignores overflow; perhaps a C standard
1087 interpretation ruling is needed. */
1088 lshift_double (int1l, int1h, int2l,
1089 TYPE_PRECISION (TREE_TYPE (arg1)),
1092 t = build_int_2 (low, hi);
1098 lrotate_double (int1l, int1h, int2l,
1099 TYPE_PRECISION (TREE_TYPE (arg1)),
1101 t = build_int_2 (low, hi);
/* Addition special cases (the selecting conditions/labels around these
   fragments are not visible here -- TODO confirm against full source).  */
1108 if ((unsigned HOST_WIDE_INT) int2l < int1l)
1111 overflow = int2h < hi;
1113 t = build_int_2 (int2l, int2h);
1119 if ((unsigned HOST_WIDE_INT) int1l < int2l)
1122 overflow = int1h < hi;
1124 t = build_int_2 (int1l, int1h);
1127 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
1128 t = build_int_2 (low, hi);
/* Subtraction: x - 0 is x; otherwise add the negation of ARG2.  */
1132 if (int2h == 0 && int2l == 0)
1134 t = build_int_2 (int1l, int1h);
1137 neg_double (int2l, int2h, &low, &hi);
1138 add_double (int1l, int1h, low, hi, &low, &hi);
1139 overflow = overflow_sum_sign (hi, int2h, int1h);
1140 t = build_int_2 (low, hi);
1144 /* Optimize simple cases. */
1147 unsigned HOST_WIDE_INT temp;
/* Fast multiplications by small constants via shift-and-carry on the
   two-word pair (the switch on the multiplier is not visible here).  */
1152 t = build_int_2 (0, 0);
1155 t = build_int_2 (int2l, int2h);
1158 overflow = left_shift_overflows (int2h, 1);
1159 temp = int2l + int2l;
1160 int2h = (int2h << 1) + (temp < int2l);
1161 t = build_int_2 (temp, int2h);
1163 #if 0 /* This code can lose carries. */
1165 temp = int2l + int2l + int2l;
1166 int2h = int2h * 3 + (temp < int2l);
1167 t = build_int_2 (temp, int2h);
1171 overflow = left_shift_overflows (int2h, 2);
1172 temp = int2l + int2l;
1173 int2h = (int2h << 2) + ((temp < int2l) << 1);
1176 int2h += (temp < int2l);
1177 t = build_int_2 (temp, int2h);
1180 overflow = left_shift_overflows (int2h, 3);
1181 temp = int2l + int2l;
1182 int2h = (int2h << 3) + ((temp < int2l) << 2);
1185 int2h += (temp < int2l) << 1;
1188 int2h += (temp < int2l);
1189 t = build_int_2 (temp, int2h);
1200 t = build_int_2 (0, 0);
1205 t = build_int_2 (int1l, int1h);
/* General multiplication falls through to mul_double.  */
1210 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1211 t = build_int_2 (low, hi);
1214 case TRUNC_DIV_EXPR:
1215 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1216 case EXACT_DIV_EXPR:
1217 /* This is a shortcut for a common special case.
1218 It reduces the number of tree nodes generated
1220 if (int2h == 0 && int2l > 0
1221 && TREE_TYPE (arg1) == sizetype
1222 && int1h == 0 && int1l >= 0)
1224 if (code == CEIL_DIV_EXPR)
1226 return size_int (int1l / int2l);
1228 case ROUND_DIV_EXPR:
/* x / 1 is x; x / x is 1 (unless x == 0).  */
1229 if (int2h == 0 && int2l == 1)
1231 t = build_int_2 (int1l, int1h);
1234 if (int1l == int2l && int1h == int2h)
1236 if ((int1l | int1h) == 0)
1238 t = build_int_2 (1, 0);
/* General division keeps the quotient, discards the remainder.  */
1241 overflow = div_and_round_double (code, uns,
1242 int1l, int1h, int2l, int2h,
1243 &low, &hi, &garbagel, &garbageh);
1244 t = build_int_2 (low, hi);
1247 case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
1248 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
/* Modulus keeps the remainder, discards the quotient.  */
1249 overflow = div_and_round_double (code, uns,
1250 int1l, int1h, int2l, int2h,
1251 &garbagel, &garbageh, &low, &hi);
1252 t = build_int_2 (low, hi);
/* MIN/MAX: compute ARG1 < ARG2, unsigned comparison first...  */
1259 low = (((unsigned HOST_WIDE_INT) int1h
1260 < (unsigned HOST_WIDE_INT) int2h)
1261 || (((unsigned HOST_WIDE_INT) int1h
1262 == (unsigned HOST_WIDE_INT) int2h)
1263 && ((unsigned HOST_WIDE_INT) int1l
1264 < (unsigned HOST_WIDE_INT) int2l)));
/* ...or signed comparison on the high word.  */
1268 low = ((int1h < int2h)
1269 || ((int1h == int2h)
1270 && ((unsigned HOST_WIDE_INT) int1l
1271 < (unsigned HOST_WIDE_INT) int2l)));
/* Pick ARG1 when (ARG1 < ARG2) matches MIN, else ARG2.  */
1273 if (low == (code == MIN_EXPR))
1274 t = build_int_2 (int1l, int1h);
1276 t = build_int_2 (int2l, int2h);
/* Give the result ARG1's type and record overflow: when NOTRUNC,
   only pre-detected signed overflow counts; otherwise let
   force_fit_type normalize and report.  */
1283 TREE_TYPE (t) = TREE_TYPE (arg1);
1284 TREE_CONSTANT_OVERFLOW (t)
1285 = ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
1286 | TREE_CONSTANT_OVERFLOW (arg1)
1287 | TREE_CONSTANT_OVERFLOW (arg2));
/* --- Real constants: fold under a setjmp guard against FP traps.  */
1290 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1291 if (TREE_CODE (arg1) == REAL_CST)
1293 register REAL_VALUE_TYPE d1;
1294 register REAL_VALUE_TYPE d2;
1295 register REAL_VALUE_TYPE value;
1298 d1 = TREE_REAL_CST (arg1);
1299 d2 = TREE_REAL_CST (arg2);
/* On floating overflow, warn and return the expression unfolded.  */
1300 if (setjmp (float_error))
1302 pedwarn ("floating overflow in constant expression");
1303 return build (code, TREE_TYPE (arg1), arg1, arg2);
1305 set_float_handler (float_error);
1307 #ifdef REAL_ARITHMETIC
1308 REAL_ARITHMETIC (value, code, d1, d2);
1325 #ifndef REAL_INFINITY
1334 value = MIN (d1, d2);
1338 value = MAX (d1, d2);
1344 #endif /* no REAL_ARITHMETIC */
1345 t = build_real (TREE_TYPE (arg1),
1346 real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
1347 set_float_handler (NULL_PTR);
1350 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
/* --- Complex constants: fold componentwise via recursive calls.  */
1351 if (TREE_CODE (arg1) == COMPLEX_CST)
1353 register tree r1 = TREE_REALPART (arg1);
1354 register tree i1 = TREE_IMAGPART (arg1);
1355 register tree r2 = TREE_REALPART (arg2);
1356 register tree i2 = TREE_IMAGPART (arg2);
1362 t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
1363 const_binop (PLUS_EXPR, i1, i2, notrunc));
1367 t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
1368 const_binop (MINUS_EXPR, i1, i2, notrunc));
/* Complex multiplication: (r1*r2 - i1*i2, r1*i2 + i1*r2) -- the inner
   operand lines are partly elided here.  */
1372 t = build_complex (const_binop (MINUS_EXPR,
1373 const_binop (MULT_EXPR,
1375 const_binop (MULT_EXPR,
1378 const_binop (PLUS_EXPR,
1379 const_binop (MULT_EXPR,
1381 const_binop (MULT_EXPR,
/* Complex division: divide by the squared magnitude of ARG2.  */
1388 register tree magsquared
1389 = const_binop (PLUS_EXPR,
1390 const_binop (MULT_EXPR, r2, r2, notrunc),
1391 const_binop (MULT_EXPR, i2, i2, notrunc),
1393 t = build_complex (const_binop (RDIV_EXPR,
1394 const_binop (PLUS_EXPR,
1395 const_binop (MULT_EXPR, r1, r2, notrunc),
1396 const_binop (MULT_EXPR, i1, i2, notrunc),
1398 magsquared, notrunc),
1399 const_binop (RDIV_EXPR,
1400 const_binop (MINUS_EXPR,
1401 const_binop (MULT_EXPR, i1, r2, notrunc),
1402 const_binop (MULT_EXPR, r1, i2, notrunc),
1404 magsquared, notrunc));
1411 TREE_TYPE (t) = TREE_TYPE (arg1);
1417 /* Return an INTEGER_CST with value V and type from `sizetype'. */
1421 unsigned int number;
1424 /* Type-size nodes already made for small sizes. */
1425 static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];
/* Small sizes are cached: return the memoized node if present.
   NOTE(review): NUMBER is unsigned, so the "number >= 0" tests below
   are always true -- harmless, but could be dropped.  */
1427 if (number >= 0 && number < 2*HOST_BITS_PER_WIDE_INT + 1
1428 && size_table[number] != 0)
1429 return size_table[number];
1430 if (number >= 0 && number < 2*HOST_BITS_PER_WIDE_INT + 1)
/* Cacheable size: allocate the node permanently so the memo stays valid.  */
1432 push_obstacks_nochange ();
1433 /* Make this a permanent node. */
1434 end_temporary_allocation ();
1435 t = build_int_2 (number, 0);
1436 TREE_TYPE (t) = sizetype;
1437 size_table[number] = t;
/* Large sizes get a fresh, uncached node.  */
1442 t = build_int_2 (number, 0);
1443 TREE_TYPE (t) = sizetype;
1448 /* Combine operands OP1 and OP2 with arithmetic operation CODE.
1449 CODE is a tree code. Data type is taken from `sizetype',
1450 If the operands are constant, so is the result. */
1453 size_binop (code, arg0, arg1)
1454 enum tree_code code;
1457 /* Handle the special case of two integer constants faster. */
1458 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1460 /* And some specific cases even faster than that. */
/* 0 + x.  */
1461 if (code == PLUS_EXPR
1462 && TREE_INT_CST_LOW (arg0) == 0
1463 && TREE_INT_CST_HIGH (arg0) == 0)
/* x - 0.  */
1465 if (code == MINUS_EXPR
1466 && TREE_INT_CST_LOW (arg1) == 0
1467 && TREE_INT_CST_HIGH (arg1) == 0)
/* 1 * x.  */
1469 if (code == MULT_EXPR
1470 && TREE_INT_CST_LOW (arg0) == 1
1471 && TREE_INT_CST_HIGH (arg0) == 0)
1473 /* Handle general case of two integer constants. */
1474 return const_binop (code, arg0, arg1, 1);
/* Propagate errors rather than building on them.  */
1477 if (arg0 == error_mark_node || arg1 == error_mark_node)
1478 return error_mark_node;
/* Non-constant operands: build the expression and let fold simplify.  */
1480 return fold (build (code, sizetype, arg0, arg1));
1483 /* Given T, a tree representing type conversion of ARG1, a constant,
1484 return a constant tree representing the result of conversion. */
1487 fold_convert (t, arg1)
1491 register tree type = TREE_TYPE (t);
1493 if (TREE_CODE (type) == POINTER_TYPE
1494 || TREE_CODE (type) == INTEGER_TYPE
1495 || TREE_CODE (type) == ENUMERAL_TYPE)
1497 if (TREE_CODE (arg1) == INTEGER_CST)
1499 /* Given an integer constant, make new constant with new type,
1500 appropriately sign-extended or truncated. */
1501 t = build_int_2 (TREE_INT_CST_LOW (arg1),
1502 TREE_INT_CST_HIGH (arg1));
1503 TREE_TYPE (t) = type;
1504 /* Indicate an overflow if (1) ARG1 already overflowed,
1505 or (2) ARG1 is a too-large unsigned value and T is signed,
1506 or (3) force_fit_type indicates an overflow.
1507 force_fit_type can't detect (2), since it sees only T's type. */
1508 TREE_CONSTANT_OVERFLOW (t) =
1509 (TREE_CONSTANT_OVERFLOW (arg1)
1510 | (TREE_INT_CST_HIGH (arg1) < 0
1511 & TREE_UNSIGNED (type) < TREE_UNSIGNED (TREE_TYPE (arg1)))
1512 | force_fit_type (t, 0));
1514 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1515 else if (TREE_CODE (arg1) == REAL_CST)
1518 l = real_value_from_int_cst (TYPE_MIN_VALUE (type)),
1519 x = TREE_REAL_CST (arg1),
1520 u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
1521 /* See if X will be in range after truncation towards 0.
1522 To compensate for truncation, move the bounds away from 0,
1523 but reject if X exactly equals the adjusted bounds. */
1524 #ifdef REAL_ARITHMETIC
1525 REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
1526 REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
1531 if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
1533 pedwarn ("real constant out of range for integer conversion");
1536 #ifndef REAL_ARITHMETIC
1539 HOST_WIDE_INT low, high;
1540 HOST_WIDE_INT half_word
1541 = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
1543 d = TREE_REAL_CST (arg1);
1547 high = (HOST_WIDE_INT) (d / half_word / half_word);
1548 d -= (REAL_VALUE_TYPE) high * half_word * half_word;
1549 if (d >= (REAL_VALUE_TYPE) half_word * half_word / 2)
1551 low = d - (REAL_VALUE_TYPE) half_word * half_word / 2;
1552 low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
1555 low = (HOST_WIDE_INT) d;
1556 if (TREE_REAL_CST (arg1) < 0)
1557 neg_double (low, high, &low, &high);
1558 t = build_int_2 (low, high);
1562 HOST_WIDE_INT low, high;
1563 REAL_VALUE_TO_INT (low, high, TREE_REAL_CST (arg1));
1564 t = build_int_2 (low, high);
1567 TREE_TYPE (t) = type;
1568 force_fit_type (t, 0);
1570 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1571 TREE_TYPE (t) = type;
1573 else if (TREE_CODE (type) == REAL_TYPE)
1575 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1576 if (TREE_CODE (arg1) == INTEGER_CST)
1577 return build_real_from_int_cst (type, arg1);
1578 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1579 if (TREE_CODE (arg1) == REAL_CST)
1581 if (setjmp (float_error))
1583 pedwarn ("floating overflow in constant expression");
1586 set_float_handler (float_error);
1588 t = build_real (type, real_value_truncate (TYPE_MODE (type),
1589 TREE_REAL_CST (arg1)));
1590 set_float_handler (NULL_PTR);
1594 TREE_CONSTANT (t) = 1;
1598 /* Return an expr equal to X but certainly not valid as an lvalue.
1599 Also make sure it is not valid as a null pointer constant. */
1607 /* These things are certainly not lvalues. */
1608 if (TREE_CODE (x) == NON_LVALUE_EXPR
1609 || TREE_CODE (x) == INTEGER_CST
1610 || TREE_CODE (x) == REAL_CST
1611 || TREE_CODE (x) == STRING_CST
1612 || TREE_CODE (x) == ADDR_EXPR)
1614 if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
1616 result = build1 (NOP_EXPR, TREE_TYPE (x), x);
1617 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1623 result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
1624 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1628 /* Given a tree comparison code, return the code that is the logical inverse
1629 of the given code. It is not safe to do this for floating-point
1630 comparisons, except for NE_EXPR and EQ_EXPR. */
1632 static enum tree_code
1633 invert_tree_comparison (code)
1634 enum tree_code code;
1655 /* Similar, but return the comparison that results if the operands are
1656 swapped. This is safe for floating-point. */
1658 static enum tree_code
1659 swap_tree_comparison (code)
1660 enum tree_code code;
1680 /* Return nonzero if two operands are necessarily equal.
1681 If ONLY_CONST is non-zero, only return non-zero for constants.
1682 This function tests whether the operands are indistinguishable;
1683 it does not test whether they are equal using C's == operation.
1684 The distinction is important for IEEE floating point, because
1685 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
1686 (2) two NaNs may be indistinguishable, but NaN!=NaN. */
1689 operand_equal_p (arg0, arg1, only_const)
1693 /* If both types don't have the same signedness, then we can't consider
1694 them equal. We must check this before the STRIP_NOPS calls
1695 because they may change the signedness of the arguments. */
1696 if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
1702 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
1703 We don't care about side effects in that case because the SAVE_EXPR
1704 takes care of that for us. */
1705 if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
1706 return ! only_const;
1708 if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
1711 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1712 && TREE_CODE (arg0) == ADDR_EXPR
1713 && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
1716 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1717 && TREE_CODE (arg0) == INTEGER_CST
1718 && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
1719 && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
1722 /* Detect when real constants are equal. */
1723 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1724 && TREE_CODE (arg0) == REAL_CST)
1725 return !bcmp (&TREE_REAL_CST (arg0), &TREE_REAL_CST (arg1),
1726 sizeof (REAL_VALUE_TYPE));
1734 if (TREE_CODE (arg0) != TREE_CODE (arg1))
1736 /* This is needed for conversions and for COMPONENT_REF.
1737 Might as well play it safe and always test this. */
1738 if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
1741 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
1744 /* Two conversions are equal only if signedness and modes match. */
1745 if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
1746 && (TREE_UNSIGNED (TREE_TYPE (arg0))
1747 != TREE_UNSIGNED (TREE_TYPE (arg1))))
1750 return operand_equal_p (TREE_OPERAND (arg0, 0),
1751 TREE_OPERAND (arg1, 0), 0);
1755 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1756 TREE_OPERAND (arg1, 0), 0)
1757 && operand_equal_p (TREE_OPERAND (arg0, 1),
1758 TREE_OPERAND (arg1, 1), 0));
1761 switch (TREE_CODE (arg0))
1764 return operand_equal_p (TREE_OPERAND (arg0, 0),
1765 TREE_OPERAND (arg1, 0), 0);
1769 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1770 TREE_OPERAND (arg1, 0), 0)
1771 && operand_equal_p (TREE_OPERAND (arg0, 1),
1772 TREE_OPERAND (arg1, 1), 0));
1775 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1776 TREE_OPERAND (arg1, 0), 0)
1777 && operand_equal_p (TREE_OPERAND (arg0, 1),
1778 TREE_OPERAND (arg1, 1), 0)
1779 && operand_equal_p (TREE_OPERAND (arg0, 2),
1780 TREE_OPERAND (arg1, 2), 0));
1788 /* Similar to operand_equal_p, but see if ARG0 might have been made by
1789 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
1791 When in doubt, return 0. */
1794 operand_equal_for_comparison_p (arg0, arg1, other)
1798 int unsignedp1, unsignedpo;
1799 tree primarg1, primother;
1802 if (operand_equal_p (arg0, arg1, 0))
1805 if (TREE_CODE (TREE_TYPE (arg0)) != INTEGER_TYPE)
1808 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
1809 actual comparison operand, ARG0.
1811 First throw away any conversions to wider types
1812 already present in the operands. */
1814 primarg1 = get_narrower (arg1, &unsignedp1);
1815 primother = get_narrower (other, &unsignedpo);
1817 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
1818 if (unsignedp1 == unsignedpo
1819 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
1820 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
1822 tree type = TREE_TYPE (arg0);
1824 /* Make sure shorter operand is extended the right way
1825 to match the longer operand. */
1826 primarg1 = convert (signed_or_unsigned_type (unsignedp1,
1827 TREE_TYPE (primarg1)),
1830 if (operand_equal_p (arg0, convert (type, primarg1), 0))
1837 /* See if ARG is an expression that is either a comparison or is performing
1838 arithmetic on comparisons. The comparisons must only be comparing
1839 two different values, which will be stored in *CVAL1 and *CVAL2; if
1840 they are non-zero it means that some operands have already been found.
1841 No variables may be used anywhere else in the expression except in the
1844 If this is true, return 1. Otherwise, return zero. */
1847 twoval_comparison_p (arg, cval1, cval2)
1849 tree *cval1, *cval2;
1851 enum tree_code code = TREE_CODE (arg);
1852 char class = TREE_CODE_CLASS (code);
1854 /* We can handle some of the 'e' cases here. */
1856 && (code == TRUTH_NOT_EXPR
1857 || (code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)))
1859 else if (class == 'e'
1860 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
1861 || code == COMPOUND_EXPR))
1867 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2);
1870 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2)
1871 && twoval_comparison_p (TREE_OPERAND (arg, 1), cval1, cval2));
1877 if (code == COND_EXPR)
1878 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2)
1879 && twoval_comparison_p (TREE_OPERAND (arg, 1), cval1, cval2)
1880 && twoval_comparison_p (TREE_OPERAND (arg, 2),
1885 /* First see if we can handle the first operand, then the second. For
1886 the second operand, we know *CVAL1 can't be zero. It must be that
1887 one side of the comparison is each of the values; test for the
1888 case where this isn't true by failing if the two operands
1891 if (operand_equal_p (TREE_OPERAND (arg, 0),
1892 TREE_OPERAND (arg, 1), 0))
1896 *cval1 = TREE_OPERAND (arg, 0);
1897 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
1899 else if (*cval2 == 0)
1900 *cval2 = TREE_OPERAND (arg, 0);
1901 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
1906 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
1908 else if (*cval2 == 0)
1909 *cval2 = TREE_OPERAND (arg, 1);
1910 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
1921 /* ARG is a tree that is known to contain just arithmetic operations and
1922 comparisons. Evaluate the operations in the tree substituting NEW0 for
1923 any occurrence of OLD0 as an operand of a comparison and likewise for
1927 eval_subst (arg, old0, new0, old1, new1)
1929 tree old0, new0, old1, new1;
1931 tree type = TREE_TYPE (arg);
1932 enum tree_code code = TREE_CODE (arg);
1933 char class = TREE_CODE_CLASS (code);
1935 /* We can handle some of the 'e' cases here. */
1936 if (class == 'e' && code == TRUTH_NOT_EXPR)
1938 else if (class == 'e'
1939 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
1945 return fold (build1 (code, type,
1946 eval_subst (TREE_OPERAND (arg, 0),
1947 old0, new0, old1, new1)));
1950 return fold (build (code, type,
1951 eval_subst (TREE_OPERAND (arg, 0),
1952 old0, new0, old1, new1),
1953 eval_subst (TREE_OPERAND (arg, 1),
1954 old0, new0, old1, new1)));
1960 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
1963 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
1966 return fold (build (code, type,
1967 eval_subst (TREE_OPERAND (arg, 0),
1968 old0, new0, old1, new1),
1969 eval_subst (TREE_OPERAND (arg, 1),
1970 old0, new0, old1, new1),
1971 eval_subst (TREE_OPERAND (arg, 2),
1972 old0, new0, old1, new1)));
1977 tree arg0 = TREE_OPERAND (arg, 0);
1978 tree arg1 = TREE_OPERAND (arg, 1);
1980 /* We need to check both for exact equality and tree equality. The
1981 former will be true if the operand has a side-effect. In that
1982 case, we know the operand occurred exactly once. */
1984 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
1986 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
1989 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
1991 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
1994 return fold (build (code, type, arg0, arg1));
2001 /* Return a tree for the case when the result of an expression is RESULT
2002 converted to TYPE and OMITTED was previously an operand of the expression
2003 but is now not needed (e.g., we folded OMITTED * 0).
2005 If OMITTED has side effects, we must evaluate it. Otherwise, just do
2006 the conversion of RESULT to TYPE. */
2009 omit_one_operand (type, result, omitted)
2010 tree type, result, omitted;
2012 tree t = convert (type, result);
2014 if (TREE_SIDE_EFFECTS (omitted))
2015 return build (COMPOUND_EXPR, type, omitted, t);
2017 return non_lvalue (t);
2020 /* Return a simplified tree node for the truth-negation of ARG. This
2021 never alters ARG itself. We assume that ARG is an operation that
2022 returns a truth value (0 or 1). */
2025 invert_truthvalue (arg)
2028 tree type = TREE_TYPE (arg);
2029 enum tree_code code = TREE_CODE (arg);
2031 /* If this is a comparison, we can simply invert it, except for
2032 floating-point non-equality comparisons, in which case we just
2033 enclose a TRUTH_NOT_EXPR around what we have. */
2035 if (TREE_CODE_CLASS (code) == '<')
2037 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (arg, 0))) == REAL_TYPE
2038 && code != NE_EXPR && code != EQ_EXPR)
2039 return build1 (TRUTH_NOT_EXPR, type, arg);
2041 return build (invert_tree_comparison (code), type,
2042 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
2048 return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
2049 && TREE_INT_CST_HIGH (arg) == 0, 0));
2051 case TRUTH_AND_EXPR:
2052 return build (TRUTH_OR_EXPR, type,
2053 invert_truthvalue (TREE_OPERAND (arg, 0)),
2054 invert_truthvalue (TREE_OPERAND (arg, 1)));
2057 return build (TRUTH_AND_EXPR, type,
2058 invert_truthvalue (TREE_OPERAND (arg, 0)),
2059 invert_truthvalue (TREE_OPERAND (arg, 1)));
2061 case TRUTH_XOR_EXPR:
2062 /* Here we can invert either operand. We invert the first operand
2063 unless the second operand is a TRUTH_NOT_EXPR in which case our
2064 result is the XOR of the first operand with the inside of the
2065 negation of the second operand. */
2067 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
2068 return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
2069 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
2071 return build (TRUTH_XOR_EXPR, type,
2072 invert_truthvalue (TREE_OPERAND (arg, 0)),
2073 TREE_OPERAND (arg, 1));
2075 case TRUTH_ANDIF_EXPR:
2076 return build (TRUTH_ORIF_EXPR, type,
2077 invert_truthvalue (TREE_OPERAND (arg, 0)),
2078 invert_truthvalue (TREE_OPERAND (arg, 1)));
2080 case TRUTH_ORIF_EXPR:
2081 return build (TRUTH_ANDIF_EXPR, type,
2082 invert_truthvalue (TREE_OPERAND (arg, 0)),
2083 invert_truthvalue (TREE_OPERAND (arg, 1)));
2085 case TRUTH_NOT_EXPR:
2086 return TREE_OPERAND (arg, 0);
2089 return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
2090 invert_truthvalue (TREE_OPERAND (arg, 1)),
2091 invert_truthvalue (TREE_OPERAND (arg, 2)));
2094 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
2095 invert_truthvalue (TREE_OPERAND (arg, 1)));
2097 case NON_LVALUE_EXPR:
2098 return invert_truthvalue (TREE_OPERAND (arg, 0));
2103 return build1 (TREE_CODE (arg), type,
2104 invert_truthvalue (TREE_OPERAND (arg, 0)));
2107 if (! integer_onep (TREE_OPERAND (arg, 1)))
2109 return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
2115 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2116 operands are another bit-wise operation with a common input. If so,
2117 distribute the bit operations to save an operation and possibly two if
2118 constants are involved. For example, convert
2119 (A | B) & (A | C) into A | (B & C)
2120 Further simplification will occur if B and C are constants.
2122 If this optimization cannot be done, 0 will be returned. */
2125 distribute_bit_expr (code, type, arg0, arg1)
2126 enum tree_code code;
2133 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2134 || TREE_CODE (arg0) == code
2135 || (TREE_CODE (arg0) != BIT_AND_EXPR
2136 && TREE_CODE (arg0) != BIT_IOR_EXPR))
2139 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
2141 common = TREE_OPERAND (arg0, 0);
2142 left = TREE_OPERAND (arg0, 1);
2143 right = TREE_OPERAND (arg1, 1);
2145 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
2147 common = TREE_OPERAND (arg0, 0);
2148 left = TREE_OPERAND (arg0, 1);
2149 right = TREE_OPERAND (arg1, 0);
2151 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
2153 common = TREE_OPERAND (arg0, 1);
2154 left = TREE_OPERAND (arg0, 0);
2155 right = TREE_OPERAND (arg1, 1);
2157 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
2159 common = TREE_OPERAND (arg0, 1);
2160 left = TREE_OPERAND (arg0, 0);
2161 right = TREE_OPERAND (arg1, 0);
2166 return fold (build (TREE_CODE (arg0), type, common,
2167 fold (build (code, type, left, right))));
2170 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
2171 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
2174 make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
2177 int bitsize, bitpos;
2180 tree result = build (BIT_FIELD_REF, type, inner,
2181 size_int (bitsize), size_int (bitpos));
2183 TREE_UNSIGNED (result) = unsignedp;
2188 /* Optimize a bit-field compare.
2190 There are two cases: First is a compare against a constant and the
2191 second is a comparison of two items where the fields are at the same
2192 bit position relative to the start of a chunk (byte, halfword, word)
2193 large enough to contain it. In these cases we can avoid the shift
2194 implicit in bitfield extractions.
2196 For constants, we emit a compare of the shifted constant with the
2197 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2198 compared. For two fields at the same position, we do the ANDs with the
2199 similar mask and compare the result of the ANDs.
2201 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2202 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2203 are the left and right operands of the comparison, respectively.
2205 If the optimization described above can be done, we return the resulting
2206 tree. Otherwise we return zero. */
2209 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2210 enum tree_code code;
2214 int lbitpos, lbitsize, rbitpos, rbitsize;
2215 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
2216 tree type = TREE_TYPE (lhs);
2217 tree signed_type, unsigned_type;
2218 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2219 enum machine_mode lmode, rmode, lnmode, rnmode;
2220 int lunsignedp, runsignedp;
2221 int lvolatilep = 0, rvolatilep = 0;
2222 tree linner, rinner;
2226 /* Get all the information about the extractions being done. If the bit size
2227 if the same as the size of the underlying object, we aren't doing an
2228 extraction at all and so can do nothing. */
2229 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2230 &lunsignedp, &lvolatilep);
2231 if (lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2237 /* If this is not a constant, we can only do something if bit positions,
2238 sizes, and signedness are the same. */
2239 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
2240 &rmode, &runsignedp, &rvolatilep);
2242 if (lbitpos != rbitpos || lbitsize != rbitsize
2243 || lunsignedp != runsignedp || offset != 0)
2247 /* See if we can find a mode to refer to this field. We should be able to,
2248 but fail if we can't. */
2249 lnmode = get_best_mode (lbitsize, lbitpos,
2250 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
2252 if (lnmode == VOIDmode)
2255 /* Set signed and unsigned types of the precision of this mode for the
2257 signed_type = type_for_mode (lnmode, 0);
2258 unsigned_type = type_for_mode (lnmode, 1);
2262 rnmode = get_best_mode (rbitsize, rbitpos,
2263 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
2265 if (rnmode == VOIDmode)
2269 /* Compute the bit position and size for the new reference and our offset
2270 within it. If the new reference is the same size as the original, we
2271 won't optimize anything, so return zero. */
2272 lnbitsize = GET_MODE_BITSIZE (lnmode);
2273 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2274 lbitpos -= lnbitpos;
2275 if (lnbitsize == lbitsize)
2280 rnbitsize = GET_MODE_BITSIZE (rnmode);
2281 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2282 rbitpos -= rnbitpos;
2283 if (rnbitsize == rbitsize)
2287 #if BYTES_BIG_ENDIAN
2288 lbitpos = lnbitsize - lbitsize - lbitpos;
2291 /* Make the mask to be used against the extracted field. */
2292 mask = convert (unsigned_type, build_int_2 (~0, ~0));
2293 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2294 mask = const_binop (RSHIFT_EXPR, mask,
2295 size_int (lnbitsize - lbitsize - lbitpos), 0);
2298 /* If not comparing with constant, just rework the comparison
2300 return build (code, compare_type,
2301 build (BIT_AND_EXPR, unsigned_type,
2302 make_bit_field_ref (linner, unsigned_type,
2303 lnbitsize, lnbitpos, 1),
2305 build (BIT_AND_EXPR, unsigned_type,
2306 make_bit_field_ref (rinner, unsigned_type,
2307 rnbitsize, rnbitpos, 1),
2310 /* Otherwise, we are handling the constant case. See if the constant is too
2311 big for the field. Warn and return a tree of for 0 (false) if so. We do
2312 this not only for its own sake, but to avoid having to test for this
2313 error case below. If we didn't, we might generate wrong code.
2315 For unsigned fields, the constant shifted right by the field length should
2316 be all zero. For signed fields, the high-order bits should agree with
2321 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2322 convert (unsigned_type, rhs),
2323 size_int (lbitsize), 0)))
2325 warning ("comparison is always %s due to width of bitfield",
2326 code == NE_EXPR ? "one" : "zero");
2327 return convert (compare_type,
2329 ? integer_one_node : integer_zero_node));
2334 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2335 size_int (lbitsize - 1), 0);
2336 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2338 warning ("comparison is always %s due to width of bitfield",
2339 code == NE_EXPR ? "one" : "zero");
2340 return convert (compare_type,
2342 ? integer_one_node : integer_zero_node));
2346 /* Single-bit compares should always be against zero. */
2347 if (lbitsize == 1 && ! integer_zerop (rhs))
2349 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2350 rhs = convert (type, integer_zero_node);
2353 /* Make a new bitfield reference, shift the constant over the
2354 appropriate number of bits and mask it with the computed mask
2355 (in case this was a signed field). If we changed it, make a new one. */
2356 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2358 rhs = fold (const_binop (BIT_AND_EXPR,
2359 const_binop (LSHIFT_EXPR,
2360 convert (unsigned_type, rhs),
2361 size_int (lbitpos)),
2364 return build (code, compare_type,
2365 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2369 /* Subroutine for fold_truthop: decode a field reference.
2371 If EXP is a comparison reference, we return the innermost reference.
2373 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2374 set to the starting bit number.
2376 If the innermost field can be completely contained in a mode-sized
2377 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2379 *PVOLATILEP is set to 1 if the any expression encountered is volatile;
2380 otherwise it is not changed.
2382 *PUNSIGNEDP is set to the signedness of the field.
2384 *PMASK is set to the mask used. This is either contained in a
2385 BIT_AND_EXPR or derived from the width of the field.
2387 Return 0 if this is not a component reference or is one that we can't
2388 do anything with. */
2391 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2394 int *pbitsize, *pbitpos;
2395 enum machine_mode *pmode;
2396 int *punsignedp, *pvolatilep;
2405 if (TREE_CODE (exp) == BIT_AND_EXPR)
2407 mask = TREE_OPERAND (exp, 1);
2408 exp = TREE_OPERAND (exp, 0);
2409 STRIP_NOPS (exp); STRIP_NOPS (mask);
2410 if (TREE_CODE (mask) != INTEGER_CST)
2414 if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
2415 && TREE_CODE (exp) != BIT_FIELD_REF)
2418 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2419 punsignedp, pvolatilep);
2420 if (*pbitsize < 0 || offset != 0)
2425 tree unsigned_type = type_for_size (*pbitsize, 1);
2426 int precision = TYPE_PRECISION (unsigned_type);
2428 mask = convert (unsigned_type, build_int_2 (~0, ~0));
2429 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2430 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2437 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
2441 all_ones_mask_p (mask, size)
2445 tree type = TREE_TYPE (mask);
2446 int precision = TYPE_PRECISION (type);
2449 operand_equal_p (mask,
2450 const_binop (RSHIFT_EXPR,
2451 const_binop (LSHIFT_EXPR,
2452 convert (signed_type (type),
2453 build_int_2 (~0, ~0)),
2454 size_int (precision - size), 0),
2455 size_int (precision - size), 0),
2459 /* Subroutine for fold_truthop: determine if an operand is simple enough
2460 to be evaluated unconditionally. */
2466 simple_operand_p (exp)
2469 /* Strip any conversions that don't change the machine mode. */
2470 while ((TREE_CODE (exp) == NOP_EXPR
2471 || TREE_CODE (exp) == CONVERT_EXPR)
2472 && (TYPE_MODE (TREE_TYPE (exp))
2473 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2474 exp = TREE_OPERAND (exp, 0);
2476 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2477 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2478 && ! TREE_ADDRESSABLE (exp)
2479 && ! TREE_THIS_VOLATILE (exp)
2480 && ! DECL_NONLOCAL (exp)
2481 /* Don't regard global variables as simple. They may be
2482 allocated in ways unknown to the compiler (shared memory,
2483 #pragma weak, etc). */
2484 && ! TREE_PUBLIC (exp)
2485 && ! DECL_EXTERNAL (exp)
2486 /* Loading a static variable is unduly expensive, but global
2487 registers aren't expensive. */
2488 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2491 /* Subroutine for fold_truthop: try to optimize a range test.
2493 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2495 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2496 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2497 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of
2500 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2501 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2502 larger than HI_CST (they may be equal).
2504 We return the simplified tree or 0 if no optimization is possible. */
2507 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2508 enum tree_code jcode, lo_code, hi_code;
2509 tree type, var, lo_cst, hi_cst;
2512 enum tree_code rcode;
2514 /* See if this is a range test and normalize the constant terms. */
2516 if (jcode == TRUTH_AND_EXPR)
2521 /* See if we have VAR != CST && VAR != CST+1. */
2522 if (! (hi_code == NE_EXPR
2523 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2524 && tree_int_cst_equal (integer_one_node,
2525 const_binop (MINUS_EXPR,
2526 hi_cst, lo_cst, 0))))
2534 if (hi_code == LT_EXPR)
2535 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2536 else if (hi_code != LE_EXPR)
2539 if (lo_code == GT_EXPR)
2540 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2542 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
2555 /* See if we have VAR == CST || VAR == CST+1. */
2556 if (! (hi_code == EQ_EXPR
2557 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2558 && tree_int_cst_equal (integer_one_node,
2559 const_binop (MINUS_EXPR,
2560 hi_cst, lo_cst, 0))))
2568 if (hi_code == GE_EXPR)
2569 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2570 else if (hi_code != GT_EXPR)
2573 if (lo_code == LE_EXPR)
2574 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2576 /* We now have VAR < LO_CST || VAR > HI_CST. */
2585 /* When normalizing, it is possible to both increment the smaller constant
2586 and decrement the larger constant. See if they are still ordered. */
2587 if (tree_int_cst_lt (hi_cst, lo_cst))
2590 /* Fail if VAR isn't an integer. */
2591 utype = TREE_TYPE (var);
2592 if (TREE_CODE (utype) != INTEGER_TYPE
2593 && TREE_CODE (utype) != ENUMERAL_TYPE)
2596 /* The range test is invalid if subtracting the two constants results
2597 in overflow. This can happen in traditional mode. */
2598 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2599 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2602 if (! TREE_UNSIGNED (utype))
2604 utype = unsigned_type (utype);
2605 var = convert (utype, var);
2606 lo_cst = convert (utype, lo_cst);
2607 hi_cst = convert (utype, hi_cst);
2610 return fold (convert (type,
2611 build (rcode, utype,
2612 build (MINUS_EXPR, utype, var, lo_cst),
2613 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2616 /* Find ways of folding logical expressions of LHS and RHS:
2617 Try to merge two comparisons to the same innermost item.
2618 Look for range tests like "ch >= '0' && ch <= '9'".
2619 Look for combinations of simple terms on machines with expensive branches
2620 and evaluate the RHS unconditionally.
2622 For example, if we have p->a == 2 && p->b == 4 and we can make an
2623 object large enough to span both A and B, we can do this with a comparison
2624 against the object ANDed with a mask.
2626 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2627 operations to do this with one comparison.
2629 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2630 function and the one above.
2632 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2633 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2635 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
2638 We return the simplified tree or 0 if no optimization is possible. */
2641 fold_truthop (code, truth_type, lhs, rhs)
2642 enum tree_code code;
2643 tree truth_type, lhs, rhs;
2645 /* If this is the "or" of two comparisons, we can do something if
2646 the comparisons are NE_EXPR. If this is the "and", we can do something
2647 if the comparisons are EQ_EXPR. I.e.,
2648 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2650 WANTED_CODE is this operation code. For single bit fields, we can
2651 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2652 comparison for one-bit fields. */
2654 enum tree_code wanted_code;
2655 enum tree_code lcode, rcode;
2656 tree ll_arg, lr_arg, rl_arg, rr_arg;
2657 tree ll_inner, lr_inner, rl_inner, rr_inner;
2658 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2659 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2660 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2661 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2662 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2663 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2664 enum machine_mode lnmode, rnmode;
2665 tree ll_mask, lr_mask, rl_mask, rr_mask;
2666 tree l_const, r_const;
2668 int first_bit, end_bit;
2671 /* Start by getting the comparison codes and seeing if this looks like
2672 a range test. Fail if anything is volatile. */
2674 if (TREE_SIDE_EFFECTS (lhs)
2675 || TREE_SIDE_EFFECTS (rhs))
2678 lcode = TREE_CODE (lhs);
2679 rcode = TREE_CODE (rhs);
2681 if (TREE_CODE_CLASS (lcode) != '<'
2682 || TREE_CODE_CLASS (rcode) != '<')
2685 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2686 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2688 ll_arg = TREE_OPERAND (lhs, 0);
2689 lr_arg = TREE_OPERAND (lhs, 1);
2690 rl_arg = TREE_OPERAND (rhs, 0);
2691 rr_arg = TREE_OPERAND (rhs, 1);
2693 if (TREE_CODE (lr_arg) == INTEGER_CST
2694 && TREE_CODE (rr_arg) == INTEGER_CST
2695 && operand_equal_p (ll_arg, rl_arg, 0))
2697 if (tree_int_cst_lt (lr_arg, rr_arg))
2698 result = range_test (code, truth_type, lcode, rcode,
2699 ll_arg, lr_arg, rr_arg);
2701 result = range_test (code, truth_type, rcode, lcode,
2702 ll_arg, rr_arg, lr_arg);
2704 /* If this isn't a range test, it also isn't a comparison that
2705 can be merged. However, it wins to evaluate the RHS unconditionally
2706 on machines with expensive branches. */
2708 if (result == 0 && BRANCH_COST >= 2)
2710 if (TREE_CODE (ll_arg) != VAR_DECL
2711 && TREE_CODE (ll_arg) != PARM_DECL)
2713 /* Avoid evaluating the variable part twice. */
2714 ll_arg = save_expr (ll_arg);
2715 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2716 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2718 return build (code, truth_type, lhs, rhs);
2723 /* If the RHS can be evaluated unconditionally and its operands are
2724 simple, it wins to evaluate the RHS unconditionally on machines
2725 with expensive branches. In this case, this isn't a comparison
2726 that can be merged. */
2728 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2729 are with zero (tmw). */
2731 if (BRANCH_COST >= 2
2732 && TREE_CODE (TREE_TYPE (rhs)) == INTEGER_TYPE
2733 && simple_operand_p (rl_arg)
2734 && simple_operand_p (rr_arg))
2735 return build (code, truth_type, lhs, rhs);
2737 /* See if the comparisons can be merged. Then get all the parameters for
2740 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2741 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2745 ll_inner = decode_field_reference (ll_arg,
2746 &ll_bitsize, &ll_bitpos, &ll_mode,
2747 &ll_unsignedp, &volatilep, &ll_mask);
2748 lr_inner = decode_field_reference (lr_arg,
2749 &lr_bitsize, &lr_bitpos, &lr_mode,
2750 &lr_unsignedp, &volatilep, &lr_mask);
2751 rl_inner = decode_field_reference (rl_arg,
2752 &rl_bitsize, &rl_bitpos, &rl_mode,
2753 &rl_unsignedp, &volatilep, &rl_mask);
2754 rr_inner = decode_field_reference (rr_arg,
2755 &rr_bitsize, &rr_bitpos, &rr_mode,
2756 &rr_unsignedp, &volatilep, &rr_mask);
2758 /* It must be true that the inner operation on the lhs of each
2759 comparison must be the same if we are to be able to do anything.
2760 Then see if we have constants. If not, the same must be true for
2762 if (volatilep || ll_inner == 0 || rl_inner == 0
2763 || ! operand_equal_p (ll_inner, rl_inner, 0))
2766 if (TREE_CODE (lr_arg) == INTEGER_CST
2767 && TREE_CODE (rr_arg) == INTEGER_CST)
2768 l_const = lr_arg, r_const = rr_arg;
2769 else if (lr_inner == 0 || rr_inner == 0
2770 || ! operand_equal_p (lr_inner, rr_inner, 0))
2773 l_const = r_const = 0;
2775 /* If either comparison code is not correct for our logical operation,
2776 fail. However, we can convert a one-bit comparison against zero into
2777 the opposite comparison against that bit being set in the field. */
2779 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2780 if (lcode != wanted_code)
2782 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2788 if (rcode != wanted_code)
2790 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2796 /* See if we can find a mode that contains both fields being compared on
2797 the left. If we can't, fail. Otherwise, update all constants and masks
2798 to be relative to a field of that size. */
2799 first_bit = MIN (ll_bitpos, rl_bitpos);
2800 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2801 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2802 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2804 if (lnmode == VOIDmode)
2807 lnbitsize = GET_MODE_BITSIZE (lnmode);
2808 lnbitpos = first_bit & ~ (lnbitsize - 1);
2809 type = type_for_size (lnbitsize, 1);
2810 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2812 #if BYTES_BIG_ENDIAN
2813 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2814 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2817 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2818 size_int (xll_bitpos), 0);
2819 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2820 size_int (xrl_bitpos), 0);
2822 /* Make sure the constants are interpreted as unsigned, so we
2823 don't have sign bits outside the range of their type. */
2827 l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
2828 l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
2829 size_int (xll_bitpos), 0);
2833 r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
2834 r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
2835 size_int (xrl_bitpos), 0);
2838 /* If the right sides are not constant, do the same for it. Also,
2839 disallow this optimization if a size or signedness mismatch occurs
2840 between the left and right sides. */
2843 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2844 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2845 /* Make sure the two fields on the right
2846 correspond to the left without being swapped. */
2847 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2850 first_bit = MIN (lr_bitpos, rr_bitpos);
2851 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2852 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2853 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2855 if (rnmode == VOIDmode)
2858 rnbitsize = GET_MODE_BITSIZE (rnmode);
2859 rnbitpos = first_bit & ~ (rnbitsize - 1);
2860 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2862 #if BYTES_BIG_ENDIAN
2863 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2864 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2867 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2868 size_int (xlr_bitpos), 0);
2869 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2870 size_int (xrr_bitpos), 0);
2872 /* Make a mask that corresponds to both fields being compared.
2873 Do this for both items being compared. If the masks agree,
2874 we can do this by masking both and comparing the masked
2876 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2877 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2878 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2880 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2881 ll_unsignedp || rl_unsignedp);
2882 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2883 lr_unsignedp || rr_unsignedp);
2884 if (! all_ones_mask_p (ll_mask, lnbitsize))
2886 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2887 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2889 return build (wanted_code, truth_type, lhs, rhs);
2892 /* There is still another way we can do something: If both pairs of
2893 fields being compared are adjacent, we may be able to make a wider
2894 field containing them both. */
2895 if ((ll_bitsize + ll_bitpos == rl_bitpos
2896 && lr_bitsize + lr_bitpos == rr_bitpos)
2897 || (ll_bitpos == rl_bitpos + rl_bitsize
2898 && lr_bitpos == rr_bitpos + rr_bitsize))
2899 return build (wanted_code, truth_type,
2900 make_bit_field_ref (ll_inner, type,
2901 ll_bitsize + rl_bitsize,
2902 MIN (ll_bitpos, rl_bitpos),
2904 make_bit_field_ref (lr_inner, type,
2905 lr_bitsize + rr_bitsize,
2906 MIN (lr_bitpos, rr_bitpos),
2912 /* Handle the case of comparisons with constants. If there is something in
2913 common between the masks, those bits of the constants must be the same.
2914 If not, the condition is always false. Test for this to avoid generating
2915 incorrect code below. */
2916 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2917 if (! integer_zerop (result)
2918 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2919 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2921 if (wanted_code == NE_EXPR)
2923 warning ("`or' of unmatched not-equal tests is always 1");
2924 return convert (truth_type, integer_one_node);
2928 warning ("`and' of mutually exclusive equal-tests is always zero");
2929 return convert (truth_type, integer_zero_node);
2933 /* Construct the expression we will return. First get the component
2934 reference we will make. Unless the mask is all ones the width of
2935 that field, perform the mask operation. Then compare with the
2937 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2938 ll_unsignedp || rl_unsignedp);
2940 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2941 if (! all_ones_mask_p (ll_mask, lnbitsize))
2942 result = build (BIT_AND_EXPR, type, result, ll_mask);
2944 return build (wanted_code, truth_type, result,
2945 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
2948 /* Perform constant folding and related simplification of EXPR.
2949 The related simplifications include x*1 => x, x*0 => 0, etc.,
2950 and application of the associative law.
2951 NOP_EXPR conversions may be removed freely (as long as we
2952 are careful not to change the C type of the overall expression).
2953 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
2954 but we can constant-fold them if they have constant operands. */
2960 register tree t = expr;
2961 tree t1 = NULL_TREE;
2963 tree type = TREE_TYPE (expr);
2964 register tree arg0, arg1;
2965 register enum tree_code code = TREE_CODE (t);
2969 /* WINS will be nonzero when the switch is done
2970 if all operands are constant. */
2974 /* Return right away if already constant. */
2975 if (TREE_CONSTANT (t))
2977 if (code == CONST_DECL)
2978 return DECL_INITIAL (t);
2982 kind = TREE_CODE_CLASS (code);
2983 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
2985 /* Special case for conversion ops that can have fixed point args. */
2986 arg0 = TREE_OPERAND (t, 0);
2988 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
2990 STRIP_TYPE_NOPS (arg0);
2992 if (arg0 != 0 && TREE_CODE (arg0) != INTEGER_CST
2993 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
2994 && TREE_CODE (arg0) != REAL_CST
2995 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
2997 /* Note that TREE_CONSTANT isn't enough:
2998 static var addresses are constant but we can't
2999 do arithmetic on them. */
3002 else if (kind == 'e' || kind == '<'
3003 || kind == '1' || kind == '2' || kind == 'r')
3005 register int len = tree_code_length[(int) code];
3007 for (i = 0; i < len; i++)
3009 tree op = TREE_OPERAND (t, i);
3012 continue; /* Valid for CALL_EXPR, at least. */
3014 /* Strip any conversions that don't change the mode. */
3017 if (TREE_CODE (op) != INTEGER_CST
3018 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3019 && TREE_CODE (op) != REAL_CST
3020 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3022 /* Note that TREE_CONSTANT isn't enough:
3023 static var addresses are constant but we can't
3024 do arithmetic on them. */
3034 /* If this is a commutative operation, and ARG0 is a constant, move it
3035 to ARG1 to reduce the number of tests below. */
3036 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3037 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3038 || code == BIT_AND_EXPR)
3039 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3041 tem = arg0; arg0 = arg1; arg1 = tem;
3043 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3044 TREE_OPERAND (t, 1) = tem;
3047 /* Now WINS is set as described above,
3048 ARG0 is the first operand of EXPR,
3049 and ARG1 is the second operand (if it has more than one operand).
3051 First check for cases where an arithmetic operation is applied to a
3052 compound, conditional, or comparison operation. Push the arithmetic
3053 operation inside the compound or conditional to see if any folding
3054 can then be done. Convert comparison to conditional for this purpose.
3055 This also optimizes non-constant cases that used to be done in
3057 if (TREE_CODE_CLASS (code) == '1')
3059 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3060 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3061 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3062 else if (TREE_CODE (arg0) == COND_EXPR)
3064 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3065 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3066 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3068 /* If this was a conversion, and all we did was to move into
3069 inside the COND_EXPR, bring it back out. Then return so we
3070 don't get into an infinite recursion loop taking the conversion
3071 out and then back in. */
3073 if ((code == NOP_EXPR || code == CONVERT_EXPR
3074 || code == NON_LVALUE_EXPR)
3075 && TREE_CODE (t) == COND_EXPR
3076 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3077 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3078 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3079 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0))))
3080 t = build1 (code, type,
3082 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3083 TREE_OPERAND (t, 0),
3084 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3085 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3088 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3089 return fold (build (COND_EXPR, type, arg0,
3090 fold (build1 (code, type, integer_one_node)),
3091 fold (build1 (code, type, integer_zero_node))));
3093 else if (TREE_CODE_CLASS (code) == '2')
3095 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3096 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3097 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3098 else if (TREE_CODE (arg1) == COND_EXPR
3099 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3101 tree test, true_value, false_value;
3103 if (TREE_CODE (arg1) == COND_EXPR)
3105 test = TREE_OPERAND (arg1, 0);
3106 true_value = TREE_OPERAND (arg1, 1);
3107 false_value = TREE_OPERAND (arg1, 2);
3112 true_value = integer_one_node;
3113 false_value = integer_zero_node;
3116 if (TREE_CODE (arg0) != VAR_DECL && TREE_CODE (arg0) != PARM_DECL)
3117 arg0 = save_expr (arg0);
3118 test = fold (build (COND_EXPR, type, test,
3119 fold (build (code, type, arg0, true_value)),
3120 fold (build (code, type, arg0, false_value))));
3121 if (TREE_CODE (arg0) == SAVE_EXPR)
3122 return build (COMPOUND_EXPR, type,
3123 convert (void_type_node, arg0), test);
3125 return convert (type, test);
3128 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3129 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3130 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3131 else if (TREE_CODE (arg0) == COND_EXPR
3132 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3134 tree test, true_value, false_value;
3136 if (TREE_CODE (arg0) == COND_EXPR)
3138 test = TREE_OPERAND (arg0, 0);
3139 true_value = TREE_OPERAND (arg0, 1);
3140 false_value = TREE_OPERAND (arg0, 2);
3145 true_value = integer_one_node;
3146 false_value = integer_zero_node;
3149 if (TREE_CODE (arg1) != VAR_DECL && TREE_CODE (arg1) != PARM_DECL)
3150 arg1 = save_expr (arg1);
3151 test = fold (build (COND_EXPR, type, test,
3152 fold (build (code, type, true_value, arg1)),
3153 fold (build (code, type, false_value, arg1))));
3154 if (TREE_CODE (arg1) == SAVE_EXPR)
3155 return build (COMPOUND_EXPR, type,
3156 convert (void_type_node, arg1), test);
3158 return convert (type, test);
3161 else if (TREE_CODE_CLASS (code) == '<'
3162 && TREE_CODE (arg0) == COMPOUND_EXPR)
3163 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3164 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3165 else if (TREE_CODE_CLASS (code) == '<'
3166 && TREE_CODE (arg1) == COMPOUND_EXPR)
3167 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3168 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3180 return fold (DECL_INITIAL (t));
3185 case FIX_TRUNC_EXPR:
3186 /* Other kinds of FIX are not handled properly by fold_convert. */
3187 /* Two conversions in a row are not needed unless:
3188 - the intermediate type is narrower than both initial and final, or
3189 - the intermediate type and innermost type differ in signedness,
3190 and the outermost type is wider than the intermediate, or
3191 - the initial type is a pointer type and the precisions of the
3192 intermediate and final types differ, or
3193 - the final type is a pointer type and the precisions of the
3194 initial and intermediate types differ. */
3195 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3196 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3197 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3198 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3200 TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3201 > TYPE_PRECISION (TREE_TYPE (t)))
3202 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3204 && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
3206 && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3207 != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3208 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3209 < TYPE_PRECISION (TREE_TYPE (t))))
3210 && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3211 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3212 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
3214 (TREE_UNSIGNED (TREE_TYPE (t))
3215 && (TYPE_PRECISION (TREE_TYPE (t))
3216 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3217 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3219 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3220 != TYPE_PRECISION (TREE_TYPE (t))))
3221 && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
3222 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3223 != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3224 return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3226 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3227 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3228 /* Detect assigning a bitfield. */
3229 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3230 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3232 /* Don't leave an assignment inside a conversion
3233 unless assigning a bitfield. */
3234 tree prev = TREE_OPERAND (t, 0);
3235 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3236 /* First do the assignment, then return converted constant. */
3237 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3243 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3246 return fold_convert (t, arg0);
3248 #if 0 /* This loses on &"foo"[0]. */
3253 /* Fold an expression like: "foo"[2] */
3254 if (TREE_CODE (arg0) == STRING_CST
3255 && TREE_CODE (arg1) == INTEGER_CST
3256 && !TREE_INT_CST_HIGH (arg1)
3257 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3259 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3260 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3268 TREE_CONSTANT (t) = wins;
3274 if (TREE_CODE (arg0) == INTEGER_CST)
3276 HOST_WIDE_INT low, high;
3277 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3278 TREE_INT_CST_HIGH (arg0),
3280 t = build_int_2 (low, high);
3281 TREE_TYPE (t) = type;
3282 TREE_CONSTANT_OVERFLOW (t)
3283 = (TREE_CONSTANT_OVERFLOW (arg0)
3284 | force_fit_type (t, overflow));
3286 else if (TREE_CODE (arg0) == REAL_CST)
3287 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3288 TREE_TYPE (t) = type;
3290 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3291 return TREE_OPERAND (arg0, 0);
3293 /* Convert - (a - b) to (b - a) for non-floating-point. */
3294 else if (TREE_CODE (arg0) == MINUS_EXPR && TREE_CODE (type) != REAL_TYPE)
3295 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3296 TREE_OPERAND (arg0, 0));
3303 if (TREE_CODE (arg0) == INTEGER_CST)
3305 if (! TREE_UNSIGNED (type)
3306 && TREE_INT_CST_HIGH (arg0) < 0)
3308 HOST_WIDE_INT low, high;
3309 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3310 TREE_INT_CST_HIGH (arg0),
3312 t = build_int_2 (low, high);
3313 TREE_TYPE (t) = type;
3314 TREE_CONSTANT_OVERFLOW (t)
3315 = (TREE_CONSTANT_OVERFLOW (arg0)
3316 | force_fit_type (t, overflow));
3319 else if (TREE_CODE (arg0) == REAL_CST)
3321 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3322 t = build_real (type,
3323 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3325 TREE_TYPE (t) = type;
3327 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3328 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3334 if (TREE_CODE (arg0) == INTEGER_CST)
3335 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3336 ~ TREE_INT_CST_HIGH (arg0));
3337 TREE_TYPE (t) = type;
3338 force_fit_type (t, 0);
3339 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3341 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3342 return TREE_OPERAND (arg0, 0);
3346 /* A + (-B) -> A - B */
3347 if (TREE_CODE (arg1) == NEGATE_EXPR)
3348 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3349 else if (TREE_CODE (type) != REAL_TYPE)
3351 if (integer_zerop (arg1))
3352 return non_lvalue (convert (type, arg0));
3354 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3355 with a constant, and the two constants have no bits in common,
3356 we should treat this as a BIT_IOR_EXPR since this may produce more
3358 if (TREE_CODE (arg0) == BIT_AND_EXPR
3359 && TREE_CODE (arg1) == BIT_AND_EXPR
3360 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3361 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3362 && integer_zerop (const_binop (BIT_AND_EXPR,
3363 TREE_OPERAND (arg0, 1),
3364 TREE_OPERAND (arg1, 1), 0)))
3366 code = BIT_IOR_EXPR;
3370 /* In IEEE floating point, x+0 may not equal x. */
3371 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3372 && real_zerop (arg1))
3373 return non_lvalue (convert (type, arg0));
3375 /* In most languages, can't associate operations on floats
3376 through parentheses. Rather than remember where the parentheses
3377 were, we don't associate floats at all. It shouldn't matter much. */
3378 if (TREE_CODE (type) == REAL_TYPE)
3380 /* The varsign == -1 cases happen only for addition and subtraction.
3381 It says that the arg that was split was really CON minus VAR.
3382 The rest of the code applies to all associative operations. */
3388 if (split_tree (arg0, code, &var, &con, &varsign))
3392 /* EXPR is (CON-VAR) +- ARG1. */
3393 /* If it is + and VAR==ARG1, return just CONST. */
3394 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3395 return convert (TREE_TYPE (t), con);
3397 /* Otherwise return (CON +- ARG1) - VAR. */
3398 TREE_SET_CODE (t, MINUS_EXPR);
3399 TREE_OPERAND (t, 1) = var;
3401 = fold (build (code, TREE_TYPE (t), con, arg1));
3405 /* EXPR is (VAR+CON) +- ARG1. */
3406 /* If it is - and VAR==ARG1, return just CONST. */
3407 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3408 return convert (TREE_TYPE (t), con);
3410 /* Otherwise return VAR +- (ARG1 +- CON). */
3411 TREE_OPERAND (t, 1) = tem
3412 = fold (build (code, TREE_TYPE (t), arg1, con));
3413 TREE_OPERAND (t, 0) = var;
3414 if (integer_zerop (tem)
3415 && (code == PLUS_EXPR || code == MINUS_EXPR))
3416 return convert (type, var);
3417 /* If we have x +/- (c - d) [c an explicit integer]
3418 change it to x -/+ (d - c) since if d is relocatable
3419 then the latter can be a single immediate insn
3420 and the former cannot. */
3421 if (TREE_CODE (tem) == MINUS_EXPR
3422 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3424 tree tem1 = TREE_OPERAND (tem, 1);
3425 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3426 TREE_OPERAND (tem, 0) = tem1;
3428 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3434 if (split_tree (arg1, code, &var, &con, &varsign))
3436 /* EXPR is ARG0 +- (CON +- VAR). */
3439 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3440 if (TREE_CODE (t) == MINUS_EXPR
3441 && operand_equal_p (var, arg0, 0))
3443 /* If VAR and ARG0 cancel, return just CON or -CON. */
3444 if (code == PLUS_EXPR)
3445 return convert (TREE_TYPE (t), con);
3446 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3447 convert (TREE_TYPE (t), con)));
3450 = fold (build (code, TREE_TYPE (t), arg0, con));
3451 TREE_OPERAND (t, 1) = var;
3452 if (integer_zerop (TREE_OPERAND (t, 0))
3453 && TREE_CODE (t) == PLUS_EXPR)
3454 return convert (TREE_TYPE (t), var);
3459 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3460 if (TREE_CODE (arg1) == REAL_CST)
3462 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3464 t1 = const_binop (code, arg0, arg1, 0);
3465 if (t1 != NULL_TREE)
3467 /* The return value should always have
3468 the same type as the original expression. */
3469 TREE_TYPE (t1) = TREE_TYPE (t);
3475 if (TREE_CODE (type) != REAL_TYPE)
3477 if (! wins && integer_zerop (arg0))
3478 return build1 (NEGATE_EXPR, type, arg1);
3479 if (integer_zerop (arg1))
3480 return non_lvalue (convert (type, arg0));
3482 /* Convert A - (-B) to A + B. */
3483 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3484 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3485 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT)
3487 /* Except with IEEE floating point, 0-x equals -x. */
3488 if (! wins && real_zerop (arg0))
3489 return build1 (NEGATE_EXPR, type, arg1);
3490 /* Except with IEEE floating point, x-0 equals x. */
3491 if (real_zerop (arg1))
3492 return non_lvalue (convert (type, arg0));
3494 /* Fold &x - &x. This can happen from &x.foo - &x.
3495 This is unsafe for certain floats even in non-IEEE formats.
3496 In IEEE, it is unsafe because it does wrong for NaNs.
3497 Also note that operand_equal_p is always false if an operand
3500 if (operand_equal_p (arg0, arg1,
3501 TREE_CODE (type) == REAL_TYPE))
3502 return convert (type, integer_zero_node);
3507 if (TREE_CODE (type) != REAL_TYPE)
3509 if (integer_zerop (arg1))
3510 return omit_one_operand (type, arg1, arg0);
3511 if (integer_onep (arg1))
3512 return non_lvalue (convert (type, arg0));
3514 /* (a * (1 << b)) is (a << b) */
3515 if (TREE_CODE (arg1) == LSHIFT_EXPR
3516 && integer_onep (TREE_OPERAND (arg1, 0)))
3517 return fold (build (LSHIFT_EXPR, type, arg0,
3518 TREE_OPERAND (arg1, 1)));
3519 if (TREE_CODE (arg0) == LSHIFT_EXPR
3520 && integer_onep (TREE_OPERAND (arg0, 0)))
3521 return fold (build (LSHIFT_EXPR, type, arg1,
3522 TREE_OPERAND (arg0, 1)));
3526 /* x*0 is 0, except for IEEE floating point. */
3527 if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3528 && real_zerop (arg1))
3529 return omit_one_operand (type, arg1, arg0);
3530 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3531 However, ANSI says we can drop signals,
3532 so we can do this anyway. */
3533 if (real_onep (arg1))
3534 return non_lvalue (convert (type, arg0));
3536 if (! wins && real_twop (arg1))
3538 tree arg = save_expr (arg0);
3539 return build (PLUS_EXPR, type, arg, arg);
3546 if (integer_all_onesp (arg1))
3547 return omit_one_operand (type, arg1, arg0);
3548 if (integer_zerop (arg1))
3549 return non_lvalue (convert (type, arg0));
3550 t1 = distribute_bit_expr (code, type, arg0, arg1);
3551 if (t1 != NULL_TREE)
3554 /* (a << C1) | (a >> C2) if A is unsigned and C1+C2 is the size of A
3555 is a rotate of A by C1 bits. */
3557 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3558 || TREE_CODE (arg0) == LSHIFT_EXPR)
3559 && (TREE_CODE (arg1) == RSHIFT_EXPR
3560 || TREE_CODE (arg1) == LSHIFT_EXPR)
3561 && TREE_CODE (arg0) != TREE_CODE (arg1)
3562 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3563 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3564 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3565 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3566 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3567 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3568 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3569 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3570 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3571 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3572 TREE_CODE (arg0) == LSHIFT_EXPR
3573 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
3578 if (integer_zerop (arg1))
3579 return non_lvalue (convert (type, arg0));
3580 if (integer_all_onesp (arg1))
3581 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3586 if (integer_all_onesp (arg1))
3587 return non_lvalue (convert (type, arg0));
3588 if (integer_zerop (arg1))
3589 return omit_one_operand (type, arg1, arg0);
3590 t1 = distribute_bit_expr (code, type, arg0, arg1);
3591 if (t1 != NULL_TREE)
3593 /* Simplify ((int)c & 0x377) into (int)c, if c is unsigned char. */
3594 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3595 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3597 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3598 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3599 && (~TREE_INT_CST_LOW (arg0)
3600 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3601 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3603 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3604 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3606 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3607 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3608 && (~TREE_INT_CST_LOW (arg1)
3609 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3610 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3614 case BIT_ANDTC_EXPR:
3615 if (integer_all_onesp (arg0))
3616 return non_lvalue (convert (type, arg1));
3617 if (integer_zerop (arg0))
3618 return omit_one_operand (type, arg0, arg1);
3619 if (TREE_CODE (arg1) == INTEGER_CST)
3621 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3622 code = BIT_AND_EXPR;
3627 case TRUNC_DIV_EXPR:
3628 case ROUND_DIV_EXPR:
3629 case FLOOR_DIV_EXPR:
3631 case EXACT_DIV_EXPR:
3633 if (integer_onep (arg1))
3634 return non_lvalue (convert (type, arg0));
3635 if (integer_zerop (arg1))
3638 /* If we have ((a * C1) / C2) and C1 % C2 == 0, we can replace this with
3639 (a * (C1/C2). Also look for when we have a SAVE_EXPR in
3641 if (TREE_CODE (arg1) == INTEGER_CST
3642 && TREE_INT_CST_LOW (arg1) > 0 && TREE_INT_CST_HIGH (arg1) == 0
3643 && TREE_CODE (arg0) == MULT_EXPR
3644 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3645 && TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) > 0
3646 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3647 && 0 == (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3648 % TREE_INT_CST_LOW (arg1)))
3651 = build_int_2 (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3652 / TREE_INT_CST_LOW (arg1), 0);
3654 TREE_TYPE (new_op) = type;
3655 return build (MULT_EXPR, type, TREE_OPERAND (arg0, 0), new_op);
3658 else if (TREE_CODE (arg1) == INTEGER_CST
3659 && TREE_INT_CST_LOW (arg1) > 0 && TREE_INT_CST_HIGH (arg1) == 0
3660 && TREE_CODE (arg0) == SAVE_EXPR
3661 && TREE_CODE (TREE_OPERAND (arg0, 0)) == MULT_EXPR
3662 && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3664 && (TREE_INT_CST_LOW (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3666 && (TREE_INT_CST_HIGH (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3668 && (TREE_INT_CST_LOW (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3669 % TREE_INT_CST_LOW (arg1)) == 0)
3672 = build_int_2 (TREE_INT_CST_LOW (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3673 / TREE_INT_CST_LOW (arg1), 0);
3675 TREE_TYPE (new_op) = type;
3676 return build (MULT_EXPR, type,
3677 TREE_OPERAND (TREE_OPERAND (arg0, 0), 0), new_op);
3680 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3681 #ifndef REAL_INFINITY
3682 if (TREE_CODE (arg1) == REAL_CST
3683 && real_zerop (arg1))
3686 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3691 case FLOOR_MOD_EXPR:
3692 case ROUND_MOD_EXPR:
3693 case TRUNC_MOD_EXPR:
3694 if (integer_onep (arg1))
3695 return omit_one_operand (type, integer_zero_node, arg0);
3696 if (integer_zerop (arg1))
3704 if (integer_zerop (arg1))
3705 return non_lvalue (convert (type, arg0));
3706 /* Since negative shift count is not well-defined,
3707 don't try to compute it in the compiler. */
3708 if (tree_int_cst_lt (arg1, integer_zero_node))
3713 if (operand_equal_p (arg0, arg1, 0))
3715 if (TREE_CODE (type) == INTEGER_TYPE
3716 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
3717 return omit_one_operand (type, arg1, arg0);
3721 if (operand_equal_p (arg0, arg1, 0))
3723 if (TREE_CODE (type) == INTEGER_TYPE
3724 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
3725 return omit_one_operand (type, arg1, arg0);
3728 case TRUTH_NOT_EXPR:
3729 /* Note that the operand of this must be an int
3730 and its values must be 0 or 1.
3731 ("true" is a fixed value perhaps depending on the language,
3732 but we don't handle values other than 1 correctly yet.) */
3733 return invert_truthvalue (arg0);
3735 case TRUTH_ANDIF_EXPR:
3736 /* Note that the operands of this must be ints
3737 and their values must be 0 or 1.
3738 ("true" is a fixed value perhaps depending on the language.) */
3739 /* If first arg is constant zero, return it. */
3740 if (integer_zerop (arg0))
3742 case TRUTH_AND_EXPR:
3743 /* If either arg is constant true, drop it. */
3744 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
3745 return non_lvalue (arg1);
3746 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
3747 return non_lvalue (arg0);
3748 /* If second arg is constant zero, result is zero, but first arg
3749 must be evaluated. */
3750 if (integer_zerop (arg1))
3751 return omit_one_operand (type, arg1, arg0);
3754 /* Check for the possibility of merging component references. If our
3755 lhs is another similar operation, try to merge its rhs with our
3756 rhs. Then try to merge our lhs and rhs. */
3759 if (TREE_CODE (arg0) == code)
3761 tem = fold_truthop (code, type,
3762 TREE_OPERAND (arg0, 1), arg1);
3764 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
3767 tem = fold_truthop (code, type, arg0, arg1);
3773 case TRUTH_ORIF_EXPR:
3774 /* Note that the operands of this must be ints
3775 and their values must be 0 or true.
3776 ("true" is a fixed value perhaps depending on the language.) */
3777 /* If first arg is constant true, return it. */
3778 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
3781 /* If either arg is constant zero, drop it. */
3782 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
3783 return non_lvalue (arg1);
3784 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
3785 return non_lvalue (arg0);
3786 /* If second arg is constant true, result is true, but we must
3787 evaluate first arg. */
3788 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
3789 return omit_one_operand (type, arg1, arg0);
3792 case TRUTH_XOR_EXPR:
3793 /* If either arg is constant zero, drop it. */
3794 if (integer_zerop (arg0))
3795 return non_lvalue (arg1);
3796 if (integer_zerop (arg1))
3797 return non_lvalue (arg0);
3798 /* If either arg is constant true, this is a logical inversion. */
3799 if (integer_onep (arg0))
3800 return non_lvalue (invert_truthvalue (arg1));
3801 if (integer_onep (arg1))
3802 return non_lvalue (invert_truthvalue (arg0));
3811 /* If one arg is a constant integer, put it last. */
3812 if (TREE_CODE (arg0) == INTEGER_CST
3813 && TREE_CODE (arg1) != INTEGER_CST)
3815 TREE_OPERAND (t, 0) = arg1;
3816 TREE_OPERAND (t, 1) = arg0;
3817 arg0 = TREE_OPERAND (t, 0);
3818 arg1 = TREE_OPERAND (t, 1);
3819 code = swap_tree_comparison (code);
3820 TREE_SET_CODE (t, code);
3823 /* Convert foo++ == CONST into ++foo == CONST + INCR.
3824 First, see if one arg is constant; find the constant arg
3825 and the other one. */
3827 tree constop = 0, varop;
3830 if (TREE_CONSTANT (arg1))
3831 constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
3832 if (TREE_CONSTANT (arg0))
3833 constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
3835 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
3837 /* This optimization is invalid for ordered comparisons
3838 if CONST+INCR overflows or if foo+incr might overflow.
3839 This optimization is invalid for floating point due to rounding.
3840 For pointer types we assume overflow doesn't happen. */
3841 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
3842 || (TREE_CODE (TREE_TYPE (varop)) != REAL_TYPE
3843 && (code == EQ_EXPR || code == NE_EXPR)))
3846 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
3847 constop, TREE_OPERAND (varop, 1)));
3848 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
3849 *constoploc = newconst;
3853 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
3855 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
3856 || (TREE_CODE (TREE_TYPE (varop)) != REAL_TYPE
3857 && (code == EQ_EXPR || code == NE_EXPR)))
3860 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
3861 constop, TREE_OPERAND (varop, 1)));
3862 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
3863 *constoploc = newconst;
3869 /* Change X >= CST to X > (CST - 1) if CST is positive. */
3870 if (TREE_CODE (arg1) == INTEGER_CST
3871 && TREE_CODE (arg0) != INTEGER_CST
3872 && ! tree_int_cst_lt (arg1, integer_one_node))
3874 switch (TREE_CODE (t))
3878 TREE_SET_CODE (t, code);
3879 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
3880 TREE_OPERAND (t, 1) = arg1;
3885 TREE_SET_CODE (t, code);
3886 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
3887 TREE_OPERAND (t, 1) = arg1;
3891 /* If this is an EQ or NE comparison with zero and ARG0 is
3892 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
3893 two operations, but the latter can be done in one less insn
3894 on machines that have only two-operand insns or on which a
3895 constant cannot be the first operand. */
3896 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
3897 && TREE_CODE (arg0) == BIT_AND_EXPR)
3899 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
3900 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
3902 fold (build (code, type,
3903 build (BIT_AND_EXPR, TREE_TYPE (arg0),
3905 TREE_TYPE (TREE_OPERAND (arg0, 0)),
3906 TREE_OPERAND (arg0, 1),
3907 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
3908 convert (TREE_TYPE (arg0),
3911 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
3912 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
3914 fold (build (code, type,
3915 build (BIT_AND_EXPR, TREE_TYPE (arg0),
3917 TREE_TYPE (TREE_OPERAND (arg0, 1)),
3918 TREE_OPERAND (arg0, 0),
3919 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
3920 convert (TREE_TYPE (arg0),
3925 /* If this is an NE comparison of zero with an AND of one, remove the
3926 comparison since the AND will give the correct value. */
3927 if (code == NE_EXPR && integer_zerop (arg1)
3928 && TREE_CODE (arg0) == BIT_AND_EXPR
3929 && integer_onep (TREE_OPERAND (arg0, 1)))
3930 return convert (type, arg0);
3932 /* If we have (A & C) == C where C is a power of 2, convert this into
3933 (A & C) != 0. Similarly for NE_EXPR. */
3934 if ((code == EQ_EXPR || code == NE_EXPR)
3935 && TREE_CODE (arg0) == BIT_AND_EXPR
3936 && integer_pow2p (TREE_OPERAND (arg0, 1))
3937 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3938 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
3939 arg0, integer_zero_node);
3941 /* Simplify comparison of something with itself. (For IEEE
3942 floating-point, we can only do some of these simplifications.) */
3943 if (operand_equal_p (arg0, arg1, 0))
3950 if (TREE_CODE (TREE_TYPE (arg0)) == INTEGER_TYPE)
3952 t = build_int_2 (1, 0);
3953 TREE_TYPE (t) = type;
3957 TREE_SET_CODE (t, code);
3961 /* For NE, we can only do this simplification if integer. */
3962 if (TREE_CODE (TREE_TYPE (arg0)) != INTEGER_TYPE)
3964 /* ... fall through ... */
3967 t = build_int_2 (0, 0);
3968 TREE_TYPE (t) = type;
3973 /* An unsigned comparison against 0 can be simplified. */
3974 if (integer_zerop (arg1)
3975 && (TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE
3976 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
3977 && TREE_UNSIGNED (TREE_TYPE (arg1)))
3979 switch (TREE_CODE (t))
3983 TREE_SET_CODE (t, NE_EXPR);
3987 TREE_SET_CODE (t, EQ_EXPR);
3990 return omit_one_operand (integer_type_node,
3991 integer_one_node, arg0);
3993 return omit_one_operand (integer_type_node,
3994 integer_zero_node, arg0);
3998 /* If we are comparing an expression that just has comparisons
3999 of two integer values, arithmetic expressions of those comparisons,
4000 and constants, we can simplify it. There are only three cases
4001 to check: the two values can either be equal, the first can be
4002 greater, or the second can be greater. Fold the expression for
4003 those three values. Since each value must be 0 or 1, we have
4004 eight possibilities, each of which corresponds to the constant 0
4005 or 1 or one of the six possible comparisons.
4007 This handles common cases like (a > b) == 0 but also handles
4008 expressions like ((x > y) - (y > x)) > 0, which supposedly
4009 occur in macroized code. */
4011 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
4013 tree cval1 = 0, cval2 = 0;
4015 if (twoval_comparison_p (arg0, &cval1, &cval2)
4016 /* Don't handle degenerate cases here; they should already
4017 have been handled anyway. */
4018 && cval1 != 0 && cval2 != 0
4019 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
4020 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
4021 && TREE_CODE (TREE_TYPE (cval1)) == INTEGER_TYPE
4022 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
4023 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
4025 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
4026 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
4028 /* We can't just pass T to eval_subst in case cval1 or cval2
4029 was the same as ARG1. */
4032 = fold (build (code, type,
4033 eval_subst (arg0, cval1, maxval, cval2, minval),
4036 = fold (build (code, type,
4037 eval_subst (arg0, cval1, maxval, cval2, maxval),
4040 = fold (build (code, type,
4041 eval_subst (arg0, cval1, minval, cval2, maxval),
4044 /* All three of these results should be 0 or 1. Confirm they
4045 are. Then use those values to select the proper code
4048 if ((integer_zerop (high_result)
4049 || integer_onep (high_result))
4050 && (integer_zerop (equal_result)
4051 || integer_onep (equal_result))
4052 && (integer_zerop (low_result)
4053 || integer_onep (low_result)))
4055 /* Make a 3-bit mask with the high-order bit being the
4056 value for `>', the next for '=', and the low for '<'. */
4057 switch ((integer_onep (high_result) * 4)
4058 + (integer_onep (equal_result) * 2)
4059 + integer_onep (low_result))
4063 return omit_one_operand (type, integer_zero_node, arg0);
4084 return omit_one_operand (type, integer_one_node, arg0);
4087 return fold (build (code, type, cval1, cval2));
4092 /* If this is a comparison of a field, we may be able to simplify it. */
4093 if ((TREE_CODE (arg0) == COMPONENT_REF
4094 || TREE_CODE (arg0) == BIT_FIELD_REF)
4095 && (code == EQ_EXPR || code == NE_EXPR)
4096 /* Handle the constant case even without -O
4097 to make sure the warnings are given. */
4098 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
4100 t1 = optimize_bit_field_compare (code, type, arg0, arg1);
4104 /* From here on, the only cases we handle are when the result is
4105 known to be a constant.
4107 To compute GT, swap the arguments and do LT.
4108 To compute GE, do LT and invert the result.
4109 To compute LE, swap the arguments, do LT and invert the result.
4110 To compute NE, do EQ and invert the result.
4112 Therefore, the code below must handle only EQ and LT. */
4114 if (code == LE_EXPR || code == GT_EXPR)
4116 tem = arg0, arg0 = arg1, arg1 = tem;
4117 code = swap_tree_comparison (code);
4120 /* Note that it is safe to invert for real values here because we
4121 will check below in the one case that it matters. */
4124 if (code == NE_EXPR || code == GE_EXPR)
4127 code = invert_tree_comparison (code);
4130 /* Compute a result for LT or EQ if args permit;
4131 otherwise return T. */
4132 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
4134 if (code == EQ_EXPR)
4135 t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
4136 == TREE_INT_CST_LOW (arg1))
4137 && (TREE_INT_CST_HIGH (arg0)
4138 == TREE_INT_CST_HIGH (arg1)),
4141 t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
4142 ? INT_CST_LT_UNSIGNED (arg0, arg1)
4143 : INT_CST_LT (arg0, arg1)),
4147 /* Assume a nonexplicit constant cannot equal an explicit one,
4148 since such code would be undefined anyway.
4149 Exception: on sysvr4, using #pragma weak,
4150 a label can come out as 0. */
4151 else if (TREE_CODE (arg1) == INTEGER_CST
4152 && !integer_zerop (arg1)
4153 && TREE_CONSTANT (arg0)
4154 && TREE_CODE (arg0) == ADDR_EXPR
4156 t1 = build_int_2 (0, 0);
4158 /* Two real constants can be compared explicitly. */
4159 else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
4161 /* If either operand is a NaN, the result is false with two
4162 exceptions: First, an NE_EXPR is true on NaNs, but that case
4163 is already handled correctly since we will be inverting the
4164 result for NE_EXPR. Second, if we had inverted a LE_EXPR
4165 or a GE_EXPR into a LT_EXPR, we must return true so that it
4166 will be inverted into false. */
4168 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
4169 || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
4170 t1 = build_int_2 (invert && code == LT_EXPR, 0);
4172 else if (code == EQ_EXPR)
4173 t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
4174 TREE_REAL_CST (arg1)),
4177 t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
4178 TREE_REAL_CST (arg1)),
4182 if (t1 == NULL_TREE)
4186 TREE_INT_CST_LOW (t1) ^= 1;
4188 TREE_TYPE (t1) = type;
4192 if (TREE_CODE (arg0) == INTEGER_CST)
4193 return TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1));
4194 else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
4195 return omit_one_operand (type, arg1, arg0);
4197 /* If the second operand is zero, invert the comparison and swap
4198 the second and third operands. Likewise if the second operand
4199 is constant and the third is not or if the third operand is
4200 equivalent to the first operand of the comparison. */
4202 if (integer_zerop (arg1)
4203 || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
4204 || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4205 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4206 TREE_OPERAND (t, 2),
4207 TREE_OPERAND (arg0, 1))))
4209 /* See if this can be inverted. If it can't, possibly because
4210 it was a floating-point inequality comparison, don't do
4212 tem = invert_truthvalue (arg0);
4214 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4216 arg0 = TREE_OPERAND (t, 0) = tem;
4217 TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
4218 TREE_OPERAND (t, 2) = arg1;
4219 arg1 = TREE_OPERAND (t, 1);
4223 /* If we have A op B ? A : C, we may be able to convert this to a
4224 simpler expression, depending on the operation and the values
4225 of B and C. IEEE floating point prevents this though,
4226 because A or B might be -0.0 or a NaN. */
4228 if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4229 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4230 || TREE_CODE (TREE_TYPE (TREE_OPERAND (arg0, 0))) != REAL_TYPE)
4231 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4232 arg1, TREE_OPERAND (arg0, 1)))
4234 tree arg2 = TREE_OPERAND (t, 2);
4235 enum tree_code comp_code = TREE_CODE (arg0);
4237 /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
4238 depending on the comparison operation. */
4239 if (integer_zerop (TREE_OPERAND (arg0, 1))
4240 && TREE_CODE (arg2) == NEGATE_EXPR
4241 && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
4245 return fold (build1 (NEGATE_EXPR, type, arg1));
4247 return convert (type, arg1);
4250 return fold (build1 (ABS_EXPR, type, arg1));
4253 return fold (build1 (NEGATE_EXPR, type,
4254 fold (build1 (ABS_EXPR, type, arg1))));
4257 /* If this is A != 0 ? A : 0, this is simply A. For ==, it is
4260 if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
4262 if (comp_code == NE_EXPR)
4263 return convert (type, arg1);
4264 else if (comp_code == EQ_EXPR)
4265 return convert (type, integer_zero_node);
4268 /* If this is A op B ? A : B, this is either A, B, min (A, B),
4269 or max (A, B), depending on the operation. */
4271 if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
4272 arg2, TREE_OPERAND (arg0, 0)))
4276 return convert (type, arg2);
4278 return convert (type, arg1);
4281 return fold (build (MIN_EXPR, type, arg1, arg2));
4284 return fold (build (MAX_EXPR, type, arg1, arg2));
4287 /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
4288 we might still be able to simplify this. For example,
4289 if C1 is one less or one more than C2, this might have started
4290 out as a MIN or MAX and been transformed by this function.
4291 Only good for INTEGER_TYPE, because we need TYPE_MAX_VALUE. */
4293 if (TREE_CODE (type) == INTEGER_TYPE
4294 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4295 && TREE_CODE (arg2) == INTEGER_CST)
4299 /* We can replace A with C1 in this case. */
4300 arg1 = TREE_OPERAND (t, 1)
4301 = convert (type, TREE_OPERAND (arg0, 1));
4305 /* If C1 is C2 + 1, this is min(A, C2). */
4306 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4307 && operand_equal_p (TREE_OPERAND (arg0, 1),
4308 const_binop (PLUS_EXPR, arg2,
4309 integer_one_node, 0), 1))
4310 return fold (build (MIN_EXPR, type, arg1, arg2));
4314 /* If C1 is C2 - 1, this is min(A, C2). */
4315 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4316 && operand_equal_p (TREE_OPERAND (arg0, 1),
4317 const_binop (MINUS_EXPR, arg2,
4318 integer_one_node, 0), 1))
4319 return fold (build (MIN_EXPR, type, arg1, arg2));
4323 /* If C1 is C2 - 1, this is max(A, C2). */
4324 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4325 && operand_equal_p (TREE_OPERAND (arg0, 1),
4326 const_binop (MINUS_EXPR, arg2,
4327 integer_one_node, 0), 1))
4328 return fold (build (MAX_EXPR, type, arg1, arg2));
4332 /* If C1 is C2 + 1, this is max(A, C2). */
4333 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4334 && operand_equal_p (TREE_OPERAND (arg0, 1),
4335 const_binop (PLUS_EXPR, arg2,
4336 integer_one_node, 0), 1))
4337 return fold (build (MAX_EXPR, type, arg1, arg2));
4342 /* Convert A ? 1 : 0 to simply A. */
4343 if (integer_onep (TREE_OPERAND (t, 1))
4344 && integer_zerop (TREE_OPERAND (t, 2))
4345 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
4346 call to fold will try to move the conversion inside
4347 a COND, which will recurse. In that case, the COND_EXPR
4348 is probably the best choice, so leave it alone. */
4349 && type == TREE_TYPE (arg0))
4353 /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
4354 operation is simply A & 2. */
4356 if (integer_zerop (TREE_OPERAND (t, 2))
4357 && TREE_CODE (arg0) == NE_EXPR
4358 && integer_zerop (TREE_OPERAND (arg0, 1))
4359 && integer_pow2p (arg1)
4360 && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
4361 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
4363 return convert (type, TREE_OPERAND (arg0, 0));
4368 if (TREE_SIDE_EFFECTS (arg0))
4370 /* Don't let (0, 0) be null pointer constant. */
4371 if (integer_zerop (arg1))
4372 return non_lvalue (arg1);
4377 } /* switch (code) */