1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 1988, 1992 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
20 /*@@ Fix lossage on folding division of big integers. */
22 /*@@ This file should be rewritten to use an arbitrary precision
23 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
24 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
25 @@ The routines that translate from the ap rep should
26 @@ warn if precision et al. is lost.
27 @@ This would also make life easier when this technology is used
28 @@ for cross-compilers. */
31 /* The entry points in this file are fold, size_int and size_binop.
33 fold takes a tree as argument and returns a simplified tree.
35 size_binop takes a tree code for an arithmetic operation
36 and two operands that are trees, and produces a tree for the
37 result, assuming the type comes from `sizetype'.
39 size_int takes an integer value, and creates a tree constant
40 with type from `sizetype'. */
48 /* Handle floating overflow for `const_binop'. */
49 static jmp_buf float_error;
51 void lshift_double ();
52 void rshift_double ();
53 void lrotate_double ();
54 void rrotate_double ();
55 static tree const_binop ();
61 /* Yield nonzero if a signed left shift of A by B bits overflows. */
/* NOTE(review): detects overflow by shifting left then back right and
   comparing; relies on traditional two's-complement behavior.  In modern
   C, left-shifting a negative value is undefined behavior -- confirm the
   host compilers this must support.  */
62 #define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))
64 /* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
65 Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
66 Then this yields nonzero if overflow occurred during the addition.
67 Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
68 Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
69 #define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
71 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
72 We do that by representing the two-word integer as MAX_SHORTS shorts,
73 with only 8 bits stored in each short, as a positive number. */
75 /* Unpack a two-word integer into MAX_SHORTS shorts.
76 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
77 SHORTS points to the array of shorts.
   Each short receives 8 bits, stored as a positive number.  */
80 encode (shorts, low, hi)
82 HOST_WIDE_INT low, hi;
86 for (i = 0; i < MAX_SHORTS / 2; i++)
88 shorts[i] = (low >> (i * 8)) & 0xff;
89 shorts[i + MAX_SHORTS / 2] = (hi >> (i * 8)) & 0xff;
93 /* Pack an array of MAX_SHORTS shorts into a two-word integer.
94 SHORTS points to the array of shorts.
95 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.
   Inverse of `encode': each short contributes 8 bits, low half of the
   array forming *LOW and high half forming *HI.  */
98 decode (shorts, low, hi)
100 HOST_WIDE_INT *low, *hi;
103 HOST_WIDE_INT lv = 0, hv = 0;
105 for (i = 0; i < MAX_SHORTS / 2; i++)
107 lv |= (HOST_WIDE_INT) shorts[i] << (i * 8);
108 hv |= (HOST_WIDE_INT) shorts[i + MAX_SHORTS / 2] << (i * 8);
114 /* Make the integer constant T valid for its type
115 by setting to 0 or 1 all the bits in the constant
116 that don't belong in the type.
117 Yield 1 if a signed overflow occurs, 0 otherwise.
118 If OVERFLOW is nonzero, a signed overflow has already occurred
119 in calculating T, so propagate it. */
122 force_fit_type (t, overflow)
126 HOST_WIDE_INT low, high;
/* Non-INTEGER_CST nodes are left alone.  */
129 if (TREE_CODE (t) != INTEGER_CST)
/* Remember the original value so overflow can be detected below.  */
132 low = TREE_INT_CST_LOW (t);
133 high = TREE_INT_CST_HIGH (t);
/* NOTE(review): pointer-type handling elided here; presumably uses
   POINTER_SIZE rather than TYPE_PRECISION -- confirm.  */
135 if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
138 prec = TYPE_PRECISION (TREE_TYPE (t));
140 /* First clear all bits that are beyond the type's precision. */
142 if (prec == 2 * HOST_BITS_PER_WIDE_INT)
144 else if (prec > HOST_BITS_PER_WIDE_INT)
146 TREE_INT_CST_HIGH (t)
147 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
151 TREE_INT_CST_HIGH (t) = 0;
152 if (prec < HOST_BITS_PER_WIDE_INT)
153 TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
156 /* Unsigned types do not suffer sign extension or overflow. */
157 if (TREE_UNSIGNED (TREE_TYPE (t)))
160 /* If the value's sign bit is set, extend the sign. */
161 if (prec != 2 * HOST_BITS_PER_WIDE_INT
162 && (prec > HOST_BITS_PER_WIDE_INT
163 ? (TREE_INT_CST_HIGH (t)
164 & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
165 : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
167 /* Value is negative:
168 set to 1 all the bits that are outside this type's precision. */
169 if (prec > HOST_BITS_PER_WIDE_INT)
171 TREE_INT_CST_HIGH (t)
172 |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
176 TREE_INT_CST_HIGH (t) = -1;
177 if (prec < HOST_BITS_PER_WIDE_INT)
178 TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
182 /* Yield nonzero if signed overflow occurred. */
/* Overflow was passed in, or the stored value changed under truncation
   or sign extension above.  */
184 ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
188 /* Add two doubleword integers with doubleword result.
189 Each argument is given as two `HOST_WIDE_INT' pieces.
190 One argument is L1 and H1; the other, L2 and H2.
191 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
192 We use the 8-shorts representation internally.
    Returns nonzero if signed overflow occurred.  */
195 add_double (l1, h1, l2, h2, lv, hv)
196 HOST_WIDE_INT l1, h1, l2, h2;
197 HOST_WIDE_INT *lv, *hv;
199 short arg1[MAX_SHORTS];
200 short arg2[MAX_SHORTS];
201 register int carry = 0;
204 encode (arg1, l1, h1);
205 encode (arg2, l2, h2);
/* Byte-at-a-time ripple-carry addition over the 8-bit digits.  */
207 for (i = 0; i < MAX_SHORTS; i++)
209 carry += arg1[i] + arg2[i];
210 arg1[i] = carry & 0xff;
214 decode (arg1, lv, hv);
/* Signed overflow iff the operands agree in sign but the sum differs
   (see `overflow_sum_sign' above).  */
215 return overflow_sum_sign (h1, h2, *hv);
218 /* Negate a doubleword integer with doubleword result.
219 Return nonzero if the operation overflows, assuming it's signed.
220 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
221 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
222 We use the 8-shorts representation internally. */
225 neg_double (l1, h1, lv, hv)
226 HOST_WIDE_INT l1, h1;
227 HOST_WIDE_INT *lv, *hv;
/* Overflow iff both the argument and the result are negative, which
   happens only when negating the most negative representable value.  */
233 return (*hv & h1) < 0;
243 /* Multiply two doubleword integers with doubleword result.
244 Return nonzero if the operation overflows, assuming it's signed.
245 Each argument is given as two `HOST_WIDE_INT' pieces.
246 One argument is L1 and H1; the other, L2 and H2.
247 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
248 We use the 8-shorts representation internally. */
251 mul_double (l1, h1, l2, h2, lv, hv)
252 HOST_WIDE_INT l1, h1, l2, h2;
253 HOST_WIDE_INT *lv, *hv;
255 short arg1[MAX_SHORTS];
256 short arg2[MAX_SHORTS];
257 short prod[MAX_SHORTS * 2];
258 register int carry = 0;
259 register int i, j, k;
260 HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
262 /* These cases are used extensively, arising from pointer combinations. */
/* NOTE(review): surrounding dispatch elided; these fragments handle small
   constant multipliers via shift-and-carry (x2, x4..., x8...) -- confirm
   against the full source.  */
267 int overflow = left_shift_overflows (h1, 1);
268 unsigned HOST_WIDE_INT temp = l1 + l1;
269 *hv = (h1 << 1) + (temp < l1);
275 int overflow = left_shift_overflows (h1, 2);
276 unsigned HOST_WIDE_INT temp = l1 + l1;
277 h1 = (h1 << 2) + ((temp < l1) << 1);
287 int overflow = left_shift_overflows (h1, 3);
288 unsigned HOST_WIDE_INT temp = l1 + l1;
289 h1 = (h1 << 3) + ((temp < l1) << 2);
292 h1 += (temp < l1) << 1;
302 encode (arg1, l1, h1);
303 encode (arg2, l2, h2);
305 bzero (prod, sizeof prod);
/* Schoolbook multiplication of 8-bit digits into a double-width PROD.  */
307 for (i = 0; i < MAX_SHORTS; i++)
308 for (j = 0; j < MAX_SHORTS; j++)
311 carry = arg1[i] * arg2[j];
315 prod[k] = carry & 0xff;
321 decode (prod, lv, hv); /* This ignores
322 prod[MAX_SHORTS] -> prod[MAX_SHORTS*2-1] */
324 /* Check for overflow by calculating the top half of the answer in full;
325 it should agree with the low half's sign bit. */
326 decode (prod+MAX_SHORTS, &toplow, &tophigh);
329 neg_double (l2, h2, &neglow, &neghigh);
330 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
334 neg_double (l1, h1, &neglow, &neghigh);
335 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
/* For a negative result the top half must be all ones; otherwise all
   zeros.  Any deviation means signed overflow.  */
337 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
340 /* Shift the doubleword integer in L1, H1 left by COUNT places
341 keeping only PREC bits of result.
342 Shift right if COUNT is negative.
343 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
344 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
347 lshift_double (l1, h1, count, prec, lv, hv, arith)
348 HOST_WIDE_INT l1, h1;
350 HOST_WIDE_INT *lv, *hv;
353 short arg1[MAX_SHORTS];
/* A negative COUNT is delegated to the right-shift routine.  */
359 rshift_double (l1, h1, - count, prec, lv, hv, arith);
363 encode (arg1, l1, h1);
/* Shift one bit at a time, propagating the carry upward through the
   8-bit digits (outer per-bit loop elided).  */
371 for (i = 0; i < MAX_SHORTS; i++)
373 carry += arg1[i] << 1;
374 arg1[i] = carry & 0xff;
380 decode (arg1, lv, hv);
383 /* Shift the doubleword integer in L1, H1 right by COUNT places
384 keeping only PREC bits of result. COUNT must be positive.
385 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
386 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
389 rshift_double (l1, h1, count, prec, lv, hv, arith)
390 HOST_WIDE_INT l1, h1, count, prec;
391 HOST_WIDE_INT *lv, *hv;
394 short arg1[MAX_SHORTS];
398 encode (arg1, l1, h1);
/* For an arithmetic shift, seed the carry with the sign bit.
   NOTE(review): index 7 is hard-coded; presumably this should be the top
   digit, MAX_SHORTS - 1 -- confirm against the definition of MAX_SHORTS.  */
405 carry = arith && arg1[7] >> 7;
406 for (i = MAX_SHORTS - 1; i >= 0; i--)
410 arg1[i] = (carry >> 1) & 0xff;
415 decode (arg1, lv, hv);
418 /* Rotate the doubleword integer in L1, H1 left by COUNT places
419 keeping only PREC bits of result.
420 Rotate right if COUNT is negative.
421 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
424 lrotate_double (l1, h1, count, prec, lv, hv)
425 HOST_WIDE_INT l1, h1, count, prec;
426 HOST_WIDE_INT *lv, *hv;
428 short arg1[MAX_SHORTS];
/* A negative COUNT is delegated to the right-rotate routine.  */
434 rrotate_double (l1, h1, - count, prec, lv, hv);
438 encode (arg1, l1, h1);
/* Capture the top bit so it can wrap around to the bottom.  */
443 carry = arg1[MAX_SHORTS - 1] >> 7;
446 for (i = 0; i < MAX_SHORTS; i++)
448 carry += arg1[i] << 1;
449 arg1[i] = carry & 0xff;
455 decode (arg1, lv, hv);
458 /* Rotate the doubleword integer in L1, H1 right by COUNT places
459 keeping only PREC bits of result. COUNT must be positive.
460 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
463 rrotate_double (l1, h1, count, prec, lv, hv)
464 HOST_WIDE_INT l1, h1, count, prec;
465 HOST_WIDE_INT *lv, *hv;
467 short arg1[MAX_SHORTS];
471 encode (arg1, l1, h1);
/* Rotate one bit at a time from the top digit downward, wrapping the
   low bit back to the top (per-bit loop elided).  */
479 for (i = MAX_SHORTS - 1; i >= 0; i--)
483 arg1[i] = (carry >> 1) & 0xff;
488 decode (arg1, lv, hv);
491 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
492 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
493 CODE is a tree code for a kind of division, one of
494 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
496 It controls how the quotient is rounded to an integer.
497 Return nonzero if the operation overflows.
498 UNS nonzero says do unsigned division. */
501 div_and_round_double (code, uns,
502 lnum_orig, hnum_orig, lden_orig, hden_orig,
503 lquo, hquo, lrem, hrem)
506 HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
507 HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
508 HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
511 short num[MAX_SHORTS + 1]; /* extra element for scaling. */
512 short den[MAX_SHORTS], quo[MAX_SHORTS];
513 register int i, j, work;
514 register int carry = 0;
515 unsigned HOST_WIDE_INT lnum = lnum_orig;
516 HOST_WIDE_INT hnum = hnum_orig;
517 unsigned HOST_WIDE_INT lden = lden_orig;
518 HOST_WIDE_INT hden = hden_orig;
521 if ((hden == 0) && (lden == 0))
524 /* calculate quotient sign and convert operands to unsigned. */
530 /* (minimum integer) / (-1) is the only overflow case. */
531 if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
537 neg_double (lden, hden, &lden, &hden);
541 if (hnum == 0 && hden == 0)
542 { /* single precision */
544 *lquo = lnum / lden; /* rounds toward zero since positive args */
549 { /* trivial case: dividend < divisor */
550 /* hden != 0 already checked. */
557 bzero (quo, sizeof quo);
559 bzero (num, sizeof num); /* to zero 9th element */
560 bzero (den, sizeof den);
562 encode (num, lnum, hnum);
563 encode (den, lden, hden);
565 /* This code requires more than just hden == 0.
566 We also have to require that we don't need more than three bytes
567 to hold CARRY. If we ever did need four bytes to hold it, we
568 would lose part of it when computing WORK on the next round. */
569 if (hden == 0 && ((lden << 8) >> 8) == lden)
570 { /* simpler algorithm */
571 /* hnum != 0 already checked. */
572 for (i = MAX_SHORTS - 1; i >= 0; i--)
574 work = num[i] + (carry << 8);
575 quo[i] = work / lden;
579 else { /* full double precision,
580 with thanks to Don Knuth's
581 "Seminumerical Algorithms". */
583 int quo_est, scale, num_hi_sig, den_hi_sig, quo_hi_sig;
585 /* Find the highest non-zero divisor digit. */
586 for (i = MAX_SHORTS - 1; ; i--)
591 for (i = MAX_SHORTS - 1; ; i--)
596 quo_hi_sig = num_hi_sig - den_hi_sig + 1;
598 /* Insure that the first digit of the divisor is at least BASE/2.
599 This is required by the quotient digit estimation algorithm. */
601 scale = BASE / (den[den_hi_sig] + 1);
602 if (scale > 1) { /* scale divisor and dividend */
604 for (i = 0; i <= MAX_SHORTS - 1; i++) {
605 work = (num[i] * scale) + carry;
606 num[i] = work & 0xff;
608 if (num[i] != 0) num_hi_sig = i;
611 for (i = 0; i <= MAX_SHORTS - 1; i++) {
612 work = (den[i] * scale) + carry;
613 den[i] = work & 0xff;
615 if (den[i] != 0) den_hi_sig = i;
620 for (i = quo_hi_sig; i > 0; i--) {
621 /* guess the next quotient digit, quo_est, by dividing the first
622 two remaining dividend digits by the high order quotient digit.
623 quo_est is never low and is at most 2 high. */
625 int num_hi; /* index of highest remaining dividend digit */
627 num_hi = i + den_hi_sig;
629 work = (num[num_hi] * BASE) + (num_hi > 0 ? num[num_hi - 1] : 0);
630 if (num[num_hi] != den[den_hi_sig]) {
631 quo_est = work / den[den_hi_sig];
637 /* refine quo_est so it's usually correct, and at most one high. */
638 while ((den[den_hi_sig - 1] * quo_est)
639 > (((work - (quo_est * den[den_hi_sig])) * BASE)
640 + ((num_hi - 1) > 0 ? num[num_hi - 2] : 0)))
643 /* Try QUO_EST as the quotient digit, by multiplying the
644 divisor by QUO_EST and subtracting from the remaining dividend.
645 Keep in mind that QUO_EST is the I - 1st digit. */
649 for (j = 0; j <= den_hi_sig; j++)
653 work = num[i + j - 1] - (quo_est * den[j]) + carry;
661 num[i + j - 1] = digit;
664 /* if quo_est was high by one, then num[i] went negative and
665 we need to correct things. */
670 carry = 0; /* add divisor back in */
671 for (j = 0; j <= den_hi_sig; j++)
673 work = num[i + j - 1] + den[j] + carry;
683 num[i + j - 1] = work;
685 num [num_hi] += carry;
688 /* store the quotient digit. */
689 quo[i - 1] = quo_est;
693 decode (quo, lquo, hquo);
696 /* if result is negative, make it so. */
698 neg_double (*lquo, *hquo, lquo, hquo);
700 /* compute trial remainder: rem = num - (quo * den) */
701 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
702 neg_double (*lrem, *hrem, lrem, hrem);
703 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
708 case TRUNC_MOD_EXPR: /* round toward zero */
709 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
713 case FLOOR_MOD_EXPR: /* round toward negative infinity */
714 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
717 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
720 else return overflow;
724 case CEIL_MOD_EXPR: /* round toward positive infinity */
725 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
727 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
730 else return overflow;
734 case ROUND_MOD_EXPR: /* round to closest integer */
736 HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
737 HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
739 /* get absolute values */
740 if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
741 if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
743 /* if (2 * abs (lrem) >= abs (lden)) */
744 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
745 labs_rem, habs_rem, &ltwice, &htwice);
746 if (((unsigned HOST_WIDE_INT) habs_den
747 < (unsigned HOST_WIDE_INT) htwice)
748 || (((unsigned HOST_WIDE_INT) habs_den
749 == (unsigned HOST_WIDE_INT) htwice)
750 && ((unsigned HOST_WIDE_INT) labs_den
751 < (unsigned HOST_WIDE_INT) ltwice)))
755 add_double (*lquo, *hquo,
756 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
759 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
762 else return overflow;
770 /* compute true remainder: rem = num - (quo * den) */
771 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
772 neg_double (*lrem, *hrem, lrem, hrem);
773 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
777 /* Effectively truncate a real value to represent
778 the nearest possible value in a narrower mode.
779 The result is actually represented in the same data type as the argument,
780 but its value is usually different. */
783 real_value_truncate (mode, arg)
784 enum machine_mode mode;
788 /* Make sure the value is actually stored in memory before we turn off
792 REAL_VALUE_TYPE value;
793 jmp_buf handler, old_handler;
/* Trap floating overflow during the truncation via setjmp/longjmp;
   the handler reports the error (recovery path elided).  */
796 if (setjmp (handler))
798 error ("floating overflow");
/* Install the trap handler, do the truncation, then restore the old
   handler.  */
801 handled = push_float_handler (handler, old_handler);
802 value = REAL_VALUE_TRUNCATE (mode, arg);
803 pop_float_handler (handled, old_handler);
807 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
809 /* Check for infinity in an IEEE double precision number. */
/* NOTE(review): the function header line is elided here; this body
   inspects the bits of a double through a union of bit-fields laid out
   for both endiannesses.  */
815 /* The IEEE 64-bit double format. */
820 unsigned exponent : 11;
821 unsigned mantissa1 : 20;
826 unsigned mantissa1 : 20;
827 unsigned exponent : 11;
833 if (u.big_endian.sign == 1)
/* Infinity: exponent all ones (2047) and mantissa entirely zero.  */
836 return (u.big_endian.exponent == 2047
837 && u.big_endian.mantissa1 == 0
838 && u.big_endian.mantissa2 == 0);
843 return (u.little_endian.exponent == 2047
844 && u.little_endian.mantissa1 == 0
845 && u.little_endian.mantissa2 == 0);
849 /* Check whether an IEEE double precision number is a NaN. */
/* NOTE(review): function header elided; same union-of-bit-fields
   technique as the infinity check above.  */
855 /* The IEEE 64-bit double format. */
860 unsigned exponent : 11;
861 unsigned mantissa1 : 20;
866 unsigned mantissa1 : 20;
867 unsigned exponent : 11;
873 if (u.big_endian.sign == 1)
/* NaN: exponent all ones (2047) with a nonzero mantissa.  */
876 return (u.big_endian.exponent == 2047
877 && (u.big_endian.mantissa1 != 0
878 || u.big_endian.mantissa2 != 0));
883 return (u.little_endian.exponent == 2047
884 && (u.little_endian.mantissa1 != 0
885 || u.little_endian.mantissa2 != 0));
889 /* Check for a negative IEEE double precision number. */
/* NOTE(review): function header elided; simply returns the IEEE sign
   bit, extracted via the same bit-field union.  */
895 /* The IEEE 64-bit double format. */
900 unsigned exponent : 11;
901 unsigned mantissa1 : 20;
906 unsigned mantissa1 : 20;
907 unsigned exponent : 11;
913 if (u.big_endian.sign == 1)
916 return u.big_endian.sign;
921 return u.little_endian.sign;
924 #else /* Target not IEEE */
926 /* Let's assume other float formats don't have infinity.
927 (This can be overridden by redefining REAL_VALUE_ISINF.) */
935 /* Let's assume other float formats don't have NaNs.
936 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
944 /* Let's assume other float formats don't have minus zero.
945 (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
952 #endif /* Target not IEEE */
954 /* Split a tree IN into a constant and a variable part
955 that could be combined with CODE to make IN.
956 CODE must be a commutative arithmetic operation.
957 Store the constant part into *CONP and the variable in &VARP.
958 Return 1 if this was done; zero means the tree IN did not decompose
961 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
962 Therefore, we must tell the caller whether the variable part
963 was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
964 The value stored is the coefficient for the variable term.
965 The constant term we return should always be added;
966 we negate it if necessary. */
969 split_tree (in, code, varp, conp, varsignp)
975 register tree outtype = TREE_TYPE (in);
979 /* Strip any conversions that don't change the machine mode. */
980 while ((TREE_CODE (in) == NOP_EXPR
981 || TREE_CODE (in) == CONVERT_EXPR)
982 && (TYPE_MODE (TREE_TYPE (in))
983 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
984 in = TREE_OPERAND (in, 0);
986 if (TREE_CODE (in) == code
987 || (TREE_CODE (TREE_TYPE (in)) != REAL_TYPE
988 /* We can associate addition and subtraction together
989 (even though the C standard doesn't say so)
990 for integers because the value is not affected.
991 For reals, the value might be affected, so we can't. */
993 ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
994 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
/* NOTE(review): this inner `code' shadows the parameter CODE for the
   rest of this scope -- easy to misread; consider renaming.  */
996 enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
997 if (code == INTEGER_CST)
/* Constant is the first operand; variable is the second.  */
999 *conp = TREE_OPERAND (in, 0);
1000 *varp = TREE_OPERAND (in, 1);
1001 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
1002 && TREE_TYPE (*varp) != outtype)
1003 *varp = convert (outtype, *varp);
1004 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
1007 if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
/* Constant is the second operand; variable is the first.  */
1009 *conp = TREE_OPERAND (in, 1);
1010 *varp = TREE_OPERAND (in, 0);
1012 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
1013 && TREE_TYPE (*varp) != outtype)
1014 *varp = convert (outtype, *varp);
1015 if (TREE_CODE (in) == MINUS_EXPR)
1017 /* If operation is subtraction and constant is second,
1018 must negate it to get an additive constant.
1019 And this cannot be done unless it is a manifest constant.
1020 It could also be the address of a static variable.
1021 We cannot negate that, so give up. */
1022 if (TREE_CODE (*conp) == INTEGER_CST)
1023 /* Subtracting from integer_zero_node loses for long long. */
1024 *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
1030 if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
1032 *conp = TREE_OPERAND (in, 0);
1033 *varp = TREE_OPERAND (in, 1);
1034 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
1035 && TREE_TYPE (*varp) != outtype)
1036 *varp = convert (outtype, *varp);
1037 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
1044 /* Combine two constants ARG1 and ARG2 under operation CODE
1045 to produce a new constant.
1046 We assume ARG1 and ARG2 have the same data type,
1047 or at least are the same kind of constant and the same machine mode.
1049 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1052 const_binop (code, arg1, arg2, notrunc)
1053 enum tree_code code;
1054 register tree arg1, arg2;
/* Integer constants: fold with explicit double-word arithmetic.  The
   result tree is built in T; overflow is tracked and folded into
   TREE_CONSTANT_OVERFLOW at the end.  (Switch dispatch lines elided.)  */
1057 if (TREE_CODE (arg1) == INTEGER_CST)
1059 register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
1060 register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
1061 HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
1062 HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
1063 HOST_WIDE_INT low, hi;
1064 HOST_WIDE_INT garbagel, garbageh;
1066 int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
1072 t = build_int_2 (int1l | int2l, int1h | int2h);
1076 t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
1080 t = build_int_2 (int1l & int2l, int1h & int2h);
1083 case BIT_ANDTC_EXPR:
1084 t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
1090 /* It's unclear from the C standard whether shifts can overflow.
1091 The following code ignores overflow; perhaps a C standard
1092 interpretation ruling is needed. */
1093 lshift_double (int1l, int1h, int2l,
1094 TYPE_PRECISION (TREE_TYPE (arg1)),
1097 t = build_int_2 (low, hi);
1098 TREE_TYPE (t) = TREE_TYPE (arg1);
1100 force_fit_type (t, 0);
1101 TREE_CONSTANT_OVERFLOW (t)
1102 = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
1108 lrotate_double (int1l, int1h, int2l,
1109 TYPE_PRECISION (TREE_TYPE (arg1)),
1111 t = build_int_2 (low, hi);
/* NOTE(review): surrounding case labels elided here; these unsigned
   comparisons appear to be carry/overflow checks -- confirm against
   the full source.  */
1118 if ((unsigned HOST_WIDE_INT) int2l < int1l)
1121 overflow = int2h < hi;
1123 t = build_int_2 (int2l, int2h);
1129 if ((unsigned HOST_WIDE_INT) int1l < int2l)
1132 overflow = int1h < hi;
1134 t = build_int_2 (int1l, int1h);
1137 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
1138 t = build_int_2 (low, hi);
/* Subtraction: x - 0 is x; otherwise add the negation.  */
1142 if (int2h == 0 && int2l == 0)
1144 t = build_int_2 (int1l, int1h);
1147 neg_double (int2l, int2h, &low, &hi);
1148 add_double (int1l, int1h, low, hi, &low, &hi);
1149 overflow = overflow_sum_sign (hi, int2h, int1h);
1150 t = build_int_2 (low, hi);
1154 /* Optimize simple cases. */
1157 unsigned HOST_WIDE_INT temp;
1162 t = build_int_2 (0, 0);
1165 t = build_int_2 (int2l, int2h);
1168 overflow = left_shift_overflows (int2h, 1);
1169 temp = int2l + int2l;
1170 int2h = (int2h << 1) + (temp < int2l);
1171 t = build_int_2 (temp, int2h);
1173 #if 0 /* This code can lose carries. */
1175 temp = int2l + int2l + int2l;
1176 int2h = int2h * 3 + (temp < int2l);
1177 t = build_int_2 (temp, int2h);
1181 overflow = left_shift_overflows (int2h, 2);
1182 temp = int2l + int2l;
1183 int2h = (int2h << 2) + ((temp < int2l) << 1);
1186 int2h += (temp < int2l);
1187 t = build_int_2 (temp, int2h);
1190 overflow = left_shift_overflows (int2h, 3);
1191 temp = int2l + int2l;
1192 int2h = (int2h << 3) + ((temp < int2l) << 2);
1195 int2h += (temp < int2l) << 1;
1198 int2h += (temp < int2l);
1199 t = build_int_2 (temp, int2h);
1210 t = build_int_2 (0, 0);
1215 t = build_int_2 (int1l, int1h);
1220 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1221 t = build_int_2 (low, hi);
1224 case TRUNC_DIV_EXPR:
1225 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1226 case EXACT_DIV_EXPR:
1227 /* This is a shortcut for a common special case.
1228 It reduces the number of tree nodes generated
1230 if (int2h == 0 && int2l > 0
1231 && TREE_TYPE (arg1) == sizetype
1232 && int1h == 0 && int1l >= 0)
1234 if (code == CEIL_DIV_EXPR)
1236 return size_int (int1l / int2l);
1238 case ROUND_DIV_EXPR:
/* x / 1 is x.  */
1239 if (int2h == 0 && int2l == 1)
1241 t = build_int_2 (int1l, int1h);
/* x / x is 1 (the zero/zero case is screened first).  */
1244 if (int1l == int2l && int1h == int2h)
1246 if ((int1l | int1h) == 0)
1248 t = build_int_2 (1, 0);
1251 overflow = div_and_round_double (code, uns,
1252 int1l, int1h, int2l, int2h,
1253 &low, &hi, &garbagel, &garbageh);
1254 t = build_int_2 (low, hi);
/* For MOD, keep the remainder and discard the quotient.  */
1257 case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
1258 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
1259 overflow = div_and_round_double (code, uns,
1260 int1l, int1h, int2l, int2h,
1261 &garbagel, &garbageh, &low, &hi);
1262 t = build_int_2 (low, hi);
/* MIN/MAX: LOW is set to "arg1 < arg2", unsigned or signed compare.  */
1269 low = (((unsigned HOST_WIDE_INT) int1h
1270 < (unsigned HOST_WIDE_INT) int2h)
1271 || (((unsigned HOST_WIDE_INT) int1h
1272 == (unsigned HOST_WIDE_INT) int2h)
1273 && ((unsigned HOST_WIDE_INT) int1l
1274 < (unsigned HOST_WIDE_INT) int2l)));
1278 low = ((int1h < int2h)
1279 || ((int1h == int2h)
1280 && ((unsigned HOST_WIDE_INT) int1l
1281 < (unsigned HOST_WIDE_INT) int2l)));
1283 if (low == (code == MIN_EXPR))
1284 t = build_int_2 (int1l, int1h);
1286 t = build_int_2 (int2l, int2h);
/* Give the result the operands' type, and record overflow.  */
1293 TREE_TYPE (t) = TREE_TYPE (arg1);
1294 TREE_CONSTANT_OVERFLOW (t)
1295 = ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
1296 | TREE_CONSTANT_OVERFLOW (arg1)
1297 | TREE_CONSTANT_OVERFLOW (arg2));
1300 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
/* Real constants: fold in host floating point, trapping overflow with
   setjmp/longjmp through `float_error'.  */
1301 if (TREE_CODE (arg1) == REAL_CST)
1303 register REAL_VALUE_TYPE d1;
1304 register REAL_VALUE_TYPE d2;
1305 register REAL_VALUE_TYPE value;
1308 d1 = TREE_REAL_CST (arg1);
1309 d2 = TREE_REAL_CST (arg2);
/* On overflow, warn and return the unfolded expression.  */
1310 if (setjmp (float_error))
1312 pedwarn ("floating overflow in constant expression");
1313 return build (code, TREE_TYPE (arg1), arg1, arg2);
1315 set_float_handler (float_error);
1317 #ifdef REAL_ARITHMETIC
1318 REAL_ARITHMETIC (value, code, d1, d2);
1335 #ifndef REAL_INFINITY
1344 value = MIN (d1, d2);
1348 value = MAX (d1, d2);
1354 #endif /* no REAL_ARITHMETIC */
1355 t = build_real (TREE_TYPE (arg1),
1356 real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
1357 set_float_handler (NULL_PTR);
1360 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
/* Complex constants: fold componentwise via recursive const_binop.  */
1361 if (TREE_CODE (arg1) == COMPLEX_CST)
1363 register tree r1 = TREE_REALPART (arg1);
1364 register tree i1 = TREE_IMAGPART (arg1);
1365 register tree r2 = TREE_REALPART (arg2);
1366 register tree i2 = TREE_IMAGPART (arg2);
1372 t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
1373 const_binop (PLUS_EXPR, i1, i2, notrunc));
1377 t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
1378 const_binop (MINUS_EXPR, i1, i2, notrunc));
/* (r1 + i1 i)(r2 + i2 i) = (r1 r2 - i1 i2) + (r1 i2 + i1 r2) i.  */
1382 t = build_complex (const_binop (MINUS_EXPR,
1383 const_binop (MULT_EXPR,
1385 const_binop (MULT_EXPR,
1388 const_binop (PLUS_EXPR,
1389 const_binop (MULT_EXPR,
1391 const_binop (MULT_EXPR,
/* Division: multiply by the conjugate and divide by |arg2|^2.  */
1398 register tree magsquared
1399 = const_binop (PLUS_EXPR,
1400 const_binop (MULT_EXPR, r2, r2, notrunc),
1401 const_binop (MULT_EXPR, i2, i2, notrunc),
1403 t = build_complex (const_binop (RDIV_EXPR,
1404 const_binop (PLUS_EXPR,
1405 const_binop (MULT_EXPR, r1, r2, notrunc),
1406 const_binop (MULT_EXPR, i1, i2, notrunc),
1408 magsquared, notrunc),
1409 const_binop (RDIV_EXPR,
1410 const_binop (MINUS_EXPR,
1411 const_binop (MULT_EXPR, i1, r2, notrunc),
1412 const_binop (MULT_EXPR, r1, i2, notrunc),
1414 magsquared, notrunc));
1421 TREE_TYPE (t) = TREE_TYPE (arg1);
1427 /* Return an INTEGER_CST with value V and type from `sizetype'.
      Small values are cached as permanent nodes in SIZE_TABLE.  */
1431 unsigned int number;
1434 /* Type-size nodes already made for small sizes. */
1435 static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];
/* NUMBER is unsigned, so a `>= 0' test would always be true; only the
   upper bound needs checking.  */
1437 if (number < 2*HOST_BITS_PER_WIDE_INT + 1
1438 && size_table[number] != 0)
1439 return size_table[number];
1440 if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
1442 push_obstacks_nochange ();
1443 /* Make this a permanent node. */
1444 end_temporary_allocation ();
1445 t = build_int_2 (number, 0);
1446 TREE_TYPE (t) = sizetype;
1447 size_table[number] = t;
/* Value too large to cache: build an ordinary node.  */
1452 t = build_int_2 (number, 0);
1453 TREE_TYPE (t) = sizetype;
1458 /* Combine operands ARG0 and ARG1 with arithmetic operation CODE.
1459 CODE is a tree code. Data type is taken from `sizetype'.
1460 If the operands are constant, so is the result. */
1463 size_binop (code, arg0, arg1)
1464 enum tree_code code;
1467 /* Handle the special case of two integer constants faster. */
1468 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1470 /* And some specific cases even faster than that. */
/* 0 + x, x - 0, and 1 * x need no arithmetic at all.  */
1471 if (code == PLUS_EXPR
1472 && TREE_INT_CST_LOW (arg0) == 0
1473 && TREE_INT_CST_HIGH (arg0) == 0)
1475 if (code == MINUS_EXPR
1476 && TREE_INT_CST_LOW (arg1) == 0
1477 && TREE_INT_CST_HIGH (arg1) == 0)
1479 if (code == MULT_EXPR
1480 && TREE_INT_CST_LOW (arg0) == 1
1481 && TREE_INT_CST_HIGH (arg0) == 0)
1483 /* Handle general case of two integer constants. */
1484 return const_binop (code, arg0, arg1, 1);
1487 if (arg0 == error_mark_node || arg1 == error_mark_node)
1488 return error_mark_node;
/* Non-constant operands: build the expression and let `fold' simplify.  */
1490 return fold (build (code, sizetype, arg0, arg1));
1493 /* Given T, a tree representing type conversion of ARG1, a constant,
1494 return a constant tree representing the result of conversion. */
/* NOTE(review): the embedded numbering jumps throughout this excerpt;
   parameter declarations, several braces, `else' arms and `return' lines
   are missing.  Comments below describe only the visible code.  */
1497 fold_convert (t, arg1)
1501 register tree type = TREE_TYPE (t);
/* Case 1: converting to an integral (pointer/integer/enum) type.  */
1503 if (TREE_CODE (type) == POINTER_TYPE
1504 || TREE_CODE (type) == INTEGER_TYPE
1505 || TREE_CODE (type) == ENUMERAL_TYPE)
1507 if (TREE_CODE (arg1) == INTEGER_CST)
1509 /* Given an integer constant, make new constant with new type,
1510 appropriately sign-extended or truncated. */
1511 t = build_int_2 (TREE_INT_CST_LOW (arg1),
1512 TREE_INT_CST_HIGH (arg1));
1513 TREE_TYPE (t) = type;
1514 /* Indicate an overflow if (1) ARG1 already overflowed,
1515 or (2) ARG1 is a too-large unsigned value and T is signed,
1516 or (3) force_fit_type indicates an overflow.
1517 force_fit_type can't detect (2), since it sees only T's type. */
/* NOTE(review): bitwise `|' and `&' (not `||'/`&&') combine these 0/1
   flags -- presumably deliberate so force_fit_type's side effect is not
   short-circuited away; confirm.  */
1518 TREE_CONSTANT_OVERFLOW (t) =
1519 (TREE_CONSTANT_OVERFLOW (arg1)
1520 | (TREE_INT_CST_HIGH (arg1) < 0
1521 & TREE_UNSIGNED (type) < TREE_UNSIGNED (TREE_TYPE (arg1)))
1522 | force_fit_type (t, 0));
/* Real -> integer conversion, only when host real arithmetic is usable.  */
1524 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1525 else if (TREE_CODE (arg1) == REAL_CST)
/* l/u are the target type's bounds as reals; x is the value to convert.  */
1528 l = real_value_from_int_cst (TYPE_MIN_VALUE (type)),
1529 x = TREE_REAL_CST (arg1),
1530 u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
1531 /* See if X will be in range after truncation towards 0.
1532 To compensate for truncation, move the bounds away from 0,
1533 but reject if X exactly equals the adjusted bounds. */
1534 #ifdef REAL_ARITHMETIC
1535 REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
1536 REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
1541 if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
1543 pedwarn ("real constant out of range for integer conversion");
1546 #ifndef REAL_ARITHMETIC
/* Manual double -> two-word integer conversion: split D into the high
   and low words of a double-word integer.  */
1549 HOST_WIDE_INT low, high;
1550 HOST_WIDE_INT half_word
1551 = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
1553 d = TREE_REAL_CST (arg1);
1557 high = (HOST_WIDE_INT) (d / half_word / half_word);
1558 d -= (REAL_VALUE_TYPE) high * half_word * half_word;
/* If the remainder would not fit in a signed word, bias it down and
   set the sign bit by hand.  */
1559 if (d >= (REAL_VALUE_TYPE) half_word * half_word / 2)
1561 low = d - (REAL_VALUE_TYPE) half_word * half_word / 2;
1562 low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
1565 low = (HOST_WIDE_INT) d;
/* Negative reals: negate the double-word result back (presumably the
   absolute value was taken on a line elided from this excerpt -- TODO
   confirm against the full source).  */
1566 if (TREE_REAL_CST (arg1) < 0)
1567 neg_double (low, high, &low, &high);
1568 t = build_int_2 (low, high);
/* REAL_ARITHMETIC branch: the target-provided macro does the conversion.  */
1572 HOST_WIDE_INT low, high;
1573 REAL_VALUE_TO_INT (low, high, TREE_REAL_CST (arg1));
1574 t = build_int_2 (low, high);
1577 TREE_TYPE (t) = type;
1578 force_fit_type (t, 0);
1580 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1581 TREE_TYPE (t) = type;
/* Case 2: converting to a floating-point type.  */
1583 else if (TREE_CODE (type) == REAL_TYPE)
1585 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1586 if (TREE_CODE (arg1) == INTEGER_CST)
1587 return build_real_from_int_cst (type, arg1);
1588 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1589 if (TREE_CODE (arg1) == REAL_CST)
/* Trap floating overflow via the longjmp-based float handler declared at
   the top of the file; on overflow we land here and only pedwarn.  */
1591 if (setjmp (float_error))
1593 pedwarn ("floating overflow in constant expression");
1596 set_float_handler (float_error);
1598 t = build_real (type, real_value_truncate (TYPE_MODE (type),
1599 TREE_REAL_CST (arg1)));
1600 set_float_handler (NULL_PTR);
1604 TREE_CONSTANT (t) = 1;
1608 /* Return an expr equal to X but certainly not valid as an lvalue.
1609 Also make sure it is not valid as a null pointer constant. */
/* NOTE(review): the function header (return type, name, parameter decl;
   original lines 1610-1616) is missing from this excerpt; only the body
   fragment is visible.  */
1617 /* These things are certainly not lvalues. */
1618 if (TREE_CODE (x) == NON_LVALUE_EXPR
1619 || TREE_CODE (x) == INTEGER_CST
1620 || TREE_CODE (x) == REAL_CST
1621 || TREE_CODE (x) == STRING_CST
1622 || TREE_CODE (x) == ADDR_EXPR)
/* Integer zero must not look like a null pointer constant, so wrap it
   in a NOP_EXPR rather than returning it unchanged.  */
1624 if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
1626 result = build1 (NOP_EXPR, TREE_TYPE (x), x);
1627 TREE_CONSTANT (result) = TREE_CONSTANT (x);
/* General case: wrap X in a NON_LVALUE_EXPR, preserving constancy.  */
1633 result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
1634 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1638 /* Given a tree comparison code, return the code that is the logical inverse
1639 of the given code. It is not safe to do this for floating-point
1640 comparisons, except for NE_EXPR and EQ_EXPR. */
/* NOTE(review): only the K&R-style header survives in this excerpt; the
   body (original lines 1645 onward, presumably a switch over CODE) is
   missing.  */
1642 static enum tree_code
1643 invert_tree_comparison (code)
1644 enum tree_code code;
1665 /* Similar, but return the comparison that results if the operands are
1666 swapped. This is safe for floating-point. */
/* NOTE(review): the body (original lines 1671 onward) is missing from
   this excerpt; only the header is visible.  */
1668 static enum tree_code
1669 swap_tree_comparison (code)
1670 enum tree_code code;
1690 /* Return nonzero if two operands are necessarily equal.
1691 If ONLY_CONST is non-zero, only return non-zero for constants.
1692 This function tests whether the operands are indistinguishable;
1693 it does not test whether they are equal using C's == operation.
1694 The distinction is important for IEEE floating point, because
1695 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
1696 (2) two NaNs may be indistinguishable, but NaN!=NaN. */
/* NOTE(review): lines are missing throughout this excerpt (numbering
   jumps); parameter declarations, braces and several `return' statements
   have been lost.  */
1699 operand_equal_p (arg0, arg1, only_const)
1703 /* If both types don't have the same signedness, then we can't consider
1704 them equal. We must check this before the STRIP_NOPS calls
1705 because they may change the signedness of the arguments. */
1706 if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
1712 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
1713 We don't care about side effects in that case because the SAVE_EXPR
1714 takes care of that for us. */
1715 if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
1716 return ! only_const;
/* Anything with side effects cannot be proven equal to anything.  */
1718 if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
/* Identical ADDR_EXPRs of the very same object node.  */
1721 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1722 && TREE_CODE (arg0) == ADDR_EXPR
1723 && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
/* Integer constants compare by value, both words.  */
1726 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1727 && TREE_CODE (arg0) == INTEGER_CST
1728 && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
1729 && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
1732 /* Detect when real constants are equal. */
/* Bitwise comparison deliberately distinguishes -0.0 from 0.0 and can
   equate NaNs, matching the head comment above.  */
1733 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1734 && TREE_CODE (arg0) == REAL_CST)
1735 return !bcmp (&TREE_REAL_CST (arg0), &TREE_REAL_CST (arg1),
1736 sizeof (REAL_VALUE_TYPE));
/* Beyond this point the tree codes must match exactly.  */
1744 if (TREE_CODE (arg0) != TREE_CODE (arg1))
1746 /* This is needed for conversions and for COMPONENT_REF.
1747 Might as well play it safe and always test this. */
1748 if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
/* Recurse by tree-code class (the case labels are elided in this
   excerpt; presumably unary, binary and reference classes).  */
1751 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
1754 /* Two conversions are equal only if signedness and modes match. */
1755 if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
1756 && (TREE_UNSIGNED (TREE_TYPE (arg0))
1757 != TREE_UNSIGNED (TREE_TYPE (arg1))))
1760 return operand_equal_p (TREE_OPERAND (arg0, 0),
1761 TREE_OPERAND (arg1, 0), 0);
1765 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1766 TREE_OPERAND (arg1, 0), 0)
1767 && operand_equal_p (TREE_OPERAND (arg0, 1),
1768 TREE_OPERAND (arg1, 1), 0));
/* References: dispatch on the exact code; one-, two- and three-operand
   forms follow (labels elided in this excerpt).  */
1771 switch (TREE_CODE (arg0))
1774 return operand_equal_p (TREE_OPERAND (arg0, 0),
1775 TREE_OPERAND (arg1, 0), 0);
1779 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1780 TREE_OPERAND (arg1, 0), 0)
1781 && operand_equal_p (TREE_OPERAND (arg0, 1),
1782 TREE_OPERAND (arg1, 1), 0));
1785 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1786 TREE_OPERAND (arg1, 0), 0)
1787 && operand_equal_p (TREE_OPERAND (arg0, 1),
1788 TREE_OPERAND (arg1, 1), 0)
1789 && operand_equal_p (TREE_OPERAND (arg0, 2),
1790 TREE_OPERAND (arg1, 2), 0));
1798 /* Similar to operand_equal_p, but see if ARG0 might have been made by
1799 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
1801 When in doubt, return 0. */
/* NOTE(review): lines are missing in this excerpt (numbering jumps);
   some declarations, braces and `return' statements have been lost.  */
1804 operand_equal_for_comparison_p (arg0, arg1, other)
1808 int unsignedp1, unsignedpo;
1809 tree primarg1, primother;
/* Exact equality is the easy win.  */
1812 if (operand_equal_p (arg0, arg1, 0))
/* Only integer comparisons are shortened by shorten_compare.  */
1815 if (TREE_CODE (TREE_TYPE (arg0)) != INTEGER_TYPE)
1818 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
1819 actual comparison operand, ARG0.
1821 First throw away any conversions to wider types
1822 already present in the operands. */
1824 primarg1 = get_narrower (arg1, &unsignedp1);
1825 primother = get_narrower (other, &unsignedpo);
1827 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
1828 if (unsignedp1 == unsignedpo
1829 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
1830 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
1832 tree type = TREE_TYPE (arg0);
1834 /* Make sure shorter operand is extended the right way
1835 to match the longer operand. */
1836 primarg1 = convert (signed_or_unsigned_type (unsignedp1,
1837 TREE_TYPE (primarg1)),
/* Final check: widening the reconstructed operand back to ARG0's type
   must reproduce ARG0 exactly.  */
1840 if (operand_equal_p (arg0, convert (type, primarg1), 0))
1847 /* See if ARG is an expression that is either a comparison or is performing
1848 arithmetic on comparisons. The comparisons must only be comparing
1849 two different values, which will be stored in *CVAL1 and *CVAL2; if
1850 they are non-zero it means that some operands have already been found.
1851 No variables may be used anywhere else in the expression except in the
1854 If this is true, return 1. Otherwise, return zero. */
/* NOTE(review): numbering jumps show missing lines (case labels, braces,
   several returns); comments below cover only the visible code.  */
1857 twoval_comparison_p (arg, cval1, cval2)
1859 tree *cval1, *cval2;
1861 enum tree_code code = TREE_CODE (arg);
1862 char class = TREE_CODE_CLASS (code);
1864 /* We can handle some of the 'e' cases here. */
1866 && (code == TRUTH_NOT_EXPR
1867 || (code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)))
1869 else if (class == 'e'
1870 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
1871 || code == COMPOUND_EXPR))
/* Unary: the single operand must itself qualify.  */
1877 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2);
/* Binary: both operands must qualify.  */
1880 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2)
1881 && twoval_comparison_p (TREE_OPERAND (arg, 1), cval1, cval2))
1887 if (code == COND_EXPR)
1888 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2)
1889 && twoval_comparison_p (TREE_OPERAND (arg, 1), cval1, cval2)
1890 && twoval_comparison_p (TREE_OPERAND (arg, 2),
1895 /* First see if we can handle the first operand, then the second. For
1896 the second operand, we know *CVAL1 can't be zero. It must be that
1897 one side of the comparison is each of the values; test for the
1898 case where this isn't true by failing if the two operands
are the same. */
1901 if (operand_equal_p (TREE_OPERAND (arg, 0),
1902 TREE_OPERAND (arg, 1), 0))
/* Record operand 0 as one of the two values, or match it against one
   already recorded.  */
1906 *cval1 = TREE_OPERAND (arg, 0);
1907 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
1909 else if (*cval2 == 0)
1910 *cval2 = TREE_OPERAND (arg, 0);
1911 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
/* Likewise for operand 1.  */
1916 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
1918 else if (*cval2 == 0)
1919 *cval2 = TREE_OPERAND (arg, 1);
1920 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
1931 /* ARG is a tree that is known to contain just arithmetic operations and
1932 comparisons. Evaluate the operations in the tree substituting NEW0 for
1933 any occurrence of OLD0 as an operand of a comparison and likewise for
NEW1 and OLD1. */
1937 eval_subst (arg, old0, new0, old1, new1)
1939 tree old0, new0, old1, new1;
1941 tree type = TREE_TYPE (arg);
1942 enum tree_code code = TREE_CODE (arg);
1943 char class = TREE_CODE_CLASS (code);
1945 /* We can handle some of the 'e' cases here. */
1946 if (class == 'e' && code == TRUTH_NOT_EXPR)
1948 else if (class == 'e'
1949 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
/* Unary: rebuild with the substituted operand and fold.  */
1955 return fold (build1 (code, type,
1956 eval_subst (TREE_OPERAND (arg, 0),
1957 old0, new0, old1, new1)));
/* Binary: substitute in both operands.  */
1960 return fold (build (code, type,
1961 eval_subst (TREE_OPERAND (arg, 0),
1962 old0, new0, old1, new1),
1963 eval_subst (TREE_OPERAND (arg, 1),
1964 old0, new0, old1, new1)));
/* Expression arms (case labels elided in this excerpt; presumably
   COMPOUND_EXPR keeps one side and COND_EXPR rebuilds all three).  */
1970 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
1973 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
1976 return fold (build (code, type,
1977 eval_subst (TREE_OPERAND (arg, 0),
1978 old0, new0, old1, new1),
1979 eval_subst (TREE_OPERAND (arg, 1),
1980 old0, new0, old1, new1),
1981 eval_subst (TREE_OPERAND (arg, 2),
1982 old0, new0, old1, new1)));
/* Comparison: replace OLD0/OLD1 with NEW0/NEW1 where they appear as
   direct operands.  */
1987 tree arg0 = TREE_OPERAND (arg, 0);
1988 tree arg1 = TREE_OPERAND (arg, 1);
1990 /* We need to check both for exact equality and tree equality. The
1991 former will be true if the operand has a side-effect. In that
1992 case, we know the operand occurred exactly once. */
1994 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
1996 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
1999 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
2001 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
2004 return fold (build (code, type, arg0, arg1));
2011 /* Return a tree for the case when the result of an expression is RESULT
2012 converted to TYPE and OMITTED was previously an operand of the expression
2013 but is now not needed (e.g., we folded OMITTED * 0).
2015 If OMITTED has side effects, we must evaluate it. Otherwise, just do
2016 the conversion of RESULT to TYPE. */
2019 omit_one_operand (type, result, omitted)
2020 tree type, result, omitted;
2022 tree t = convert (type, result);
/* Keep OMITTED's side effects by sequencing it before the result.  */
2024 if (TREE_SIDE_EFFECTS (omitted))
2025 return build (COMPOUND_EXPR, type, omitted, t);
/* Otherwise mark the result non-lvalue so callers cannot assign to it.  */
2027 return non_lvalue (t);
2030 /* Return a simplified tree node for the truth-negation of ARG. This
2031 never alters ARG itself. We assume that ARG is an operation that
2032 returns a truth value (0 or 1). */
/* NOTE(review): numbering jumps show missing lines (function return type,
   braces, some case labels and the default arm).  */
2035 invert_truthvalue (arg)
2038 tree type = TREE_TYPE (arg);
2039 enum tree_code code = TREE_CODE (arg);
2041 /* If this is a comparison, we can simply invert it, except for
2042 floating-point non-equality comparisons, in which case we just
2043 enclose a TRUTH_NOT_EXPR around what we have. */
2045 if (TREE_CODE_CLASS (code) == '<')
/* For FP orderings keep an explicit NOT: with IEEE NaNs, !(a < b) is
   not the same as (a >= b).  */
2047 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (arg, 0))) == REAL_TYPE
2048 && code != NE_EXPR && code != EQ_EXPR)
2049 return build1 (TRUTH_NOT_EXPR, type, arg);
2051 return build (invert_tree_comparison (code), type,
2052 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
/* Constant: a nonzero value inverts to 0, zero inverts to 1.  */
2058 return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
2059 && TREE_INT_CST_HIGH (arg) == 0, 0));
/* De Morgan: !(a AND b) == !a OR !b, and dually.  */
2061 case TRUTH_AND_EXPR:
2062 return build (TRUTH_OR_EXPR, type,
2063 invert_truthvalue (TREE_OPERAND (arg, 0)),
2064 invert_truthvalue (TREE_OPERAND (arg, 1)));
2067 return build (TRUTH_AND_EXPR, type,
2068 invert_truthvalue (TREE_OPERAND (arg, 0)),
2069 invert_truthvalue (TREE_OPERAND (arg, 1)));
2071 case TRUTH_XOR_EXPR:
2072 /* Here we can invert either operand. We invert the first operand
2073 unless the second operand is a TRUTH_NOT_EXPR in which case our
2074 result is the XOR of the first operand with the inside of the
2075 negation of the second operand. */
2077 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
2078 return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
2079 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
2081 return build (TRUTH_XOR_EXPR, type,
2082 invert_truthvalue (TREE_OPERAND (arg, 0)),
2083 TREE_OPERAND (arg, 1));
/* Short-circuit forms invert like the strict forms (De Morgan).  */
2085 case TRUTH_ANDIF_EXPR:
2086 return build (TRUTH_ORIF_EXPR, type,
2087 invert_truthvalue (TREE_OPERAND (arg, 0)),
2088 invert_truthvalue (TREE_OPERAND (arg, 1)));
2090 case TRUTH_ORIF_EXPR:
2091 return build (TRUTH_ANDIF_EXPR, type,
2092 invert_truthvalue (TREE_OPERAND (arg, 0)),
2093 invert_truthvalue (TREE_OPERAND (arg, 1)));
/* Double negation cancels.  */
2095 case TRUTH_NOT_EXPR:
2096 return TREE_OPERAND (arg, 0);
/* Conditional: invert both value arms, keep the condition.  */
2099 return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
2100 invert_truthvalue (TREE_OPERAND (arg, 1)),
2101 invert_truthvalue (TREE_OPERAND (arg, 2)));
/* Compound: invert only the value operand.  */
2104 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
2105 invert_truthvalue (TREE_OPERAND (arg, 1)));
2107 case NON_LVALUE_EXPR:
2108 return invert_truthvalue (TREE_OPERAND (arg, 0));
/* Conversions: invert inside and rebuild the conversion.  */
2113 return build1 (TREE_CODE (arg), type,
2114 invert_truthvalue (TREE_OPERAND (arg, 0)));
/* A bit-AND is a truth value only when masking with 1; the failure path
   for other masks is elided in this excerpt.  */
2117 if (! integer_onep (TREE_OPERAND (arg, 1)))
2119 return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
2125 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2126 operands are another bit-wise operation with a common input. If so,
2127 distribute the bit operations to save an operation and possibly two if
2128 constants are involved. For example, convert
2129 (A | B) & (A | C) into A | (B & C)
2130 Further simplification will occur if B and C are constants.
2132 If this optimization cannot be done, 0 will be returned. */
2135 distribute_bit_expr (code, type, arg0, arg1)
2136 enum tree_code code;
/* Both operands must be the same bit operation, different from CODE,
   and one of AND/IOR; otherwise distribution does not apply.  */
2143 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2144 || TREE_CODE (arg0) == code
2145 || (TREE_CODE (arg0) != BIT_AND_EXPR
2146 && TREE_CODE (arg0) != BIT_IOR_EXPR))
/* Find the operand the two subexpressions share; the remaining two
   become LEFT and RIGHT.  All four orderings are tried.  */
2149 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
2151 common = TREE_OPERAND (arg0, 0);
2152 left = TREE_OPERAND (arg0, 1);
2153 right = TREE_OPERAND (arg1, 1);
2155 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
2157 common = TREE_OPERAND (arg0, 0);
2158 left = TREE_OPERAND (arg0, 1);
2159 right = TREE_OPERAND (arg1, 0);
2161 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
2163 common = TREE_OPERAND (arg0, 1);
2164 left = TREE_OPERAND (arg0, 0);
2165 right = TREE_OPERAND (arg1, 1);
2167 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
2169 common = TREE_OPERAND (arg0, 1);
2170 left = TREE_OPERAND (arg0, 0);
2171 right = TREE_OPERAND (arg1, 0);
/* Build COMMON op (LEFT code RIGHT), folding the inner operation so
   constant LEFT/RIGHT collapse immediately.  */
2176 return fold (build (TREE_CODE (arg0), type, common,
2177 fold (build (code, type, left, right))));
2180 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
2181 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
2184 make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
2187 int bitsize, bitpos;
/* Size and position are encoded as sizetype constants in the node.  */
2190 tree result = build (BIT_FIELD_REF, type, inner,
2191 size_int (bitsize), size_int (bitpos));
2193 TREE_UNSIGNED (result) = unsignedp;
2198 /* Optimize a bit-field compare.
2200 There are two cases: First is a compare against a constant and the
2201 second is a comparison of two items where the fields are at the same
2202 bit position relative to the start of a chunk (byte, halfword, word)
2203 large enough to contain it. In these cases we can avoid the shift
2204 implicit in bitfield extractions.
2206 For constants, we emit a compare of the shifted constant with the
2207 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2208 compared. For two fields at the same position, we do the ANDs with the
2209 similar mask and compare the result of the ANDs.
2211 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2212 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2213 are the left and right operands of the comparison, respectively.
2215 If the optimization described above can be done, we return the resulting
2216 tree. Otherwise we return zero. */
/* NOTE(review): numbering jumps indicate missing lines in this excerpt
   (declarations of mask/offset, braces, early `return 0;' statements,
   trailing arguments of some calls).  */
2219 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2220 enum tree_code code;
2224 int lbitpos, lbitsize, rbitpos, rbitsize;
2225 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
2226 tree type = TREE_TYPE (lhs);
2227 tree signed_type, unsigned_type;
2228 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2229 enum machine_mode lmode, rmode, lnmode, rnmode;
2230 int lunsignedp, runsignedp;
2231 int lvolatilep = 0, rvolatilep = 0;
2232 tree linner, rinner;
2236 /* Get all the information about the extractions being done. If the bit size
2237 is the same as the size of the underlying object, we aren't doing an
2238 extraction at all and so can do nothing. */
2239 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2240 &lunsignedp, &lvolatilep);
2241 if (lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2247 /* If this is not a constant, we can only do something if bit positions,
2248 sizes, and signedness are the same. */
2249 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
2250 &rmode, &runsignedp, &rvolatilep);
2252 if (lbitpos != rbitpos || lbitsize != rbitsize
2253 || lunsignedp != runsignedp || offset != 0)
2257 /* See if we can find a mode to refer to this field. We should be able to,
2258 but fail if we can't. */
2259 lnmode = get_best_mode (lbitsize, lbitpos,
2260 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
2262 if (lnmode == VOIDmode)
2265 /* Set signed and unsigned types of the precision of this mode for the
comparison. */
2267 signed_type = type_for_mode (lnmode, 0);
2268 unsigned_type = type_for_mode (lnmode, 1);
/* Same mode search for the right-hand field in the two-field case.  */
2272 rnmode = get_best_mode (rbitsize, rbitpos,
2273 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
2275 if (rnmode == VOIDmode)
2279 /* Compute the bit position and size for the new reference and our offset
2280 within it. If the new reference is the same size as the original, we
2281 won't optimize anything, so return zero. */
2282 lnbitsize = GET_MODE_BITSIZE (lnmode);
2283 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2284 lbitpos -= lnbitpos;
2285 if (lnbitsize == lbitsize)
2290 rnbitsize = GET_MODE_BITSIZE (rnmode);
2291 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2292 rbitpos -= rnbitpos;
2293 if (rnbitsize == rbitsize)
/* Big-endian targets number bits from the other end of the unit.  */
2297 #if BYTES_BIG_ENDIAN
2298 lbitpos = lnbitsize - lbitsize - lbitpos;
2301 /* Make the mask to be used against the extracted field. */
/* NOTE(review): this force_fit_type call passes one argument while other
   call sites in this file pass two (e.g. force_fit_type (t, 0)) --
   confirm which prototype is current.  */
2302 mask = build_int_2 (~0, ~0);
2303 TREE_TYPE (mask) = unsigned_type;
2304 force_fit_type (mask);
2305 mask = convert (unsigned_type, mask);
2306 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2307 mask = const_binop (RSHIFT_EXPR, mask,
2308 size_int (lnbitsize - lbitsize - lbitpos), 0);
2311 /* If not comparing with constant, just rework the comparison
and return. */
2313 return build (code, compare_type,
2314 build (BIT_AND_EXPR, unsigned_type,
2315 make_bit_field_ref (linner, unsigned_type,
2316 lnbitsize, lnbitpos, 1),
2318 build (BIT_AND_EXPR, unsigned_type,
2319 make_bit_field_ref (rinner, unsigned_type,
2320 rnbitsize, rnbitpos, 1),
2323 /* Otherwise, we are handling the constant case. See if the constant is too
2324 big for the field. Warn and return a tree for 0 (false) if so. We do
2325 this not only for its own sake, but to avoid having to test for this
2326 error case below. If we didn't, we might generate wrong code.
2328 For unsigned fields, the constant shifted right by the field length should
2329 be all zero. For signed fields, the high-order bits should agree with
the sign bit. */
2334 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2335 convert (unsigned_type, rhs),
2336 size_int (lbitsize), 0)))
2338 warning ("comparison is always %s due to width of bitfield",
2339 code == NE_EXPR ? "one" : "zero");
2340 return convert (compare_type,
2342 ? integer_one_node : integer_zero_node));
/* Signed field: all bits above the field must equal the sign bit.  */
2347 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2348 size_int (lbitsize - 1), 0);
2349 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2351 warning ("comparison is always %s due to width of bitfield",
2352 code == NE_EXPR ? "one" : "zero");
2353 return convert (compare_type,
2355 ? integer_one_node : integer_zero_node));
2359 /* Single-bit compares should always be against zero. */
2360 if (lbitsize == 1 && ! integer_zerop (rhs))
2362 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2363 rhs = convert (type, integer_zero_node);
2366 /* Make a new bitfield reference, shift the constant over the
2367 appropriate number of bits and mask it with the computed mask
2368 (in case this was a signed field). If we changed it, make a new one. */
2369 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2371 rhs = fold (const_binop (BIT_AND_EXPR,
2372 const_binop (LSHIFT_EXPR,
2373 convert (unsigned_type, rhs),
2374 size_int (lbitpos)),
/* NOTE(review): the inner const_binop calls above appear to lack their
   final notrunc argument seen elsewhere in this file -- likely an
   artifact of lines elided from this excerpt; confirm.  */
2377 return build (code, compare_type,
2378 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2382 /* Subroutine for fold_truthop: decode a field reference.
2384 If EXP is a comparison reference, we return the innermost reference.
2386 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2387 set to the starting bit number.
2389 If the innermost field can be completely contained in a mode-sized
2390 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2392 *PVOLATILEP is set to 1 if any expression encountered is volatile;
2393 otherwise it is not changed.
2395 *PUNSIGNEDP is set to the signedness of the field.
2397 *PMASK is set to the mask used. This is either contained in a
2398 BIT_AND_EXPR or derived from the width of the field.
2400 Return 0 if this is not a component reference or is one that we can't
2401 do anything with. */
/* NOTE(review): missing lines include the rest of the parameter list
   (presumably pvolatilep/pmask), local declarations, and several
   `return' statements.  */
2404 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2407 int *pbitsize, *pbitpos;
2408 enum machine_mode *pmode;
2409 int *punsignedp, *pvolatilep;
/* Strip an explicit BIT_AND_EXPR mask, if any; it must be constant.  */
2418 if (TREE_CODE (exp) == BIT_AND_EXPR)
2420 mask = TREE_OPERAND (exp, 1);
2421 exp = TREE_OPERAND (exp, 0);
2422 STRIP_NOPS (exp); STRIP_NOPS (mask);
2423 if (TREE_CODE (mask) != INTEGER_CST)
/* Only component/array/bit-field references are decodable.  */
2427 if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
2428 && TREE_CODE (exp) != BIT_FIELD_REF)
2431 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2432 punsignedp, pvolatilep);
2433 if (*pbitsize < 0 || offset != 0)
/* No explicit mask: derive a mask of *PBITSIZE low-order ones by
   shifting an all-ones constant up and back down.  */
2438 tree unsigned_type = type_for_size (*pbitsize, 1);
2439 int precision = TYPE_PRECISION (unsigned_type);
2441 mask = build_int_2 (~0, ~0);
2442 TREE_TYPE (mask) = unsigned_type;
2443 force_fit_type (mask, 0);
2444 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2445 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2452 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
bits. */
/* NOTE(review): the end of the head comment (original line 2453) and the
   parameter declarations are missing from this excerpt; the closing of
   the comment above has been restored so the code parses.  */
2456 all_ones_mask_p (mask, size)
2460 tree type = TREE_TYPE (mask);
2461 int precision = TYPE_PRECISION (type);
/* Build an all-ones constant of MASK's width...  */
2464 tmask = build_int_2 (~0, ~0);
2465 TREE_TYPE (tmask) = signed_type (type);
/* NOTE(review): one-argument force_fit_type here vs. two-argument calls
   elsewhere in this file (force_fit_type (t, 0)) -- confirm.  */
2466 force_fit_type (tmask);
/* ...then compare MASK against that constant with the high bits shifted
   out, leaving exactly SIZE low-order ones.  */
2468 operand_equal_p (mask,
2469 const_binop (RSHIFT_EXPR,
2470 const_binop (LSHIFT_EXPR, tmask,
2471 size_int (precision - size), 0),
2472 size_int (precision - size), 0),
2476 /* Subroutine for fold_truthop: determine if an operand is simple enough
2477 to be evaluated unconditionally. */
2483 simple_operand_p (exp)
2486 /* Strip any conversions that don't change the machine mode. */
2487 while ((TREE_CODE (exp) == NOP_EXPR
2488 || TREE_CODE (exp) == CONVERT_EXPR)
2489 && (TYPE_MODE (TREE_TYPE (exp))
2490 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2491 exp = TREE_OPERAND (exp, 0);
/* Simple means: a constant ('c' class), or a declaration ('d' class)
   that is cheap and safe to load with no side effects.  */
2493 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2494 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2495 && ! TREE_ADDRESSABLE (exp)
2496 && ! TREE_THIS_VOLATILE (exp)
2497 && ! DECL_NONLOCAL (exp)
2498 /* Don't regard global variables as simple. They may be
2499 allocated in ways unknown to the compiler (shared memory,
2500 #pragma weak, etc). */
2501 && ! TREE_PUBLIC (exp)
2502 && ! DECL_EXTERNAL (exp)
2503 /* Loading a static variable is unduly expensive, but global
2504 registers aren't expensive. */
2505 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2508 /* Subroutine for fold_truthop: try to optimize a range test.
2510 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2512 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2513 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2514 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of
2517 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2518 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2519 larger than HI_CST (they may be equal).
2521 We return the simplified tree or 0 if no optimization is possible. */
/* NOTE(review): this excerpt has elided lines (rcode assignments, the
   TRUTH_OR_EXPR `else' arm header, braces and some returns).  */
2524 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2525 enum tree_code jcode, lo_code, hi_code;
2526 tree type, var, lo_cst, hi_cst;
2529 enum tree_code rcode;
2531 /* See if this is a range test and normalize the constant terms. */
2533 if (jcode == TRUTH_AND_EXPR)
/* AND case: special-case two NE tests against adjacent constants (they
   become a single "outside the pair" test).  */
2538 /* See if we have VAR != CST && VAR != CST+1. */
2539 if (! (hi_code == NE_EXPR
2540 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2541 && tree_int_cst_equal (integer_one_node,
2542 const_binop (MINUS_EXPR,
2543 hi_cst, lo_cst, 0))))
/* Normalize strict bounds (< / >) to inclusive ones by adjusting the
   constants.  */
2551 if (hi_code == LT_EXPR)
2552 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2553 else if (hi_code != LE_EXPR)
2556 if (lo_code == GT_EXPR)
2557 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2559 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
/* OR arm (header elided in this excerpt): the dual normalization.  */
2572 /* See if we have VAR == CST || VAR == CST+1. */
2573 if (! (hi_code == EQ_EXPR
2574 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2575 && tree_int_cst_equal (integer_one_node,
2576 const_binop (MINUS_EXPR,
2577 hi_cst, lo_cst, 0))))
2585 if (hi_code == GE_EXPR)
2586 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2587 else if (hi_code != GT_EXPR)
2590 if (lo_code == LE_EXPR)
2591 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2593 /* We now have VAR < LO_CST || VAR > HI_CST. */
2602 /* When normalizing, it is possible to both increment the smaller constant
2603 and decrement the larger constant. See if they are still ordered. */
2604 if (tree_int_cst_lt (hi_cst, lo_cst))
2607 /* Fail if VAR isn't an integer. */
2608 utype = TREE_TYPE (var);
2609 if (TREE_CODE (utype) != INTEGER_TYPE
2610 && TREE_CODE (utype) != ENUMERAL_TYPE)
2613 /* The range test is invalid if subtracting the two constants results
2614 in overflow. This can happen in traditional mode. */
2615 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2616 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
/* Do the subtraction and comparison in the unsigned type, so that
   wraparound turns the two comparisons into one.  */
2619 if (! TREE_UNSIGNED (utype))
2621 utype = unsigned_type (utype);
2622 var = convert (utype, var);
2623 lo_cst = convert (utype, lo_cst);
2624 hi_cst = convert (utype, hi_cst);
/* Result: (unsigned)(VAR - LO_CST) rcode (HI_CST - LO_CST).  */
2627 return fold (convert (type,
2628 build (rcode, utype,
2629 build (MINUS_EXPR, utype, var, lo_cst),
2630 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2633 /* Find ways of folding logical expressions of LHS and RHS:
2634 Try to merge two comparisons to the same innermost item.
2635 Look for range tests like "ch >= '0' && ch <= '9'".
2636 Look for combinations of simple terms on machines with expensive branches
2637 and evaluate the RHS unconditionally.
2639 For example, if we have p->a == 2 && p->b == 4 and we can make an
2640 object large enough to span both A and B, we can do this with a comparison
2641 against the object ANDed with the a mask.
2643 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2644 operations to do this with one comparison.
2646 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2647 function and the one above.
2649 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2650 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2652 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
2655 We return the simplified tree or 0 if no optimization is possible. */
2658 fold_truthop (code, truth_type, lhs, rhs)
2659 enum tree_code code;
2660 tree truth_type, lhs, rhs;
2662 /* If this is the "or" of two comparisons, we can do something if
2663 the comparisons are NE_EXPR. If this is the "and", we can do something
2664 if the comparisons are EQ_EXPR. I.e.,
2665 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2667 WANTED_CODE is this operation code. For single bit fields, we can
2668 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2669 comparison for one-bit fields. */
2671 enum tree_code wanted_code;
2672 enum tree_code lcode, rcode;
2673 tree ll_arg, lr_arg, rl_arg, rr_arg;
2674 tree ll_inner, lr_inner, rl_inner, rr_inner;
2675 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2676 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2677 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2678 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2679 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2680 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2681 enum machine_mode lnmode, rnmode;
2682 tree ll_mask, lr_mask, rl_mask, rr_mask;
2683 tree l_const, r_const;
2685 int first_bit, end_bit;
2688 /* Start by getting the comparison codes and seeing if this looks like
2689 a range test. Fail if anything is volatile. */
2691 if (TREE_SIDE_EFFECTS (lhs)
2692 || TREE_SIDE_EFFECTS (rhs))
2695 lcode = TREE_CODE (lhs);
2696 rcode = TREE_CODE (rhs);
2698 if (TREE_CODE_CLASS (lcode) != '<'
2699 || TREE_CODE_CLASS (rcode) != '<')
2702 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2703 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2705 ll_arg = TREE_OPERAND (lhs, 0);
2706 lr_arg = TREE_OPERAND (lhs, 1);
2707 rl_arg = TREE_OPERAND (rhs, 0);
2708 rr_arg = TREE_OPERAND (rhs, 1);
2710 if (TREE_CODE (lr_arg) == INTEGER_CST
2711 && TREE_CODE (rr_arg) == INTEGER_CST
2712 && operand_equal_p (ll_arg, rl_arg, 0))
2714 if (tree_int_cst_lt (lr_arg, rr_arg))
2715 result = range_test (code, truth_type, lcode, rcode,
2716 ll_arg, lr_arg, rr_arg);
2718 result = range_test (code, truth_type, rcode, lcode,
2719 ll_arg, rr_arg, lr_arg);
2721 /* If this isn't a range test, it also isn't a comparison that
2722 can be merged. However, it wins to evaluate the RHS unconditionally
2723 on machines with expensive branches. */
2725 if (result == 0 && BRANCH_COST >= 2)
2727 if (TREE_CODE (ll_arg) != VAR_DECL
2728 && TREE_CODE (ll_arg) != PARM_DECL)
2730 /* Avoid evaluating the variable part twice. */
2731 ll_arg = save_expr (ll_arg);
2732 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2733 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2735 return build (code, truth_type, lhs, rhs);
2740 /* If the RHS can be evaluated unconditionally and its operands are
2741 simple, it wins to evaluate the RHS unconditionally on machines
2742 with expensive branches. In this case, this isn't a comparison
2743 that can be merged. */
2745 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2746 are with zero (tmw). */
2748 if (BRANCH_COST >= 2
2749 && TREE_CODE (TREE_TYPE (rhs)) == INTEGER_TYPE
2750 && simple_operand_p (rl_arg)
2751 && simple_operand_p (rr_arg))
2752 return build (code, truth_type, lhs, rhs);
2754 /* See if the comparisons can be merged. Then get all the parameters for
2757 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2758 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2762 ll_inner = decode_field_reference (ll_arg,
2763 &ll_bitsize, &ll_bitpos, &ll_mode,
2764 &ll_unsignedp, &volatilep, &ll_mask);
2765 lr_inner = decode_field_reference (lr_arg,
2766 &lr_bitsize, &lr_bitpos, &lr_mode,
2767 &lr_unsignedp, &volatilep, &lr_mask);
2768 rl_inner = decode_field_reference (rl_arg,
2769 &rl_bitsize, &rl_bitpos, &rl_mode,
2770 &rl_unsignedp, &volatilep, &rl_mask);
2771 rr_inner = decode_field_reference (rr_arg,
2772 &rr_bitsize, &rr_bitpos, &rr_mode,
2773 &rr_unsignedp, &volatilep, &rr_mask);
2775 /* It must be true that the inner operation on the lhs of each
2776 comparison must be the same if we are to be able to do anything.
2777 Then see if we have constants. If not, the same must be true for
2779 if (volatilep || ll_inner == 0 || rl_inner == 0
2780 || ! operand_equal_p (ll_inner, rl_inner, 0))
2783 if (TREE_CODE (lr_arg) == INTEGER_CST
2784 && TREE_CODE (rr_arg) == INTEGER_CST)
2785 l_const = lr_arg, r_const = rr_arg;
2786 else if (lr_inner == 0 || rr_inner == 0
2787 || ! operand_equal_p (lr_inner, rr_inner, 0))
2790 l_const = r_const = 0;
2792 /* If either comparison code is not correct for our logical operation,
2793 fail. However, we can convert a one-bit comparison against zero into
2794 the opposite comparison against that bit being set in the field. */
2796 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2797 if (lcode != wanted_code)
2799 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2805 if (rcode != wanted_code)
2807 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2813 /* See if we can find a mode that contains both fields being compared on
2814 the left. If we can't, fail. Otherwise, update all constants and masks
2815 to be relative to a field of that size. */
2816 first_bit = MIN (ll_bitpos, rl_bitpos);
2817 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2818 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2819 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2821 if (lnmode == VOIDmode)
2824 lnbitsize = GET_MODE_BITSIZE (lnmode);
2825 lnbitpos = first_bit & ~ (lnbitsize - 1);
2826 type = type_for_size (lnbitsize, 1);
2827 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2829 #if BYTES_BIG_ENDIAN
2830 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2831 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2834 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2835 size_int (xll_bitpos), 0);
2836 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2837 size_int (xrl_bitpos), 0);
2839 /* Make sure the constants are interpreted as unsigned, so we
2840 don't have sign bits outside the range of their type. */
2844 l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
2845 l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
2846 size_int (xll_bitpos), 0);
2850 r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
2851 r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
2852 size_int (xrl_bitpos), 0);
2855 /* If the right sides are not constant, do the same for it. Also,
2856 disallow this optimization if a size or signedness mismatch occurs
2857 between the left and right sides. */
2860 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2861 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2862 /* Make sure the two fields on the right
2863 correspond to the left without being swapped. */
2864 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2867 first_bit = MIN (lr_bitpos, rr_bitpos);
2868 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2869 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2870 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2872 if (rnmode == VOIDmode)
2875 rnbitsize = GET_MODE_BITSIZE (rnmode);
2876 rnbitpos = first_bit & ~ (rnbitsize - 1);
2877 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2879 #if BYTES_BIG_ENDIAN
2880 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2881 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2884 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2885 size_int (xlr_bitpos), 0);
2886 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2887 size_int (xrr_bitpos), 0);
2889 /* Make a mask that corresponds to both fields being compared.
2890 Do this for both items being compared. If the masks agree,
2891 we can do this by masking both and comparing the masked
2893 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2894 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2895 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2897 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2898 ll_unsignedp || rl_unsignedp);
2899 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2900 lr_unsignedp || rr_unsignedp);
2901 if (! all_ones_mask_p (ll_mask, lnbitsize))
2903 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2904 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2906 return build (wanted_code, truth_type, lhs, rhs);
2909 /* There is still another way we can do something: If both pairs of
2910 fields being compared are adjacent, we may be able to make a wider
2911 field containing them both. */
2912 if ((ll_bitsize + ll_bitpos == rl_bitpos
2913 && lr_bitsize + lr_bitpos == rr_bitpos)
2914 || (ll_bitpos == rl_bitpos + rl_bitsize
2915 && lr_bitpos == rr_bitpos + rr_bitsize))
2916 return build (wanted_code, truth_type,
2917 make_bit_field_ref (ll_inner, type,
2918 ll_bitsize + rl_bitsize,
2919 MIN (ll_bitpos, rl_bitpos),
2921 make_bit_field_ref (lr_inner, type,
2922 lr_bitsize + rr_bitsize,
2923 MIN (lr_bitpos, rr_bitpos),
2929 /* Handle the case of comparisons with constants. If there is something in
2930 common between the masks, those bits of the constants must be the same.
2931 If not, the condition is always false. Test for this to avoid generating
2932 incorrect code below. */
2933 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2934 if (! integer_zerop (result)
2935 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2936 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2938 if (wanted_code == NE_EXPR)
2940 warning ("`or' of unmatched not-equal tests is always 1");
2941 return convert (truth_type, integer_one_node);
2945 warning ("`and' of mutually exclusive equal-tests is always zero");
2946 return convert (truth_type, integer_zero_node);
2950 /* Construct the expression we will return. First get the component
2951 reference we will make. Unless the mask is all ones the width of
2952 that field, perform the mask operation. Then compare with the
2954 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2955 ll_unsignedp || rl_unsignedp);
2957 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2958 if (! all_ones_mask_p (ll_mask, lnbitsize))
2959 result = build (BIT_AND_EXPR, type, result, ll_mask);
2961 return build (wanted_code, truth_type, result,
2962 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
2965 /* Perform constant folding and related simplification of EXPR.
2966 The related simplifications include x*1 => x, x*0 => 0, etc.,
2967 and application of the associative law.
2968 NOP_EXPR conversions may be removed freely (as long as we
2969 are careful not to change the C type of the overall expression)
2970 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
2971 but we can constant-fold them if they have constant operands. */
2977 register tree t = expr;
2978 tree t1 = NULL_TREE;
2980 tree type = TREE_TYPE (expr);
2981 register tree arg0, arg1;
2982 register enum tree_code code = TREE_CODE (t);
2986 /* WINS will be nonzero when the switch is done
2987 if all operands are constant. */
2991 /* Return right away if already constant. */
2992 if (TREE_CONSTANT (t))
2994 if (code == CONST_DECL)
2995 return DECL_INITIAL (t);
2999 kind = TREE_CODE_CLASS (code);
3000 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3004 /* Special case for conversion ops that can have fixed point args. */
3005 arg0 = TREE_OPERAND (t, 0);
3007 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3009 STRIP_TYPE_NOPS (arg0);
3011 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3012 subop = TREE_REALPART (arg0);
3016 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3017 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3018 && TREE_CODE (subop) != REAL_CST
3019 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3021 /* Note that TREE_CONSTANT isn't enough:
3022 static var addresses are constant but we can't
3023 do arithmetic on them. */
3026 else if (kind == 'e' || kind == '<'
3027 || kind == '1' || kind == '2' || kind == 'r')
3029 register int len = tree_code_length[(int) code];
3031 for (i = 0; i < len; i++)
3033 tree op = TREE_OPERAND (t, i);
3037 continue; /* Valid for CALL_EXPR, at least. */
3039 /* Strip any conversions that don't change the mode. */
3042 if (TREE_CODE (op) == COMPLEX_CST)
3043 subop = TREE_REALPART (op);
3047 if (TREE_CODE (subop) != INTEGER_CST
3048 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3049 && TREE_CODE (subop) != REAL_CST
3050 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3052 /* Note that TREE_CONSTANT isn't enough:
3053 static var addresses are constant but we can't
3054 do arithmetic on them. */
3064 /* If this is a commutative operation, and ARG0 is a constant, move it
3065 to ARG1 to reduce the number of tests below. */
3066 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3067 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3068 || code == BIT_AND_EXPR)
3069 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3071 tem = arg0; arg0 = arg1; arg1 = tem;
3073 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3074 TREE_OPERAND (t, 1) = tem;
3077 /* Now WINS is set as described above,
3078 ARG0 is the first operand of EXPR,
3079 and ARG1 is the second operand (if it has more than one operand).
3081 First check for cases where an arithmetic operation is applied to a
3082 compound, conditional, or comparison operation. Push the arithmetic
3083 operation inside the compound or conditional to see if any folding
3084 can then be done. Convert comparison to conditional for this purpose.
3085 This also optimizes non-constant cases that used to be done in
3087 if (TREE_CODE_CLASS (code) == '1')
3089 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3090 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3091 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3092 else if (TREE_CODE (arg0) == COND_EXPR)
3094 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3095 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3096 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3098 /* If this was a conversion, and all we did was to move into
3099 inside the COND_EXPR, bring it back out. Then return so we
3100 don't get into an infinite recursion loop taking the conversion
3101 out and then back in. */
3103 if ((code == NOP_EXPR || code == CONVERT_EXPR
3104 || code == NON_LVALUE_EXPR)
3105 && TREE_CODE (t) == COND_EXPR
3106 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3107 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3108 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3109 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0))))
3110 t = build1 (code, type,
3112 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3113 TREE_OPERAND (t, 0),
3114 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3115 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3118 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3119 return fold (build (COND_EXPR, type, arg0,
3120 fold (build1 (code, type, integer_one_node)),
3121 fold (build1 (code, type, integer_zero_node))));
3123 else if (TREE_CODE_CLASS (code) == '2')
3125 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3126 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3127 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3128 else if (TREE_CODE (arg1) == COND_EXPR
3129 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3131 tree test, true_value, false_value;
3133 if (TREE_CODE (arg1) == COND_EXPR)
3135 test = TREE_OPERAND (arg1, 0);
3136 true_value = TREE_OPERAND (arg1, 1);
3137 false_value = TREE_OPERAND (arg1, 2);
3142 true_value = integer_one_node;
3143 false_value = integer_zero_node;
3146 if (TREE_CODE (arg0) != VAR_DECL && TREE_CODE (arg0) != PARM_DECL)
3147 arg0 = save_expr (arg0);
3148 test = fold (build (COND_EXPR, type, test,
3149 fold (build (code, type, arg0, true_value)),
3150 fold (build (code, type, arg0, false_value))));
3151 if (TREE_CODE (arg0) == SAVE_EXPR)
3152 return build (COMPOUND_EXPR, type,
3153 convert (void_type_node, arg0), test);
3155 return convert (type, test);
3158 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3159 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3160 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3161 else if (TREE_CODE (arg0) == COND_EXPR
3162 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3164 tree test, true_value, false_value;
3166 if (TREE_CODE (arg0) == COND_EXPR)
3168 test = TREE_OPERAND (arg0, 0);
3169 true_value = TREE_OPERAND (arg0, 1);
3170 false_value = TREE_OPERAND (arg0, 2);
3175 true_value = integer_one_node;
3176 false_value = integer_zero_node;
3179 if (TREE_CODE (arg1) != VAR_DECL && TREE_CODE (arg1) != PARM_DECL)
3180 arg1 = save_expr (arg1);
3181 test = fold (build (COND_EXPR, type, test,
3182 fold (build (code, type, true_value, arg1)),
3183 fold (build (code, type, false_value, arg1))));
3184 if (TREE_CODE (arg1) == SAVE_EXPR)
3185 return build (COMPOUND_EXPR, type,
3186 convert (void_type_node, arg1), test);
3188 return convert (type, test);
3191 else if (TREE_CODE_CLASS (code) == '<'
3192 && TREE_CODE (arg0) == COMPOUND_EXPR)
3193 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3194 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3195 else if (TREE_CODE_CLASS (code) == '<'
3196 && TREE_CODE (arg1) == COMPOUND_EXPR)
3197 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3198 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3210 return fold (DECL_INITIAL (t));
3215 case FIX_TRUNC_EXPR:
3216 /* Other kinds of FIX are not handled properly by fold_convert. */
3217 /* Two conversions in a row are not needed unless:
3218 - the intermediate type is narrower than both initial and final, or
3219 - the intermediate type and innermost type differ in signedness,
3220 and the outermost type is wider than the intermediate, or
3221 - the initial type is a pointer type and the precisions of the
3222 intermediate and final types differ, or
3223 - the final type is a pointer type and the precisions of the
3224 initial and intermediate types differ. */
3225 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3226 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3227 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3228 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3230 TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3231 > TYPE_PRECISION (TREE_TYPE (t)))
3232 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3234 && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
3236 && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3237 != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3238 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3239 < TYPE_PRECISION (TREE_TYPE (t))))
3240 && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3241 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3242 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
3244 (TREE_UNSIGNED (TREE_TYPE (t))
3245 && (TYPE_PRECISION (TREE_TYPE (t))
3246 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3247 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3249 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3250 != TYPE_PRECISION (TREE_TYPE (t))))
3251 && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
3252 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3253 != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3254 return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3256 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3257 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3258 /* Detect assigning a bitfield. */
3259 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3260 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3262 /* Don't leave an assignment inside a conversion
3263 unless assigning a bitfield. */
3264 tree prev = TREE_OPERAND (t, 0);
3265 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3266 /* First do the assignment, then return converted constant. */
3267 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3273 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3276 return fold_convert (t, arg0);
3278 #if 0 /* This loses on &"foo"[0]. */
3283 /* Fold an expression like: "foo"[2] */
3284 if (TREE_CODE (arg0) == STRING_CST
3285 && TREE_CODE (arg1) == INTEGER_CST
3286 && !TREE_INT_CST_HIGH (arg1)
3287 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3289 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3290 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3291 force_fit_type (t, 0);
3298 TREE_CONSTANT (t) = wins;
3304 if (TREE_CODE (arg0) == INTEGER_CST)
3306 HOST_WIDE_INT low, high;
3307 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3308 TREE_INT_CST_HIGH (arg0),
3310 t = build_int_2 (low, high);
3311 TREE_TYPE (t) = type;
3312 TREE_CONSTANT_OVERFLOW (t)
3313 = (TREE_CONSTANT_OVERFLOW (arg0)
3314 | force_fit_type (t, overflow));
3316 else if (TREE_CODE (arg0) == REAL_CST)
3317 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3318 TREE_TYPE (t) = type;
3320 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3321 return TREE_OPERAND (arg0, 0);
3323 /* Convert - (a - b) to (b - a) for non-floating-point. */
3324 else if (TREE_CODE (arg0) == MINUS_EXPR && TREE_CODE (type) != REAL_TYPE)
3325 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3326 TREE_OPERAND (arg0, 0));
3333 if (TREE_CODE (arg0) == INTEGER_CST)
3335 if (! TREE_UNSIGNED (type)
3336 && TREE_INT_CST_HIGH (arg0) < 0)
3338 HOST_WIDE_INT low, high;
3339 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3340 TREE_INT_CST_HIGH (arg0),
3342 t = build_int_2 (low, high);
3343 TREE_TYPE (t) = type;
3344 TREE_CONSTANT_OVERFLOW (t)
3345 = (TREE_CONSTANT_OVERFLOW (arg0)
3346 | force_fit_type (t, overflow));
3349 else if (TREE_CODE (arg0) == REAL_CST)
3351 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3352 t = build_real (type,
3353 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3355 TREE_TYPE (t) = type;
3357 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3358 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3364 if (TREE_CODE (arg0) == INTEGER_CST)
3365 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3366 ~ TREE_INT_CST_HIGH (arg0));
3367 TREE_TYPE (t) = type;
3368 force_fit_type (t, 0);
3369 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3371 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3372 return TREE_OPERAND (arg0, 0);
3376 /* A + (-B) -> A - B */
3377 if (TREE_CODE (arg1) == NEGATE_EXPR)
3378 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3379 else if (TREE_CODE (type) != REAL_TYPE)
3381 if (integer_zerop (arg1))
3382 return non_lvalue (convert (type, arg0));
3384 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3385 with a constant, and the two constants have no bits in common,
3386 we should treat this as a BIT_IOR_EXPR since this may produce more
3388 if (TREE_CODE (arg0) == BIT_AND_EXPR
3389 && TREE_CODE (arg1) == BIT_AND_EXPR
3390 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3391 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3392 && integer_zerop (const_binop (BIT_AND_EXPR,
3393 TREE_OPERAND (arg0, 1),
3394 TREE_OPERAND (arg1, 1), 0)))
3396 code = BIT_IOR_EXPR;
3400 /* In IEEE floating point, x+0 may not equal x. */
3401 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3402 && real_zerop (arg1))
3403 return non_lvalue (convert (type, arg0));
3405 /* In most languages, can't associate operations on floats
3406 through parentheses. Rather than remember where the parentheses
3407 were, we don't associate floats at all. It shouldn't matter much. */
3408 if (TREE_CODE (type) == REAL_TYPE)
3410 /* The varsign == -1 cases happen only for addition and subtraction.
3411 It says that the arg that was split was really CON minus VAR.
3412 The rest of the code applies to all associative operations. */
3418 if (split_tree (arg0, code, &var, &con, &varsign))
3422 /* EXPR is (CON-VAR) +- ARG1. */
3423 /* If it is + and VAR==ARG1, return just CONST. */
3424 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3425 return convert (TREE_TYPE (t), con);
3427 /* If ARG0 is a constant, don't change things around;
3428 instead keep all the constant computations together. */
3430 if (TREE_CONSTANT (arg0))
3433 /* Otherwise return (CON +- ARG1) - VAR. */
3434 TREE_SET_CODE (t, MINUS_EXPR);
3435 TREE_OPERAND (t, 1) = var;
3437 = fold (build (code, TREE_TYPE (t), con, arg1));
3441 /* EXPR is (VAR+CON) +- ARG1. */
3442 /* If it is - and VAR==ARG1, return just CONST. */
3443 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3444 return convert (TREE_TYPE (t), con);
3446 /* If ARG0 is a constant, don't change things around;
3447 instead keep all the constant computations together. */
3449 if (TREE_CONSTANT (arg0))
3452 /* Otherwise return VAR +- (ARG1 +- CON). */
3453 TREE_OPERAND (t, 1) = tem
3454 = fold (build (code, TREE_TYPE (t), arg1, con));
3455 TREE_OPERAND (t, 0) = var;
3456 if (integer_zerop (tem)
3457 && (code == PLUS_EXPR || code == MINUS_EXPR))
3458 return convert (type, var);
3459 /* If we have x +/- (c - d) [c an explicit integer]
3460 change it to x -/+ (d - c) since if d is relocatable
3461 then the latter can be a single immediate insn
3462 and the former cannot. */
3463 if (TREE_CODE (tem) == MINUS_EXPR
3464 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3466 tree tem1 = TREE_OPERAND (tem, 1);
3467 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3468 TREE_OPERAND (tem, 0) = tem1;
3470 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3476 if (split_tree (arg1, code, &var, &con, &varsign))
3478 /* EXPR is ARG0 +- (CON +- VAR). */
3479 if (TREE_CODE (t) == MINUS_EXPR
3480 && operand_equal_p (var, arg0, 0))
3482 /* If VAR and ARG0 cancel, return just CON or -CON. */
3483 if (code == PLUS_EXPR)
3484 return convert (TREE_TYPE (t), con);
3485 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3486 convert (TREE_TYPE (t), con)));
3488 if (TREE_CONSTANT (arg1))
3492 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3494 = fold (build (code, TREE_TYPE (t), arg0, con));
3495 TREE_OPERAND (t, 1) = var;
3496 if (integer_zerop (TREE_OPERAND (t, 0))
3497 && TREE_CODE (t) == PLUS_EXPR)
3498 return convert (TREE_TYPE (t), var);
3503 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3504 if (TREE_CODE (arg1) == REAL_CST)
3506 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3508 t1 = const_binop (code, arg0, arg1, 0);
3509 if (t1 != NULL_TREE)
3511 /* The return value should always have
3512 the same type as the original expression. */
3513 TREE_TYPE (t1) = TREE_TYPE (t);
3519 if (TREE_CODE (type) != REAL_TYPE)
3521 if (! wins && integer_zerop (arg0))
3522 return build1 (NEGATE_EXPR, type, arg1);
3523 if (integer_zerop (arg1))
3524 return non_lvalue (convert (type, arg0));
3526 /* Convert A - (-B) to A + B. */
3527 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3528 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3529 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT)
3531 /* Except with IEEE floating point, 0-x equals -x. */
3532 if (! wins && real_zerop (arg0))
3533 return build1 (NEGATE_EXPR, type, arg1);
3534 /* Except with IEEE floating point, x-0 equals x. */
3535 if (real_zerop (arg1))
3536 return non_lvalue (convert (type, arg0));
3538 /* Fold &x - &x. This can happen from &x.foo - &x.
3539 This is unsafe for certain floats even in non-IEEE formats.
3540 In IEEE, it is unsafe because it does wrong for NaNs.
3541 Also note that operand_equal_p is always false if an operand
3544 if (operand_equal_p (arg0, arg1,
3545 TREE_CODE (type) == REAL_TYPE))
3546 return convert (type, integer_zero_node);
3551 if (TREE_CODE (type) != REAL_TYPE)
3553 if (integer_zerop (arg1))
3554 return omit_one_operand (type, arg1, arg0);
3555 if (integer_onep (arg1))
3556 return non_lvalue (convert (type, arg0));
3558 /* (a * (1 << b)) is (a << b) */
3559 if (TREE_CODE (arg1) == LSHIFT_EXPR
3560 && integer_onep (TREE_OPERAND (arg1, 0)))
3561 return fold (build (LSHIFT_EXPR, type, arg0,
3562 TREE_OPERAND (arg1, 1)));
3563 if (TREE_CODE (arg0) == LSHIFT_EXPR
3564 && integer_onep (TREE_OPERAND (arg0, 0)))
3565 return fold (build (LSHIFT_EXPR, type, arg1,
3566 TREE_OPERAND (arg0, 1)));
3570 /* x*0 is 0, except for IEEE floating point. */
3571 if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3572 && real_zerop (arg1))
3573 return omit_one_operand (type, arg1, arg0);
3574 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3575 However, ANSI says we can drop signals,
3576 so we can do this anyway. */
3577 if (real_onep (arg1))
3578 return non_lvalue (convert (type, arg0));
3580 if (! wins && real_twop (arg1))
3582 tree arg = save_expr (arg0);
3583 return build (PLUS_EXPR, type, arg, arg);
3590 if (integer_all_onesp (arg1))
3591 return omit_one_operand (type, arg1, arg0);
3592 if (integer_zerop (arg1))
3593 return non_lvalue (convert (type, arg0));
3594 t1 = distribute_bit_expr (code, type, arg0, arg1);
3595 if (t1 != NULL_TREE)
3598 /* (a << C1) | (a >> C2) if A is unsigned and C1+C2 is the size of A
3599 is a rotate of A by C1 bits. */
3601 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3602 || TREE_CODE (arg0) == LSHIFT_EXPR)
3603 && (TREE_CODE (arg1) == RSHIFT_EXPR
3604 || TREE_CODE (arg1) == LSHIFT_EXPR)
3605 && TREE_CODE (arg0) != TREE_CODE (arg1)
3606 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3607 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3608 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3609 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3610 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3611 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3612 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3613 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3614 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3615 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3616 TREE_CODE (arg0) == LSHIFT_EXPR
3617 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
3622 if (integer_zerop (arg1))
3623 return non_lvalue (convert (type, arg0));
3624 if (integer_all_onesp (arg1))
3625 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3630 if (integer_all_onesp (arg1))
3631 return non_lvalue (convert (type, arg0));
3632 if (integer_zerop (arg1))
3633 return omit_one_operand (type, arg1, arg0);
3634 t1 = distribute_bit_expr (code, type, arg0, arg1);
3635 if (t1 != NULL_TREE)
3637 /* Simplify ((int)c & 0x377) into (int)c, if c is unsigned char. */
3638 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3639 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3641 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3642 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3643 && (~TREE_INT_CST_LOW (arg0)
3644 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3645 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3647 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3648 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3650 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3651 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3652 && (~TREE_INT_CST_LOW (arg1)
3653 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3654 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3658 case BIT_ANDTC_EXPR:
3659 if (integer_all_onesp (arg0))
3660 return non_lvalue (convert (type, arg1));
3661 if (integer_zerop (arg0))
3662 return omit_one_operand (type, arg0, arg1);
3663 if (TREE_CODE (arg1) == INTEGER_CST)
3665 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3666 code = BIT_AND_EXPR;
3671 case TRUNC_DIV_EXPR:
3672 case ROUND_DIV_EXPR:
3673 case FLOOR_DIV_EXPR:
3675 case EXACT_DIV_EXPR:
3677 if (integer_onep (arg1))
3678 return non_lvalue (convert (type, arg0));
3679 if (integer_zerop (arg1))
3682 /* If we have ((a * C1) / C2) and C1 % C2 == 0, we can replace this with
3683 (a * (C1/C2). Also look for when we have a SAVE_EXPR in
3685 if (TREE_CODE (arg1) == INTEGER_CST
3686 && TREE_INT_CST_LOW (arg1) > 0 && TREE_INT_CST_HIGH (arg1) == 0
3687 && TREE_CODE (arg0) == MULT_EXPR
3688 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3689 && TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) > 0
3690 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3691 && 0 == (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3692 % TREE_INT_CST_LOW (arg1)))
3695 = build_int_2 (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3696 / TREE_INT_CST_LOW (arg1), 0);
3698 TREE_TYPE (new_op) = type;
3699 return build (MULT_EXPR, type, TREE_OPERAND (arg0, 0), new_op);
3702 else if (TREE_CODE (arg1) == INTEGER_CST
3703 && TREE_INT_CST_LOW (arg1) > 0 && TREE_INT_CST_HIGH (arg1) == 0
3704 && TREE_CODE (arg0) == SAVE_EXPR
3705 && TREE_CODE (TREE_OPERAND (arg0, 0)) == MULT_EXPR
3706 && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3708 && (TREE_INT_CST_LOW (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3710 && (TREE_INT_CST_HIGH (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3712 && (TREE_INT_CST_LOW (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3713 % TREE_INT_CST_LOW (arg1)) == 0)
3716 = build_int_2 (TREE_INT_CST_LOW (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1))
3717 / TREE_INT_CST_LOW (arg1), 0);
3719 TREE_TYPE (new_op) = type;
3720 return build (MULT_EXPR, type,
3721 TREE_OPERAND (TREE_OPERAND (arg0, 0), 0), new_op);
3724 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3725 #ifndef REAL_INFINITY
3726 if (TREE_CODE (arg1) == REAL_CST
3727 && real_zerop (arg1))
3730 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3735 case FLOOR_MOD_EXPR:
3736 case ROUND_MOD_EXPR:
3737 case TRUNC_MOD_EXPR:
3738 if (integer_onep (arg1))
3739 return omit_one_operand (type, integer_zero_node, arg0);
3740 if (integer_zerop (arg1))
3748 if (integer_zerop (arg1))
3749 return non_lvalue (convert (type, arg0));
3750 /* Since negative shift count is not well-defined,
3751 don't try to compute it in the compiler. */
3752 if (tree_int_cst_lt (arg1, integer_zero_node))
3757 if (operand_equal_p (arg0, arg1, 0))
3759 if (TREE_CODE (type) == INTEGER_TYPE
3760 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
3761 return omit_one_operand (type, arg1, arg0);
3765 if (operand_equal_p (arg0, arg1, 0))
3767 if (TREE_CODE (type) == INTEGER_TYPE
3768 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
3769 return omit_one_operand (type, arg1, arg0);
3772 case TRUTH_NOT_EXPR:
3773 /* Note that the operand of this must be an int
3774 and its values must be 0 or 1.
3775 ("true" is a fixed value perhaps depending on the language,
3776 but we don't handle values other than 1 correctly yet.) */
3777 return invert_truthvalue (arg0);
3779 case TRUTH_ANDIF_EXPR:
3780 /* Note that the operands of this must be ints
3781 and their values must be 0 or 1.
3782 ("true" is a fixed value perhaps depending on the language.) */
3783 /* If first arg is constant zero, return it. */
3784 if (integer_zerop (arg0))
3786 case TRUTH_AND_EXPR:
3787 /* If either arg is constant true, drop it. */
3788 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
3789 return non_lvalue (arg1);
3790 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
3791 return non_lvalue (arg0);
3792 /* If second arg is constant zero, result is zero, but first arg
3793 must be evaluated. */
3794 if (integer_zerop (arg1))
3795 return omit_one_operand (type, arg1, arg0);
3798 /* Check for the possibility of merging component references. If our
3799 lhs is another similar operation, try to merge its rhs with our
3800 rhs. Then try to merge our lhs and rhs. */
3803 if (TREE_CODE (arg0) == code)
3805 tem = fold_truthop (code, type,
3806 TREE_OPERAND (arg0, 1), arg1);
3808 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
3811 tem = fold_truthop (code, type, arg0, arg1);
3817 case TRUTH_ORIF_EXPR:
3818 /* Note that the operands of this must be ints
3819 and their values must be 0 or true.
3820 ("true" is a fixed value perhaps depending on the language.) */
3821 /* If first arg is constant true, return it. */
3822 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
3825 /* If either arg is constant zero, drop it. */
3826 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
3827 return non_lvalue (arg1);
3828 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
3829 return non_lvalue (arg0);
3830 /* If second arg is constant true, result is true, but we must
3831 evaluate first arg. */
3832 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
3833 return omit_one_operand (type, arg1, arg0);
3836 case TRUTH_XOR_EXPR:
3837 /* If either arg is constant zero, drop it. */
3838 if (integer_zerop (arg0))
3839 return non_lvalue (arg1);
3840 if (integer_zerop (arg1))
3841 return non_lvalue (arg0);
3842 /* If either arg is constant true, this is a logical inversion. */
3843 if (integer_onep (arg0))
3844 return non_lvalue (invert_truthvalue (arg1));
3845 if (integer_onep (arg1))
3846 return non_lvalue (invert_truthvalue (arg0));
3855 /* If one arg is a constant integer, put it last. */
3856 if (TREE_CODE (arg0) == INTEGER_CST
3857 && TREE_CODE (arg1) != INTEGER_CST)
3859 TREE_OPERAND (t, 0) = arg1;
3860 TREE_OPERAND (t, 1) = arg0;
3861 arg0 = TREE_OPERAND (t, 0);
3862 arg1 = TREE_OPERAND (t, 1);
3863 code = swap_tree_comparison (code);
3864 TREE_SET_CODE (t, code);
3867 /* Convert foo++ == CONST into ++foo == CONST + INCR.
3868 First, see if one arg is constant; find the constant arg
3869 and the other one. */
3871 tree constop = 0, varop;
3874 if (TREE_CONSTANT (arg1))
3875 constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
3876 if (TREE_CONSTANT (arg0))
3877 constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
3879 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
3881 /* This optimization is invalid for ordered comparisons
3882 if CONST+INCR overflows or if foo+incr might overflow.
3883 This optimization is invalid for floating point due to rounding.
3884 For pointer types we assume overflow doesn't happen. */
3885 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
3886 || (TREE_CODE (TREE_TYPE (varop)) != REAL_TYPE
3887 && (code == EQ_EXPR || code == NE_EXPR)))
3890 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
3891 constop, TREE_OPERAND (varop, 1)));
3892 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
3893 *constoploc = newconst;
3897 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
3899 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
3900 || (TREE_CODE (TREE_TYPE (varop)) != REAL_TYPE
3901 && (code == EQ_EXPR || code == NE_EXPR)))
3904 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
3905 constop, TREE_OPERAND (varop, 1)));
3906 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
3907 *constoploc = newconst;
3913 /* Change X >= CST to X > (CST - 1) if CST is positive. */
3914 if (TREE_CODE (arg1) == INTEGER_CST
3915 && TREE_CODE (arg0) != INTEGER_CST
3916 && ! tree_int_cst_lt (arg1, integer_one_node))
3918 switch (TREE_CODE (t))
3922 TREE_SET_CODE (t, code);
3923 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
3924 TREE_OPERAND (t, 1) = arg1;
3929 TREE_SET_CODE (t, code);
3930 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
3931 TREE_OPERAND (t, 1) = arg1;
3935 /* If this is an EQ or NE comparison with zero and ARG0 is
3936 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
3937 two operations, but the latter can be done in one less insn
3938 on machines that have only two-operand insns or on which a
3939 constant cannot be the first operand. */
3940 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
3941 && TREE_CODE (arg0) == BIT_AND_EXPR)
3943 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
3944 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
3946 fold (build (code, type,
3947 build (BIT_AND_EXPR, TREE_TYPE (arg0),
3949 TREE_TYPE (TREE_OPERAND (arg0, 0)),
3950 TREE_OPERAND (arg0, 1),
3951 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
3952 convert (TREE_TYPE (arg0),
3955 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
3956 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
3958 fold (build (code, type,
3959 build (BIT_AND_EXPR, TREE_TYPE (arg0),
3961 TREE_TYPE (TREE_OPERAND (arg0, 1)),
3962 TREE_OPERAND (arg0, 0),
3963 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
3964 convert (TREE_TYPE (arg0),
3969 /* If this is an NE comparison of zero with an AND of one, remove the
3970 comparison since the AND will give the correct value. */
3971 if (code == NE_EXPR && integer_zerop (arg1)
3972 && TREE_CODE (arg0) == BIT_AND_EXPR
3973 && integer_onep (TREE_OPERAND (arg0, 1)))
3974 return convert (type, arg0);
3976 /* If we have (A & C) == C where C is a power of 2, convert this into
3977 (A & C) != 0. Similarly for NE_EXPR. */
3978 if ((code == EQ_EXPR || code == NE_EXPR)
3979 && TREE_CODE (arg0) == BIT_AND_EXPR
3980 && integer_pow2p (TREE_OPERAND (arg0, 1))
3981 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3982 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
3983 arg0, integer_zero_node);
3985 /* Simplify comparison of something with itself. (For IEEE
3986 floating-point, we can only do some of these simplifications.) */
3987 if (operand_equal_p (arg0, arg1, 0))
3994 if (TREE_CODE (TREE_TYPE (arg0)) == INTEGER_TYPE)
3996 t = build_int_2 (1, 0);
3997 TREE_TYPE (t) = type;
4001 TREE_SET_CODE (t, code);
4005 /* For NE, we can only do this simplification if integer. */
4006 if (TREE_CODE (TREE_TYPE (arg0)) != INTEGER_TYPE)
4008 /* ... fall through ... */
4011 t = build_int_2 (0, 0);
4012 TREE_TYPE (t) = type;
4017 /* An unsigned comparison against 0 can be simplified. */
4018 if (integer_zerop (arg1)
4019 && (TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE
4020 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
4021 && TREE_UNSIGNED (TREE_TYPE (arg1)))
4023 switch (TREE_CODE (t))
4027 TREE_SET_CODE (t, NE_EXPR);
4031 TREE_SET_CODE (t, EQ_EXPR);
4034 return omit_one_operand (integer_type_node,
4035 integer_one_node, arg0);
4037 return omit_one_operand (integer_type_node,
4038 integer_zero_node, arg0);
4042 /* If we are comparing an expression that just has comparisons
4043 of two integer values, arithmetic expressions of those comparisons,
4044 and constants, we can simplify it. There are only three cases
4045 to check: the two values can either be equal, the first can be
4046 greater, or the second can be greater. Fold the expression for
4047 those three values. Since each value must be 0 or 1, we have
4048 eight possibilities, each of which corresponds to the constant 0
4049 or 1 or one of the six possible comparisons.
4051 This handles common cases like (a > b) == 0 but also handles
4052 expressions like ((x > y) - (y > x)) > 0, which supposedly
4053 occur in macroized code. */
4055 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
4057 tree cval1 = 0, cval2 = 0;
4059 if (twoval_comparison_p (arg0, &cval1, &cval2)
4060 /* Don't handle degenerate cases here; they should already
4061 have been handled anyway. */
4062 && cval1 != 0 && cval2 != 0
4063 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
4064 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
4065 && TREE_CODE (TREE_TYPE (cval1)) == INTEGER_TYPE
4066 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
4067 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
4069 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
4070 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
4072 /* We can't just pass T to eval_subst in case cval1 or cval2
4073 was the same as ARG1. */
4076 = fold (build (code, type,
4077 eval_subst (arg0, cval1, maxval, cval2, minval),
4080 = fold (build (code, type,
4081 eval_subst (arg0, cval1, maxval, cval2, maxval),
4084 = fold (build (code, type,
4085 eval_subst (arg0, cval1, minval, cval2, maxval),
4088 /* All three of these results should be 0 or 1. Confirm they
4089 are. Then use those values to select the proper code
4092 if ((integer_zerop (high_result)
4093 || integer_onep (high_result))
4094 && (integer_zerop (equal_result)
4095 || integer_onep (equal_result))
4096 && (integer_zerop (low_result)
4097 || integer_onep (low_result)))
4099 /* Make a 3-bit mask with the high-order bit being the
4100 value for `>', the next for '=', and the low for '<'. */
4101 switch ((integer_onep (high_result) * 4)
4102 + (integer_onep (equal_result) * 2)
4103 + integer_onep (low_result))
4107 return omit_one_operand (type, integer_zero_node, arg0);
4128 return omit_one_operand (type, integer_one_node, arg0);
4131 return fold (build (code, type, cval1, cval2));
4136 /* If this is a comparison of a field, we may be able to simplify it. */
4137 if ((TREE_CODE (arg0) == COMPONENT_REF
4138 || TREE_CODE (arg0) == BIT_FIELD_REF)
4139 && (code == EQ_EXPR || code == NE_EXPR)
4140 /* Handle the constant case even without -O
4141 to make sure the warnings are given. */
4142 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
4144 t1 = optimize_bit_field_compare (code, type, arg0, arg1);
4148 /* From here on, the only cases we handle are when the result is
4149 known to be a constant.
4151 To compute GT, swap the arguments and do LT.
4152 To compute GE, do LT and invert the result.
4153 To compute LE, swap the arguments, do LT and invert the result.
4154 To compute NE, do EQ and invert the result.
4156 Therefore, the code below must handle only EQ and LT. */
4158 if (code == LE_EXPR || code == GT_EXPR)
4160 tem = arg0, arg0 = arg1, arg1 = tem;
4161 code = swap_tree_comparison (code);
4164 /* Note that it is safe to invert for real values here because we
4165 will check below in the one case that it matters. */
4168 if (code == NE_EXPR || code == GE_EXPR)
4171 code = invert_tree_comparison (code);
4174 /* Compute a result for LT or EQ if args permit;
4175 otherwise return T. */
4176 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
4178 if (code == EQ_EXPR)
4179 t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
4180 == TREE_INT_CST_LOW (arg1))
4181 && (TREE_INT_CST_HIGH (arg0)
4182 == TREE_INT_CST_HIGH (arg1)),
4185 t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
4186 ? INT_CST_LT_UNSIGNED (arg0, arg1)
4187 : INT_CST_LT (arg0, arg1)),
4191 /* Assume a nonexplicit constant cannot equal an explicit one,
4192 since such code would be undefined anyway.
4193 Exception: on sysvr4, using #pragma weak,
4194 a label can come out as 0. */
4195 else if (TREE_CODE (arg1) == INTEGER_CST
4196 && !integer_zerop (arg1)
4197 && TREE_CONSTANT (arg0)
4198 && TREE_CODE (arg0) == ADDR_EXPR
4200 t1 = build_int_2 (0, 0);
4202 /* Two real constants can be compared explicitly. */
4203 else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
4205 /* If either operand is a NaN, the result is false with two
4206 exceptions: First, an NE_EXPR is true on NaNs, but that case
4207 is already handled correctly since we will be inverting the
4208 result for NE_EXPR. Second, if we had inverted a LE_EXPR
4209 or a GE_EXPR into a LT_EXPR, we must return true so that it
4210 will be inverted into false. */
4212 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
4213 || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
4214 t1 = build_int_2 (invert && code == LT_EXPR, 0);
4216 else if (code == EQ_EXPR)
4217 t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
4218 TREE_REAL_CST (arg1)),
4221 t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
4222 TREE_REAL_CST (arg1)),
4226 if (t1 == NULL_TREE)
4230 TREE_INT_CST_LOW (t1) ^= 1;
4232 TREE_TYPE (t1) = type;
4236 if (TREE_CODE (arg0) == INTEGER_CST)
4237 return TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1));
4238 else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
4239 return omit_one_operand (type, arg1, arg0);
4241 /* If the second operand is zero, invert the comparison and swap
4242 the second and third operands. Likewise if the second operand
4243 is constant and the third is not or if the third operand is
4244 equivalent to the first operand of the comparison. */
4246 if (integer_zerop (arg1)
4247 || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
4248 || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4249 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4250 TREE_OPERAND (t, 2),
4251 TREE_OPERAND (arg0, 1))))
4253 /* See if this can be inverted. If it can't, possibly because
4254 it was a floating-point inequality comparison, don't do
4256 tem = invert_truthvalue (arg0);
4258 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4260 arg0 = TREE_OPERAND (t, 0) = tem;
4261 TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
4262 TREE_OPERAND (t, 2) = arg1;
4263 arg1 = TREE_OPERAND (t, 1);
4267 /* If we have A op B ? A : C, we may be able to convert this to a
4268 simpler expression, depending on the operation and the values
4269 of B and C. IEEE floating point prevents this though,
4270 because A or B might be -0.0 or a NaN. */
4272 if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4273 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4274 || TREE_CODE (TREE_TYPE (TREE_OPERAND (arg0, 0))) != REAL_TYPE)
4275 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4276 arg1, TREE_OPERAND (arg0, 1)))
4278 tree arg2 = TREE_OPERAND (t, 2);
4279 enum tree_code comp_code = TREE_CODE (arg0);
4281 /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
4282 depending on the comparison operation. */
4283 if (integer_zerop (TREE_OPERAND (arg0, 1))
4284 && TREE_CODE (arg2) == NEGATE_EXPR
4285 && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
4289 return fold (build1 (NEGATE_EXPR, type, arg1));
4291 return convert (type, arg1);
4294 return fold (build1 (ABS_EXPR, type, arg1));
4297 return fold (build1 (NEGATE_EXPR, type,
4298 fold (build1 (ABS_EXPR, type, arg1))));
4301 /* If this is A != 0 ? A : 0, this is simply A. For ==, it is
4304 if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
4306 if (comp_code == NE_EXPR)
4307 return convert (type, arg1);
4308 else if (comp_code == EQ_EXPR)
4309 return convert (type, integer_zero_node);
4312 /* If this is A op B ? A : B, this is either A, B, min (A, B),
4313 or max (A, B), depending on the operation. */
4315 if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
4316 arg2, TREE_OPERAND (arg0, 0)))
4320 return convert (type, arg2);
4322 return convert (type, arg1);
4325 return fold (build (MIN_EXPR, type, arg1, arg2));
4328 return fold (build (MAX_EXPR, type, arg1, arg2));
4331 /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
4332 we might still be able to simplify this. For example,
4333 if C1 is one less or one more than C2, this might have started
4334 out as a MIN or MAX and been transformed by this function.
4335 Only good for INTEGER_TYPE, because we need TYPE_MAX_VALUE. */
4337 if (TREE_CODE (type) == INTEGER_TYPE
4338 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4339 && TREE_CODE (arg2) == INTEGER_CST)
4343 /* We can replace A with C1 in this case. */
4344 arg1 = TREE_OPERAND (t, 1)
4345 = convert (type, TREE_OPERAND (arg0, 1));
4349 /* If C1 is C2 + 1, this is min(A, C2). */
4350 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4351 && operand_equal_p (TREE_OPERAND (arg0, 1),
4352 const_binop (PLUS_EXPR, arg2,
4353 integer_one_node, 0), 1))
4354 return fold (build (MIN_EXPR, type, arg1, arg2));
4358 /* If C1 is C2 - 1, this is min(A, C2). */
4359 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4360 && operand_equal_p (TREE_OPERAND (arg0, 1),
4361 const_binop (MINUS_EXPR, arg2,
4362 integer_one_node, 0), 1))
4363 return fold (build (MIN_EXPR, type, arg1, arg2));
4367 /* If C1 is C2 - 1, this is max(A, C2). */
4368 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4369 && operand_equal_p (TREE_OPERAND (arg0, 1),
4370 const_binop (MINUS_EXPR, arg2,
4371 integer_one_node, 0), 1))
4372 return fold (build (MAX_EXPR, type, arg1, arg2));
4376 /* If C1 is C2 + 1, this is max(A, C2). */
4377 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4378 && operand_equal_p (TREE_OPERAND (arg0, 1),
4379 const_binop (PLUS_EXPR, arg2,
4380 integer_one_node, 0), 1))
4381 return fold (build (MAX_EXPR, type, arg1, arg2));
4386 /* Convert A ? 1 : 0 to simply A. */
4387 if (integer_onep (TREE_OPERAND (t, 1))
4388 && integer_zerop (TREE_OPERAND (t, 2))
4389 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
4390 call to fold will try to move the conversion inside
4391 a COND, which will recurse. In that case, the COND_EXPR
4392 is probably the best choice, so leave it alone. */
4393 && type == TREE_TYPE (arg0))
4397 /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
4398 operation is simply A & 2. */
4400 if (integer_zerop (TREE_OPERAND (t, 2))
4401 && TREE_CODE (arg0) == NE_EXPR
4402 && integer_zerop (TREE_OPERAND (arg0, 1))
4403 && integer_pow2p (arg1)
4404 && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
4405 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
4407 return convert (type, TREE_OPERAND (arg0, 0));
4412 if (TREE_SIDE_EFFECTS (arg0))
4414 /* Don't let (0, 0) be null pointer constant. */
4415 if (integer_zerop (arg1))
4416 return non_lvalue (arg1);
4421 return build_complex (arg0, arg1);
4425 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
4427 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
4428 return omit_one_operand (type, TREE_OPERAND (arg0, 0),
4429 TREE_OPERAND (arg0, 1));
4430 else if (TREE_CODE (arg0) == COMPLEX_CST)
4431 return TREE_REALPART (arg0);
4432 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
4433 return build_binary_op (TREE_CODE (arg0), type,
4434 build_unary_op (REALPART_EXPR,
4435 TREE_OPERAND (arg0, 0),
4437 build_unary_op (REALPART_EXPR,
4438 TREE_OPERAND (arg0, 1),
4444 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
4445 return convert (type, integer_zero_node);
4446 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
4447 return omit_one_operand (type, TREE_OPERAND (arg0, 1),
4448 TREE_OPERAND (arg0, 0));
4449 else if (TREE_CODE (arg0) == COMPLEX_CST)
4450 return TREE_IMAGPART (arg0);
4451 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
4452 return build_binary_op (TREE_CODE (arg0), type,
4453 build_unary_op (IMAGPART_EXPR,
4454 TREE_OPERAND (arg0, 0),
4456 build_unary_op (IMAGPART_EXPR,
4457 TREE_OPERAND (arg0, 1),
4464 } /* switch (code) */