1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 1988, 1992, 1993, 1994 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
20 /*@@ Fix lossage on folding division of big integers. */
22 /*@@ This file should be rewritten to use an arbitrary precision
23 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
24 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
25 @@ The routines that translate from the ap rep should
26 @@ warn if precision et. al. is lost.
27 @@ This would also make life easier when this technology is used
28 @@ for cross-compilers. */
31 /* The entry points in this file are fold, size_int and size_binop.
33 fold takes a tree as argument and returns a simplified tree.
35 size_binop takes a tree code for an arithmetic operation
36 and two operands that are trees, and produces a tree for the
37 result, assuming the type comes from `sizetype'.
39 size_int takes an integer value, and creates a tree constant
40 with type from `sizetype'. */
48 /* Handle floating overflow for `const_binop'. */
49 static jmp_buf float_error;
51 static void encode PROTO((short *, HOST_WIDE_INT, HOST_WIDE_INT));
52 static void decode PROTO((short *, HOST_WIDE_INT *, HOST_WIDE_INT *));
53 static int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
54 HOST_WIDE_INT, HOST_WIDE_INT,
55 HOST_WIDE_INT, HOST_WIDE_INT *,
56 HOST_WIDE_INT *, HOST_WIDE_INT *,
58 static int split_tree PROTO((tree, enum tree_code, tree *, tree *, int *));
59 static tree const_binop PROTO((enum tree_code, tree, tree, int));
60 static tree fold_convert PROTO((tree, tree));
61 static enum tree_code invert_tree_comparison PROTO((enum tree_code));
62 static enum tree_code swap_tree_comparison PROTO((enum tree_code));
63 static int truth_value_p PROTO((enum tree_code));
64 static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
65 static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
66 static tree eval_subst PROTO((tree, tree, tree, tree, tree));
67 static tree omit_one_operand PROTO((tree, tree, tree));
68 static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
69 static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
70 static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
72 static tree decode_field_reference PROTO((tree, int *, int *,
73 enum machine_mode *, int *,
75 static int all_ones_mask_p PROTO((tree, int));
76 static int simple_operand_p PROTO((tree));
77 static tree range_test PROTO((enum tree_code, tree, enum tree_code,
78 enum tree_code, tree, tree, tree));
79 static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
85 /* Yield nonzero if a signed left shift of A by B bits overflows. */
86 #define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))
88 /* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
89 Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
90 Then this yields nonzero if overflow occurred during the addition.
91 Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
92 Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
93 #define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
95 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
96 We do that by representing the two-word integer as MAX_SHORTS shorts,
97 with only 8 bits stored in each short, as a positive number. */
99 /* Unpack a two-word integer into MAX_SHORTS shorts.
100 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
101 SHORTS points to the array of shorts. */
104 encode (shorts, low, hi)
106 HOST_WIDE_INT low, hi;
110 for (i = 0; i < MAX_SHORTS / 2; i++)
112 shorts[i] = (low >> (i * 8)) & 0xff;
113 shorts[i + MAX_SHORTS / 2] = (hi >> (i * 8) & 0xff);
117 /* Pack an array of MAX_SHORTS shorts into a two-word integer.
118 SHORTS points to the array of shorts.
119 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
122 decode (shorts, low, hi)
124 HOST_WIDE_INT *low, *hi;
127 HOST_WIDE_INT lv = 0, hv = 0;
129 for (i = 0; i < MAX_SHORTS / 2; i++)
131 lv |= (HOST_WIDE_INT) shorts[i] << (i * 8);
132 hv |= (HOST_WIDE_INT) shorts[i + MAX_SHORTS / 2] << (i * 8);
138 /* Make the integer constant T valid for its type
139 by setting to 0 or 1 all the bits in the constant
140 that don't belong in the type.
141 Yield 1 if a signed overflow occurs, 0 otherwise.
142 If OVERFLOW is nonzero, a signed overflow has already occurred
143 in calculating T, so propagate it. */
146 force_fit_type (t, overflow)
150 HOST_WIDE_INT low, high;
153 if (TREE_CODE (t) != INTEGER_CST)
156 low = TREE_INT_CST_LOW (t);
157 high = TREE_INT_CST_HIGH (t);
159 if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
162 prec = TYPE_PRECISION (TREE_TYPE (t));
164 /* First clear all bits that are beyond the type's precision. */
166 if (prec == 2 * HOST_BITS_PER_WIDE_INT)
168 else if (prec > HOST_BITS_PER_WIDE_INT)
170 TREE_INT_CST_HIGH (t)
171 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
175 TREE_INT_CST_HIGH (t) = 0;
176 if (prec < HOST_BITS_PER_WIDE_INT)
177 TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
180 /* Unsigned types do not suffer sign extension or overflow. */
181 if (TREE_UNSIGNED (TREE_TYPE (t)))
184 /* If the value's sign bit is set, extend the sign. */
185 if (prec != 2 * HOST_BITS_PER_WIDE_INT
186 && (prec > HOST_BITS_PER_WIDE_INT
187 ? (TREE_INT_CST_HIGH (t)
188 & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
189 : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
191 /* Value is negative:
192 set to 1 all the bits that are outside this type's precision. */
193 if (prec > HOST_BITS_PER_WIDE_INT)
195 TREE_INT_CST_HIGH (t)
196 |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
200 TREE_INT_CST_HIGH (t) = -1;
201 if (prec < HOST_BITS_PER_WIDE_INT)
202 TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
206 /* Yield nonzero if signed overflow occurred. */
208 ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
212 /* Add two doubleword integers with doubleword result.
213 Each argument is given as two `HOST_WIDE_INT' pieces.
214 One argument is L1 and H1; the other, L2 and H2.
215 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
216 We use the 8-shorts representation internally. */
219 add_double (l1, h1, l2, h2, lv, hv)
220 HOST_WIDE_INT l1, h1, l2, h2;
221 HOST_WIDE_INT *lv, *hv;
223 short arg1[MAX_SHORTS];
224 short arg2[MAX_SHORTS];
225 register int carry = 0;
228 encode (arg1, l1, h1);
229 encode (arg2, l2, h2);
231 for (i = 0; i < MAX_SHORTS; i++)
233 carry += arg1[i] + arg2[i];
234 arg1[i] = carry & 0xff;
238 decode (arg1, lv, hv);
239 return overflow_sum_sign (h1, h2, *hv);
242 /* Negate a doubleword integer with doubleword result.
243 Return nonzero if the operation overflows, assuming it's signed.
244 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
245 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
246 We use the 8-shorts representation internally. */
249 neg_double (l1, h1, lv, hv)
250 HOST_WIDE_INT l1, h1;
251 HOST_WIDE_INT *lv, *hv;
257 return (*hv & h1) < 0;
267 /* Multiply two doubleword integers with doubleword result.
268 Return nonzero if the operation overflows, assuming it's signed.
269 Each argument is given as two `HOST_WIDE_INT' pieces.
270 One argument is L1 and H1; the other, L2 and H2.
271 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
272 We use the 8-shorts representation internally. */
275 mul_double (l1, h1, l2, h2, lv, hv)
276 HOST_WIDE_INT l1, h1, l2, h2;
277 HOST_WIDE_INT *lv, *hv;
279 short arg1[MAX_SHORTS];
280 short arg2[MAX_SHORTS];
281 short prod[MAX_SHORTS * 2];
282 register int carry = 0;
283 register int i, j, k;
284 HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
286 /* These cases are used extensively, arising from pointer combinations. */
291 int overflow = left_shift_overflows (h1, 1);
292 unsigned HOST_WIDE_INT temp = l1 + l1;
293 *hv = (h1 << 1) + (temp < l1);
299 int overflow = left_shift_overflows (h1, 2);
300 unsigned HOST_WIDE_INT temp = l1 + l1;
301 h1 = (h1 << 2) + ((temp < l1) << 1);
311 int overflow = left_shift_overflows (h1, 3);
312 unsigned HOST_WIDE_INT temp = l1 + l1;
313 h1 = (h1 << 3) + ((temp < l1) << 2);
316 h1 += (temp < l1) << 1;
326 encode (arg1, l1, h1);
327 encode (arg2, l2, h2);
329 bzero (prod, sizeof prod);
331 for (i = 0; i < MAX_SHORTS; i++)
332 for (j = 0; j < MAX_SHORTS; j++)
335 carry = arg1[i] * arg2[j];
339 prod[k] = carry & 0xff;
345 decode (prod, lv, hv); /* This ignores
346 prod[MAX_SHORTS] -> prod[MAX_SHORTS*2-1] */
348 /* Check for overflow by calculating the top half of the answer in full;
349 it should agree with the low half's sign bit. */
350 decode (prod+MAX_SHORTS, &toplow, &tophigh);
353 neg_double (l2, h2, &neglow, &neghigh);
354 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
358 neg_double (l1, h1, &neglow, &neghigh);
359 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
361 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
364 /* Shift the doubleword integer in L1, H1 left by COUNT places
365 keeping only PREC bits of result.
366 Shift right if COUNT is negative.
367 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
368 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
371 lshift_double (l1, h1, count, prec, lv, hv, arith)
372 HOST_WIDE_INT l1, h1, count;
374 HOST_WIDE_INT *lv, *hv;
377 short arg1[MAX_SHORTS];
383 rshift_double (l1, h1, - count, prec, lv, hv, arith);
387 encode (arg1, l1, h1);
395 for (i = 0; i < MAX_SHORTS; i++)
397 carry += arg1[i] << 1;
398 arg1[i] = carry & 0xff;
404 decode (arg1, lv, hv);
407 /* Shift the doubleword integer in L1, H1 right by COUNT places
408 keeping only PREC bits of result. COUNT must be positive.
409 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
410 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
413 rshift_double (l1, h1, count, prec, lv, hv, arith)
414 HOST_WIDE_INT l1, h1, count;
416 HOST_WIDE_INT *lv, *hv;
419 short arg1[MAX_SHORTS];
423 encode (arg1, l1, h1);
430 carry = arith && arg1[7] >> 7;
431 for (i = MAX_SHORTS - 1; i >= 0; i--)
435 arg1[i] = (carry >> 1) & 0xff;
440 decode (arg1, lv, hv);
443 /* Rotate the doubleword integer in L1, H1 left by COUNT places
444 keeping only PREC bits of result.
445 Rotate right if COUNT is negative.
446 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
449 lrotate_double (l1, h1, count, prec, lv, hv)
450 HOST_WIDE_INT l1, h1, count;
452 HOST_WIDE_INT *lv, *hv;
454 short arg1[MAX_SHORTS];
460 rrotate_double (l1, h1, - count, prec, lv, hv);
464 encode (arg1, l1, h1);
469 carry = arg1[MAX_SHORTS - 1] >> 7;
472 for (i = 0; i < MAX_SHORTS; i++)
474 carry += arg1[i] << 1;
475 arg1[i] = carry & 0xff;
481 decode (arg1, lv, hv);
484 /* Rotate the doubleword integer in L1, H1 right by COUNT places
485 keeping only PREC bits of result. COUNT must be positive.
486 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
489 rrotate_double (l1, h1, count, prec, lv, hv)
490 HOST_WIDE_INT l1, h1, count;
492 HOST_WIDE_INT *lv, *hv;
494 short arg1[MAX_SHORTS];
498 encode (arg1, l1, h1);
506 for (i = MAX_SHORTS - 1; i >= 0; i--)
510 arg1[i] = (carry >> 1) & 0xff;
515 decode (arg1, lv, hv);
518 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
519 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
520 CODE is a tree code for a kind of division, one of
521 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
523 It controls how the quotient is rounded to an integer.
524 Return nonzero if the operation overflows.
525 UNS nonzero says do unsigned division. */
528 div_and_round_double (code, uns,
529 lnum_orig, hnum_orig, lden_orig, hden_orig,
530 lquo, hquo, lrem, hrem)
533 HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
534 HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
535 HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
538 short num[MAX_SHORTS + 1]; /* extra element for scaling. */
539 short den[MAX_SHORTS], quo[MAX_SHORTS];
540 register int i, j, work;
541 register int carry = 0;
542 HOST_WIDE_INT lnum = lnum_orig;
543 HOST_WIDE_INT hnum = hnum_orig;
544 HOST_WIDE_INT lden = lden_orig;
545 HOST_WIDE_INT hden = hden_orig;
548 if ((hden == 0) && (lden == 0))
551 /* calculate quotient sign and convert operands to unsigned. */
557 /* (minimum integer) / (-1) is the only overflow case. */
558 if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
564 neg_double (lden, hden, &lden, &hden);
568 if (hnum == 0 && hden == 0)
569 { /* single precision */
571 /* This unsigned division rounds toward zero. */
572 *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
577 { /* trivial case: dividend < divisor */
578 /* hden != 0 already checked. */
585 bzero (quo, sizeof quo);
587 bzero (num, sizeof num); /* to zero 9th element */
588 bzero (den, sizeof den);
590 encode (num, lnum, hnum);
591 encode (den, lden, hden);
593 /* This code requires more than just hden == 0.
594 We also have to require that we don't need more than three bytes
595 to hold CARRY. If we ever did need four bytes to hold it, we
596 would lose part of it when computing WORK on the next round. */
597 if (hden == 0 && (((unsigned HOST_WIDE_INT) lden << 8) >> 8) == lden)
598 { /* simpler algorithm */
599 /* hnum != 0 already checked. */
600 for (i = MAX_SHORTS - 1; i >= 0; i--)
602 work = num[i] + (carry << 8);
603 quo[i] = work / (unsigned HOST_WIDE_INT) lden;
604 carry = work % (unsigned HOST_WIDE_INT) lden;
607 else { /* full double precision,
608 with thanks to Don Knuth's
609 "Seminumerical Algorithms". */
611 int quo_est, scale, num_hi_sig, den_hi_sig, quo_hi_sig;
613 /* Find the highest non-zero divisor digit. */
614 for (i = MAX_SHORTS - 1; ; i--)
619 for (i = MAX_SHORTS - 1; ; i--)
624 quo_hi_sig = num_hi_sig - den_hi_sig + 1;
626 /* Ensure that the first digit of the divisor is at least BASE/2.
627 This is required by the quotient digit estimation algorithm. */
629 scale = BASE / (den[den_hi_sig] + 1);
630 if (scale > 1) { /* scale divisor and dividend */
632 for (i = 0; i <= MAX_SHORTS - 1; i++) {
633 work = (num[i] * scale) + carry;
634 num[i] = work & 0xff;
636 if (num[i] != 0) num_hi_sig = i;
639 for (i = 0; i <= MAX_SHORTS - 1; i++) {
640 work = (den[i] * scale) + carry;
641 den[i] = work & 0xff;
643 if (den[i] != 0) den_hi_sig = i;
648 for (i = quo_hi_sig; i > 0; i--) {
649 /* guess the next quotient digit, quo_est, by dividing the first
650 two remaining dividend digits by the high order quotient digit.
651 quo_est is never low and is at most 2 high. */
653 int num_hi; /* index of highest remaining dividend digit */
655 num_hi = i + den_hi_sig;
657 work = (num[num_hi] * BASE) + (num_hi > 0 ? num[num_hi - 1] : 0);
658 if (num[num_hi] != den[den_hi_sig]) {
659 quo_est = work / den[den_hi_sig];
665 /* refine quo_est so it's usually correct, and at most one high. */
666 while ((den[den_hi_sig - 1] * quo_est)
667 > (((work - (quo_est * den[den_hi_sig])) * BASE)
668 + ((num_hi - 1) > 0 ? num[num_hi - 2] : 0)))
671 /* Try QUO_EST as the quotient digit, by multiplying the
672 divisor by QUO_EST and subtracting from the remaining dividend.
673 Keep in mind that QUO_EST is the I - 1st digit. */
677 for (j = 0; j <= den_hi_sig; j++)
681 work = num[i + j - 1] - (quo_est * den[j]) + carry;
689 num[i + j - 1] = digit;
692 /* if quo_est was high by one, then num[i] went negative and
693 we need to correct things. */
698 carry = 0; /* add divisor back in */
699 for (j = 0; j <= den_hi_sig; j++)
701 work = num[i + j - 1] + den[j] + carry;
711 num[i + j - 1] = work;
713 num [num_hi] += carry;
716 /* store the quotient digit. */
717 quo[i - 1] = quo_est;
721 decode (quo, lquo, hquo);
724 /* if result is negative, make it so. */
726 neg_double (*lquo, *hquo, lquo, hquo);
728 /* compute trial remainder: rem = num - (quo * den) */
729 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
730 neg_double (*lrem, *hrem, lrem, hrem);
731 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
736 case TRUNC_MOD_EXPR: /* round toward zero */
737 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
741 case FLOOR_MOD_EXPR: /* round toward negative infinity */
742 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
745 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
748 else return overflow;
752 case CEIL_MOD_EXPR: /* round toward positive infinity */
753 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
755 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
758 else return overflow;
762 case ROUND_MOD_EXPR: /* round to closest integer */
764 HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
765 HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
767 /* get absolute values */
768 if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
769 if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
771 /* if (2 * abs (lrem) >= abs (lden)) */
772 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
773 labs_rem, habs_rem, <wice, &htwice);
774 if (((unsigned HOST_WIDE_INT) habs_den
775 < (unsigned HOST_WIDE_INT) htwice)
776 || (((unsigned HOST_WIDE_INT) habs_den
777 == (unsigned HOST_WIDE_INT) htwice)
778 && ((HOST_WIDE_INT unsigned) labs_den
779 < (unsigned HOST_WIDE_INT) ltwice)))
783 add_double (*lquo, *hquo,
784 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
787 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
790 else return overflow;
798 /* compute true remainder: rem = num - (quo * den) */
799 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
800 neg_double (*lrem, *hrem, lrem, hrem);
801 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
805 #ifndef REAL_ARITHMETIC
806 /* Effectively truncate a real value to represent
807 the nearest possible value in a narrower mode.
808 The result is actually represented in the same data type as the argument,
809 but its value is usually different. */
812 real_value_truncate (mode, arg)
813 enum machine_mode mode;
817 /* Make sure the value is actually stored in memory before we turn off
821 REAL_VALUE_TYPE value;
822 jmp_buf handler, old_handler;
825 if (setjmp (handler))
827 error ("floating overflow");
830 handled = push_float_handler (handler, old_handler);
831 value = REAL_VALUE_TRUNCATE (mode, arg);
832 pop_float_handler (handled, old_handler);
836 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
838 /* Check for infinity in an IEEE double precision number. */
844 /* The IEEE 64-bit double format. */
849 unsigned exponent : 11;
850 unsigned mantissa1 : 20;
855 unsigned mantissa1 : 20;
856 unsigned exponent : 11;
862 if (u.big_endian.sign == 1)
865 return (u.big_endian.exponent == 2047
866 && u.big_endian.mantissa1 == 0
867 && u.big_endian.mantissa2 == 0);
872 return (u.little_endian.exponent == 2047
873 && u.little_endian.mantissa1 == 0
874 && u.little_endian.mantissa2 == 0);
878 /* Check whether an IEEE double precision number is a NaN. */
884 /* The IEEE 64-bit double format. */
889 unsigned exponent : 11;
890 unsigned mantissa1 : 20;
895 unsigned mantissa1 : 20;
896 unsigned exponent : 11;
902 if (u.big_endian.sign == 1)
905 return (u.big_endian.exponent == 2047
906 && (u.big_endian.mantissa1 != 0
907 || u.big_endian.mantissa2 != 0));
912 return (u.little_endian.exponent == 2047
913 && (u.little_endian.mantissa1 != 0
914 || u.little_endian.mantissa2 != 0));
918 /* Check for a negative IEEE double precision number. */
924 /* The IEEE 64-bit double format. */
929 unsigned exponent : 11;
930 unsigned mantissa1 : 20;
935 unsigned mantissa1 : 20;
936 unsigned exponent : 11;
942 if (u.big_endian.sign == 1)
945 return u.big_endian.sign;
950 return u.little_endian.sign;
953 #else /* Target not IEEE */
955 /* Let's assume other float formats don't have infinity.
956 (This can be overridden by redefining REAL_VALUE_ISINF.) */
964 /* Let's assume other float formats don't have NaNs.
965 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
973 /* Let's assume other float formats don't have minus zero.
974 (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
981 #endif /* Target not IEEE */
982 #endif /* no REAL_ARITHMETIC */
984 /* Split a tree IN into a constant and a variable part
985 that could be combined with CODE to make IN.
986 CODE must be a commutative arithmetic operation.
987 Store the constant part into *CONP and the variable in &VARP.
988 Return 1 if this was done; zero means the tree IN did not decompose
991 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
992 Therefore, we must tell the caller whether the variable part
993 was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
994 The value stored is the coefficient for the variable term.
995 The constant term we return should always be added;
996 we negate it if necessary. */
999 split_tree (in, code, varp, conp, varsignp)
1001 enum tree_code code;
1005 register tree outtype = TREE_TYPE (in);
1009 /* Strip any conversions that don't change the machine mode. */
1010 while ((TREE_CODE (in) == NOP_EXPR
1011 || TREE_CODE (in) == CONVERT_EXPR)
1012 && (TYPE_MODE (TREE_TYPE (in))
1013 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
1014 in = TREE_OPERAND (in, 0);
1016 if (TREE_CODE (in) == code
1017 || (! FLOAT_TYPE_P (TREE_TYPE (in))
1018 /* We can associate addition and subtraction together
1019 (even though the C standard doesn't say so)
1020 for integers because the value is not affected.
1021 For reals, the value might be affected, so we can't. */
1022 && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
1023 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
1025 enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
1026 if (code == INTEGER_CST)
1028 *conp = TREE_OPERAND (in, 0);
1029 *varp = TREE_OPERAND (in, 1);
1030 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
1031 && TREE_TYPE (*varp) != outtype)
1032 *varp = convert (outtype, *varp);
1033 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
1036 if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
1038 *conp = TREE_OPERAND (in, 1);
1039 *varp = TREE_OPERAND (in, 0);
1041 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
1042 && TREE_TYPE (*varp) != outtype)
1043 *varp = convert (outtype, *varp);
1044 if (TREE_CODE (in) == MINUS_EXPR)
1046 /* If operation is subtraction and constant is second,
1047 must negate it to get an additive constant.
1048 And this cannot be done unless it is a manifest constant.
1049 It could also be the address of a static variable.
1050 We cannot negate that, so give up. */
1051 if (TREE_CODE (*conp) == INTEGER_CST)
1052 /* Subtracting from integer_zero_node loses for long long. */
1053 *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
1059 if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
1061 *conp = TREE_OPERAND (in, 0);
1062 *varp = TREE_OPERAND (in, 1);
1063 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
1064 && TREE_TYPE (*varp) != outtype)
1065 *varp = convert (outtype, *varp);
1066 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
1073 /* Combine two constants NUM and ARG2 under operation CODE
1074 to produce a new constant.
1075 We assume ARG1 and ARG2 have the same data type,
1076 or at least are the same kind of constant and the same machine mode.
1078 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1081 const_binop (code, arg1, arg2, notrunc)
1082 enum tree_code code;
1083 register tree arg1, arg2;
1086 if (TREE_CODE (arg1) == INTEGER_CST)
1088 register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
1089 register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
1090 HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
1091 HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
1092 HOST_WIDE_INT low, hi;
1093 HOST_WIDE_INT garbagel, garbageh;
1095 int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
1101 t = build_int_2 (int1l | int2l, int1h | int2h);
1105 t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
1109 t = build_int_2 (int1l & int2l, int1h & int2h);
1112 case BIT_ANDTC_EXPR:
1113 t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
1119 /* It's unclear from the C standard whether shifts can overflow.
1120 The following code ignores overflow; perhaps a C standard
1121 interpretation ruling is needed. */
1122 lshift_double (int1l, int1h, int2l,
1123 TYPE_PRECISION (TREE_TYPE (arg1)),
1126 t = build_int_2 (low, hi);
1127 TREE_TYPE (t) = TREE_TYPE (arg1);
1129 force_fit_type (t, 0);
1130 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
1131 TREE_CONSTANT_OVERFLOW (t)
1132 = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
1138 lrotate_double (int1l, int1h, int2l,
1139 TYPE_PRECISION (TREE_TYPE (arg1)),
1141 t = build_int_2 (low, hi);
1148 if ((unsigned HOST_WIDE_INT) int2l < int1l)
1151 overflow = int2h < hi;
1153 t = build_int_2 (int2l, int2h);
1159 if ((unsigned HOST_WIDE_INT) int1l < int2l)
1162 overflow = int1h < hi;
1164 t = build_int_2 (int1l, int1h);
1167 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
1168 t = build_int_2 (low, hi);
1172 if (int2h == 0 && int2l == 0)
1174 t = build_int_2 (int1l, int1h);
1177 neg_double (int2l, int2h, &low, &hi);
1178 add_double (int1l, int1h, low, hi, &low, &hi);
1179 overflow = overflow_sum_sign (hi, int2h, int1h);
1180 t = build_int_2 (low, hi);
1184 /* Optimize simple cases. */
1187 unsigned HOST_WIDE_INT temp;
1192 t = build_int_2 (0, 0);
1195 t = build_int_2 (int2l, int2h);
1198 overflow = left_shift_overflows (int2h, 1);
1199 temp = int2l + int2l;
1200 int2h = (int2h << 1) + (temp < int2l);
1201 t = build_int_2 (temp, int2h);
1203 #if 0 /* This code can lose carries. */
1205 temp = int2l + int2l + int2l;
1206 int2h = int2h * 3 + (temp < int2l);
1207 t = build_int_2 (temp, int2h);
1211 overflow = left_shift_overflows (int2h, 2);
1212 temp = int2l + int2l;
1213 int2h = (int2h << 2) + ((temp < int2l) << 1);
1216 int2h += (temp < int2l);
1217 t = build_int_2 (temp, int2h);
1220 overflow = left_shift_overflows (int2h, 3);
1221 temp = int2l + int2l;
1222 int2h = (int2h << 3) + ((temp < int2l) << 2);
1225 int2h += (temp < int2l) << 1;
1228 int2h += (temp < int2l);
1229 t = build_int_2 (temp, int2h);
1240 t = build_int_2 (0, 0);
1245 t = build_int_2 (int1l, int1h);
1250 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1251 t = build_int_2 (low, hi);
1254 case TRUNC_DIV_EXPR:
1255 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1256 case EXACT_DIV_EXPR:
1257 /* This is a shortcut for a common special case.
1258 It reduces the number of tree nodes generated
1260 if (int2h == 0 && int2l > 0
1261 && TREE_TYPE (arg1) == sizetype
1262 && int1h == 0 && int1l >= 0)
1264 if (code == CEIL_DIV_EXPR)
1266 return size_int (int1l / int2l);
1268 case ROUND_DIV_EXPR:
1269 if (int2h == 0 && int2l == 1)
1271 t = build_int_2 (int1l, int1h);
1274 if (int1l == int2l && int1h == int2h)
1276 if ((int1l | int1h) == 0)
1278 t = build_int_2 (1, 0);
1281 overflow = div_and_round_double (code, uns,
1282 int1l, int1h, int2l, int2h,
1283 &low, &hi, &garbagel, &garbageh);
1284 t = build_int_2 (low, hi);
1287 case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
1288 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
1289 overflow = div_and_round_double (code, uns,
1290 int1l, int1h, int2l, int2h,
1291 &garbagel, &garbageh, &low, &hi);
1292 t = build_int_2 (low, hi);
1299 low = (((unsigned HOST_WIDE_INT) int1h
1300 < (unsigned HOST_WIDE_INT) int2h)
1301 || (((unsigned HOST_WIDE_INT) int1h
1302 == (unsigned HOST_WIDE_INT) int2h)
1303 && ((unsigned HOST_WIDE_INT) int1l
1304 < (unsigned HOST_WIDE_INT) int2l)));
1308 low = ((int1h < int2h)
1309 || ((int1h == int2h)
1310 && ((unsigned HOST_WIDE_INT) int1l
1311 < (unsigned HOST_WIDE_INT) int2l)));
1313 if (low == (code == MIN_EXPR))
1314 t = build_int_2 (int1l, int1h);
1316 t = build_int_2 (int2l, int2h);
1323 TREE_TYPE (t) = TREE_TYPE (arg1);
1325 = ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
1326 | TREE_OVERFLOW (arg1)
1327 | TREE_OVERFLOW (arg2));
1328 TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
1329 | TREE_CONSTANT_OVERFLOW (arg1)
1330 | TREE_CONSTANT_OVERFLOW (arg2));
1333 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1334 if (TREE_CODE (arg1) == REAL_CST)
1338 REAL_VALUE_TYPE value;
1341 d1 = TREE_REAL_CST (arg1);
1342 d2 = TREE_REAL_CST (arg2);
1343 if (setjmp (float_error))
1345 pedwarn ("floating overflow in constant expression");
1346 return build (code, TREE_TYPE (arg1), arg1, arg2);
1348 set_float_handler (float_error);
1350 #ifdef REAL_ARITHMETIC
1351 REAL_ARITHMETIC (value, code, d1, d2);
1368 #ifndef REAL_INFINITY
1377 value = MIN (d1, d2);
1381 value = MAX (d1, d2);
1387 #endif /* no REAL_ARITHMETIC */
1388 t = build_real (TREE_TYPE (arg1),
1389 real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
1390 set_float_handler (NULL_PTR);
1393 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1394 if (TREE_CODE (arg1) == COMPLEX_CST)
1396 register tree r1 = TREE_REALPART (arg1);
1397 register tree i1 = TREE_IMAGPART (arg1);
1398 register tree r2 = TREE_REALPART (arg2);
1399 register tree i2 = TREE_IMAGPART (arg2);
1405 t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
1406 const_binop (PLUS_EXPR, i1, i2, notrunc));
1410 t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
1411 const_binop (MINUS_EXPR, i1, i2, notrunc));
1415 t = build_complex (const_binop (MINUS_EXPR,
1416 const_binop (MULT_EXPR,
1418 const_binop (MULT_EXPR,
1421 const_binop (PLUS_EXPR,
1422 const_binop (MULT_EXPR,
1424 const_binop (MULT_EXPR,
1431 register tree magsquared
1432 = const_binop (PLUS_EXPR,
1433 const_binop (MULT_EXPR, r2, r2, notrunc),
1434 const_binop (MULT_EXPR, i2, i2, notrunc),
1436 t = build_complex (const_binop (RDIV_EXPR,
1437 const_binop (PLUS_EXPR,
1438 const_binop (MULT_EXPR, r1, r2, notrunc),
1439 const_binop (MULT_EXPR, i1, i2, notrunc),
1441 magsquared, notrunc),
1442 const_binop (RDIV_EXPR,
1443 const_binop (MINUS_EXPR,
1444 const_binop (MULT_EXPR, i1, r2, notrunc),
1445 const_binop (MULT_EXPR, r1, i2, notrunc),
1447 magsquared, notrunc));
1454 TREE_TYPE (t) = TREE_TYPE (arg1);
1460 /* Return an INTEGER_CST with value V and type from `sizetype'. */
1464 unsigned int number;
1467 /* Type-size nodes already made for small sizes. */
1468 static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];
1470 if (number < 2*HOST_BITS_PER_WIDE_INT + 1
1471 && size_table[number] != 0)
1472 return size_table[number];
1473 if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
1475 push_obstacks_nochange ();
1476 /* Make this a permanent node. */
1477 end_temporary_allocation ();
1478 t = build_int_2 (number, 0);
1479 TREE_TYPE (t) = sizetype;
1480 size_table[number] = t;
1485 t = build_int_2 (number, 0);
1486 TREE_TYPE (t) = sizetype;
1491 /* Combine operands OP1 and OP2 with arithmetic operation CODE.
1492 CODE is a tree code. Data type is taken from `sizetype'.
1493 If the operands are constant, so is the result. */
1496 size_binop (code, arg0, arg1)
1497 enum tree_code code;
1500 /* Handle the special case of two integer constants faster. */
1501 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1503 /* And some specific cases even faster than that. */
/* 0 + x, x - 0, and 1 * x need no arithmetic at all; the return
   statements for these identities are elided in this listing. */
1504 if (code == PLUS_EXPR
1505 && TREE_INT_CST_LOW (arg0) == 0
1506 && TREE_INT_CST_HIGH (arg0) == 0)
1508 if (code == MINUS_EXPR
1509 && TREE_INT_CST_LOW (arg1) == 0
1510 && TREE_INT_CST_HIGH (arg1) == 0)
1512 if (code == MULT_EXPR
1513 && TREE_INT_CST_LOW (arg0) == 1
1514 && TREE_INT_CST_HIGH (arg0) == 0)
1516 /* Handle general case of two integer constants. */
1517 return const_binop (code, arg0, arg1, 1);
1520 if (arg0 == error_mark_node || arg1 == error_mark_node)
1521 return error_mark_node;
/* Non-constant operands: build the expression and let fold simplify it. */
1523 return fold (build (code, sizetype, arg0, arg1));
1526 /* Given T, a tree representing type conversion of ARG1, a constant,
1527 return a constant tree representing the result of conversion. */
1530 fold_convert (t, arg1)
1534 register tree type = TREE_TYPE (t);
/* Case 1: converting to a pointer or integral type. */
1536 if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
1538 if (TREE_CODE (arg1) == INTEGER_CST)
1540 /* Given an integer constant, make new constant with new type,
1541 appropriately sign-extended or truncated. */
1542 t = build_int_2 (TREE_INT_CST_LOW (arg1),
1543 TREE_INT_CST_HIGH (arg1));
1544 TREE_TYPE (t) = type;
1545 /* Indicate an overflow if (1) ARG1 already overflowed,
1546 or (2) force_fit_type indicates an overflow.
1547 Tell force_fit_type that an overflow has already occurred
1548 if ARG1 is a too-large unsigned value and T is signed. */
1550 = (TREE_OVERFLOW (arg1)
1551 | force_fit_type (t,
1552 (TREE_INT_CST_HIGH (arg1) < 0
1553 & (TREE_UNSIGNED (type)
1554 < TREE_UNSIGNED (TREE_TYPE (arg1))))))
1555 TREE_CONSTANT_OVERFLOW (t)
1556 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1558 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1559 else if (TREE_CODE (arg1) == REAL_CST)
/* Real constant -> integral type: range-check against the target
   type's min/max, then truncate toward zero. */
1561 REAL_VALUE_TYPE l, x, u;
1563 l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
1564 x = TREE_REAL_CST (arg1);
1565 u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
1567 /* See if X will be in range after truncation towards 0.
1568 To compensate for truncation, move the bounds away from 0,
1569 but reject if X exactly equals the adjusted bounds. */
1570 #ifdef REAL_ARITHMETIC
1571 REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
1572 REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
1577 if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
1579 pedwarn ("real constant out of range for integer conversion");
1582 #ifndef REAL_ARITHMETIC
/* Host-double fallback: split the value into two half-words to form
   the (low, high) pair of an INTEGER_CST without REAL_ARITHMETIC. */
1585 HOST_WIDE_INT low, high;
1586 HOST_WIDE_INT half_word
1587 = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
1589 d = TREE_REAL_CST (arg1);
1593 high = (HOST_WIDE_INT) (d / half_word / half_word);
1594 d -= (REAL_VALUE_TYPE) high * half_word * half_word;
/* If the remainder has the sign bit of LOW set, adjust so the
   stored low word carries that bit explicitly. */
1595 if (d >= (REAL_VALUE_TYPE) half_word * half_word / 2)
1597 low = d - (REAL_VALUE_TYPE) half_word * half_word / 2;
1598 low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
1601 low = (HOST_WIDE_INT) d;
1602 if (TREE_REAL_CST (arg1) < 0)
1603 neg_double (low, high, &low, &high);
1604 t = build_int_2 (low, high);
/* REAL_ARITHMETIC path: let the target-format emulator do the
   real-to-integer conversion. */
1608 HOST_WIDE_INT low, high;
1609 REAL_VALUE_TO_INT (&low, &high, (TREE_REAL_CST (arg1)));
1610 t = build_int_2 (low, high);
1613 TREE_TYPE (t) = type;
1614 force_fit_type (t, 0);
1616 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1617 TREE_TYPE (t) = type;
/* Case 2: converting to a real type. */
1619 else if (TREE_CODE (type) == REAL_TYPE)
1621 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1622 if (TREE_CODE (arg1) == INTEGER_CST)
1623 return build_real_from_int_cst (type, arg1);
1624 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1625 if (TREE_CODE (arg1) == REAL_CST)
/* Trap floating overflow during the truncation via the longjmp-based
   float handler (float_error is declared at file scope). */
1627 if (setjmp (float_error))
1629 pedwarn ("floating overflow in constant expression");
1632 set_float_handler (float_error);
1634 t = build_real (type, real_value_truncate (TYPE_MODE (type),
1635 TREE_REAL_CST (arg1)));
1636 set_float_handler (NULL_PTR);
1640 TREE_CONSTANT (t) = 1;
1644 /* Return an expr equal to X but certainly not valid as an lvalue.
1645 Also make sure it is not valid as a null pointer constant. */
1653 /* These things are certainly not lvalues. */
1654 if (TREE_CODE (x) == NON_LVALUE_EXPR
1655 || TREE_CODE (x) == INTEGER_CST
1656 || TREE_CODE (x) == REAL_CST
1657 || TREE_CODE (x) == STRING_CST
1658 || TREE_CODE (x) == ADDR_EXPR)
/* Constant zero still needs wrapping so it stops being a null
   pointer constant; other non-lvalues can be returned as-is
   (that return is elided in this listing). */
1660 if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
1662 /* Use NOP_EXPR instead of NON_LVALUE_EXPR
1663 so convert_for_assignment won't strip it.
1664 This is so this 0 won't be treated as a null pointer constant. */
1665 result = build1 (NOP_EXPR, TREE_TYPE (x), x);
1666 TREE_CONSTANT (result) = TREE_CONSTANT (x);
/* General case: wrap in NON_LVALUE_EXPR, preserving constness. */
1672 result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
1673 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1677 /* When pedantic, return an expr equal to X but certainly not valid as a
1678 pedantic lvalue. Otherwise, return X. */
1681 pedantic_non_lvalue (x)
/* NOTE(review): the `pedantic' test and the plain `return x' branch are
   elided in this listing; only the wrapping path is visible. */
1685 return non_lvalue (x);
1690 /* Given a tree comparison code, return the code that is the logical inverse
1691 of the given code. It is not safe to do this for floating-point
1692 comparisons, except for NE_EXPR and EQ_EXPR. */
1694 static enum tree_code
1695 invert_tree_comparison (code)
1696 enum tree_code code;
/* NOTE(review): the function body (presumably a switch mapping each
   comparison code to its inverse) is elided in this listing. */
1717 /* Similar, but return the comparison that results if the operands are
1718 swapped. This is safe for floating-point. */
1720 static enum tree_code
1721 swap_tree_comparison (code)
1722 enum tree_code code;
/* NOTE(review): the function body is elided in this listing. */
1742 /* Return nonzero if CODE is a tree code that represents a truth value. */
1745 truth_value_p (code)
1746 enum tree_code code;
/* True for any comparison (class '<') and for the logical operators. */
1748 return (TREE_CODE_CLASS (code) == '<'
1749 || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
1750 || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
1751 || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
1754 /* Return nonzero if two operands are necessarily equal.
1755 If ONLY_CONST is non-zero, only return non-zero for constants.
1756 This function tests whether the operands are indistinguishable;
1757 it does not test whether they are equal using C's == operation.
1758 The distinction is important for IEEE floating point, because
1759 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
1760 (2) two NaNs may be indistinguishable, but NaN!=NaN. */
1763 operand_equal_p (arg0, arg1, only_const)
1767 /* If both types don't have the same signedness, then we can't consider
1768 them equal. We must check this before the STRIP_NOPS calls
1769 because they may change the signedness of the arguments. */
1770 if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
1776 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
1777 We don't care about side effects in that case because the SAVE_EXPR
1778 takes care of that for us. */
1779 if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
1780 return ! only_const;
/* Anything with side effects (other than the shared SAVE_EXPR above)
   cannot be proven equal. */
1782 if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
/* Two ADDR_EXPRs of the identical operand are equal. */
1785 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1786 && TREE_CODE (arg0) == ADDR_EXPR
1787 && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
/* Integer constants compare by their two-word value. */
1790 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1791 && TREE_CODE (arg0) == INTEGER_CST
1792 && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
1793 && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
1796 /* Detect when real constants are equal. */
/* Bitwise comparison deliberately distinguishes -0.0 from 0.0 and
   treats bit-identical NaNs as equal (see the head comment). */
1797 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1798 && TREE_CODE (arg0) == REAL_CST)
1799 return !bcmp (&TREE_REAL_CST (arg0), &TREE_REAL_CST (arg1),
1800 sizeof (REAL_VALUE_TYPE));
/* Below here, structural recursion: codes must match exactly. */
1808 if (TREE_CODE (arg0) != TREE_CODE (arg1))
1810 /* This is needed for conversions and for COMPONENT_REF.
1811 Might as well play it safe and always test this. */
1812 if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
1815 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
1818 /* Two conversions are equal only if signedness and modes match. */
1819 if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
1820 && (TREE_UNSIGNED (TREE_TYPE (arg0))
1821 != TREE_UNSIGNED (TREE_TYPE (arg1))))
1824 return operand_equal_p (TREE_OPERAND (arg0, 0),
1825 TREE_OPERAND (arg1, 0), 0);
/* Binary operators: both operands must match pairwise. */
1829 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1830 TREE_OPERAND (arg1, 0), 0)
1831 && operand_equal_p (TREE_OPERAND (arg0, 1),
1832 TREE_OPERAND (arg1, 1), 0));
/* References: handled per code (the case labels are elided in this
   listing) — unary, two-operand, and three-operand forms. */
1835 switch (TREE_CODE (arg0))
1838 return operand_equal_p (TREE_OPERAND (arg0, 0),
1839 TREE_OPERAND (arg1, 0), 0);
1843 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1844 TREE_OPERAND (arg1, 0), 0)
1845 && operand_equal_p (TREE_OPERAND (arg0, 1),
1846 TREE_OPERAND (arg1, 1), 0));
1849 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1850 TREE_OPERAND (arg1, 0), 0)
1851 && operand_equal_p (TREE_OPERAND (arg0, 1),
1852 TREE_OPERAND (arg1, 1), 0)
1853 && operand_equal_p (TREE_OPERAND (arg0, 2),
1854 TREE_OPERAND (arg1, 2), 0));
1862 /* Similar to operand_equal_p, but see if ARG0 might have been made by
1863 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
1865 When in doubt, return 0. */
1868 operand_equal_for_comparison_p (arg0, arg1, other)
1872 int unsignedp1, unsignedpo;
1873 tree primarg1, primother;
/* Exact structural equality is the easy win. */
1876 if (operand_equal_p (arg0, arg1, 0))
/* The shorten_compare transformation only applies to integral types. */
1879 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
1882 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
1883 actual comparison operand, ARG0.
1885 First throw away any conversions to wider types
1886 already present in the operands. */
1888 primarg1 = get_narrower (arg1, &unsignedp1);
1889 primother = get_narrower (other, &unsignedpo);
1891 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
1892 if (unsignedp1 == unsignedpo
1893 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
1894 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
1896 tree type = TREE_TYPE (arg0);
1898 /* Make sure shorter operand is extended the right way
1899 to match the longer operand. */
1900 primarg1 = convert (signed_or_unsigned_type (unsignedp1,
1901 TREE_TYPE (primarg1)),
/* If the shortened-then-reconverted ARG1 matches ARG0, they compare
   as equal operands (the success return is elided in this listing). */
1904 if (operand_equal_p (arg0, convert (type, primarg1), 0))
1911 /* See if ARG is an expression that is either a comparison or is performing
1912 arithmetic on comparisons. The comparisons must only be comparing
1913 two different values, which will be stored in *CVAL1 and *CVAL2; if
1914 they are non-zero it means that some operands have already been found.
1915 No variables may be used anywhere else in the expression except in the
1916 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
1917 the expression and save_expr needs to be called with CVAL1 and CVAL2.
1919 If this is true, return 1. Otherwise, return zero. */
1922 twoval_comparison_p (arg, cval1, cval2, save_p)
1924 tree *cval1, *cval2;
1927 enum tree_code code = TREE_CODE (arg);
1928 char class = TREE_CODE_CLASS (code);
1930 /* We can handle some of the 'e' cases here. */
/* TRUTH_NOT_EXPR and the short-circuit/compound forms are reclassified
   (the reassignments of `class' are elided in this listing). */
1931 if (class == 'e' && code == TRUTH_NOT_EXPR)
1933 else if (class == 'e'
1934 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
1935 || code == COMPOUND_EXPR))
1938 /* ??? Disable this since the SAVE_EXPR might already be in use outside
1939 the expression. There may be no way to make this work, but it needs
1940 to be looked at again for 2.6. */
1942 else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
1944 /* If we've already found a CVAL1 or CVAL2, this expression is
1945 too complex to handle. */
1946 if (*cval1 || *cval2)
/* Unary case: recurse into the single operand. */
1957 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
/* Binary case: both operands must themselves qualify. */
1960 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
1961 && twoval_comparison_p (TREE_OPERAND (arg, 1),
1962 cval1, cval2, save_p));
1968 if (code == COND_EXPR)
1969 return (twoval_comparison_p (TREE_OPERAND (arg, 0),
1970 cval1, cval2, save_p)
1971 && twoval_comparison_p (TREE_OPERAND (arg, 1),
1972 cval1, cval2, save_p)
1973 && twoval_comparison_p (TREE_OPERAND (arg, 2),
1974 cval1, cval2, save_p));
1978 /* First see if we can handle the first operand, then the second. For
1979 the second operand, we know *CVAL1 can't be zero. It must be that
1980 one side of the comparison is each of the values; test for the
1981 case where this isn't true by failing if the two operands
/* ... are the same (sentence completed from context; the fail-return
   is elided in this listing). */
1984 if (operand_equal_p (TREE_OPERAND (arg, 0),
1985 TREE_OPERAND (arg, 1), 0))
/* Record or match the first comparison operand against CVAL1/CVAL2. */
1989 *cval1 = TREE_OPERAND (arg, 0);
1990 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
1992 else if (*cval2 == 0)
1993 *cval2 = TREE_OPERAND (arg, 0);
1994 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
/* Likewise for the second comparison operand. */
1999 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
2001 else if (*cval2 == 0)
2002 *cval2 = TREE_OPERAND (arg, 1);
2003 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
2014 /* ARG is a tree that is known to contain just arithmetic operations and
2015 comparisons. Evaluate the operations in the tree substituting NEW0 for
2016 any occurrence of OLD0 as an operand of a comparison and likewise for
/* ... NEW1/OLD1 (continuation elided in this listing). */
2020 eval_subst (arg, old0, new0, old1, new1)
2022 tree old0, new0, old1, new1;
2024 tree type = TREE_TYPE (arg);
2025 enum tree_code code = TREE_CODE (arg);
2026 char class = TREE_CODE_CLASS (code);
2028 /* We can handle some of the 'e' cases here. */
2029 if (class == 'e' && code == TRUTH_NOT_EXPR)
2031 else if (class == 'e'
2032 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
/* Unary expression: rebuild with the substituted operand and fold. */
2038 return fold (build1 (code, type,
2039 eval_subst (TREE_OPERAND (arg, 0),
2040 old0, new0, old1, new1)));
/* Binary expression: substitute in both operands. */
2043 return fold (build (code, type,
2044 eval_subst (TREE_OPERAND (arg, 0),
2045 old0, new0, old1, new1),
2046 eval_subst (TREE_OPERAND (arg, 1),
2047 old0, new0, old1, new1)));
/* Expression cases (labels elided in this listing): pass through the
   relevant operand, or rebuild a three-operand expression. */
2053 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
2056 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
2059 return fold (build (code, type,
2060 eval_subst (TREE_OPERAND (arg, 0),
2061 old0, new0, old1, new1),
2062 eval_subst (TREE_OPERAND (arg, 1),
2063 old0, new0, old1, new1),
2064 eval_subst (TREE_OPERAND (arg, 2),
2065 old0, new0, old1, new1)));
/* Comparison: this is where the OLD->NEW substitution happens. */
2070 tree arg0 = TREE_OPERAND (arg, 0);
2071 tree arg1 = TREE_OPERAND (arg, 1);
2073 /* We need to check both for exact equality and tree equality. The
2074 former will be true if the operand has a side-effect. In that
2075 case, we know the operand occurred exactly once. */
2077 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
2079 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
2082 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
2084 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
2087 return fold (build (code, type, arg0, arg1));
2094 /* Return a tree for the case when the result of an expression is RESULT
2095 converted to TYPE and OMITTED was previously an operand of the expression
2096 but is now not needed (e.g., we folded OMITTED * 0).
2098 If OMITTED has side effects, we must evaluate it. Otherwise, just do
2099 the conversion of RESULT to TYPE. */
2102 omit_one_operand (type, result, omitted)
2103 tree type, result, omitted;
2105 tree t = convert (type, result);
/* Keep OMITTED alive via a COMPOUND_EXPR only when it has side effects. */
2107 if (TREE_SIDE_EFFECTS (omitted))
2108 return build (COMPOUND_EXPR, type, omitted, t);
/* Wrap so the result cannot be used as an lvalue. */
2110 return non_lvalue (t);
2113 /* Return a simplified tree node for the truth-negation of ARG. This
2114 never alters ARG itself. We assume that ARG is an operation that
2115 returns a truth value (0 or 1). */
2118 invert_truthvalue (arg)
2121 tree type = TREE_TYPE (arg);
2122 enum tree_code code = TREE_CODE (arg);
2124 if (code == ERROR_MARK)
2127 /* If this is a comparison, we can simply invert it, except for
2128 floating-point non-equality comparisons, in which case we just
2129 enclose a TRUTH_NOT_EXPR around what we have. */
2131 if (TREE_CODE_CLASS (code) == '<')
2133 if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
2134 && code != NE_EXPR && code != EQ_EXPR)
2135 return build1 (TRUTH_NOT_EXPR, type, arg)
2137 return build (invert_tree_comparison (code), type,
2138 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
/* Integer constant: the negation is 1 iff the constant is zero. */
2144 return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
2145 && TREE_INT_CST_HIGH (arg) == 0, 0));
2147 case TRUTH_AND_EXPR:
/* De Morgan: !(a && b) == !a || !b. */
2148 return build (TRUTH_OR_EXPR, type,
2149 invert_truthvalue (TREE_OPERAND (arg, 0)),
2150 invert_truthvalue (TREE_OPERAND (arg, 1)));
/* De Morgan: !(a || b) == !a && !b. */
2153 return build (TRUTH_AND_EXPR, type,
2154 invert_truthvalue (TREE_OPERAND (arg, 0)),
2155 invert_truthvalue (TREE_OPERAND (arg, 1)));
2157 case TRUTH_XOR_EXPR:
2158 /* Here we can invert either operand. We invert the first operand
2159 unless the second operand is a TRUTH_NOT_EXPR in which case our
2160 result is the XOR of the first operand with the inside of the
2161 negation of the second operand. */
2163 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
2164 return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
2165 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
2167 return build (TRUTH_XOR_EXPR, type,
2168 invert_truthvalue (TREE_OPERAND (arg, 0)),
2169 TREE_OPERAND (arg, 1));
2171 case TRUTH_ANDIF_EXPR:
/* Short-circuit forms invert to the dual short-circuit form. */
2172 return build (TRUTH_ORIF_EXPR, type,
2173 invert_truthvalue (TREE_OPERAND (arg, 0)),
2174 invert_truthvalue (TREE_OPERAND (arg, 1)));
2176 case TRUTH_ORIF_EXPR:
2177 return build (TRUTH_ANDIF_EXPR, type,
2178 invert_truthvalue (TREE_OPERAND (arg, 0)),
2179 invert_truthvalue (TREE_OPERAND (arg, 1)));
2181 case TRUTH_NOT_EXPR:
/* Double negation cancels. */
2182 return TREE_OPERAND (arg, 0);
/* COND_EXPR: invert both arms, keep the condition. */
2185 return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
2186 invert_truthvalue (TREE_OPERAND (arg, 1)),
2187 invert_truthvalue (TREE_OPERAND (arg, 2)));
/* COMPOUND_EXPR: invert only the value operand. */
2190 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
2191 invert_truthvalue (TREE_OPERAND (arg, 1)));
2193 case NON_LVALUE_EXPR:
2194 return invert_truthvalue (TREE_OPERAND (arg, 0));
/* Conversions (case labels elided in this listing): push the
   negation inside the conversion. */
2199 return build1 (TREE_CODE (arg), type,
2200 invert_truthvalue (TREE_OPERAND (arg, 0)));
/* BIT_AND_EXPR with mask 1 behaves as a truth value: x&1 inverts
   to x == 0. */
2203 if (!integer_onep (TREE_OPERAND (arg, 1)))
2205 return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
2208 return build1 (TRUTH_NOT_EXPR, type, arg);
/* Fallback: only valid for boolean-typed args; otherwise abort
   (elided in this listing). */
2210 if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
2212 return build1 (TRUTH_NOT_EXPR, type, arg);
2215 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2216 operands are another bit-wise operation with a common input. If so,
2217 distribute the bit operations to save an operation and possibly two if
2218 constants are involved. For example, convert
2219 (A | B) & (A | C) into A | (B & C)
2220 Further simplification will occur if B and C are constants.
2222 If this optimization cannot be done, 0 will be returned. */
2225 distribute_bit_expr (code, type, arg0, arg1)
2226 enum tree_code code;
/* Both operands must be the same kind of AND/IOR, and different from
   the outer CODE (so the distribution actually helps). */
2233 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2234 || TREE_CODE (arg0) == code
2235 || (TREE_CODE (arg0) != BIT_AND_EXPR
2236 && TREE_CODE (arg0) != BIT_IOR_EXPR))
/* Find the common operand; it may appear in any of the four
   position combinations. */
2239 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
2241 common = TREE_OPERAND (arg0, 0);
2242 left = TREE_OPERAND (arg0, 1);
2243 right = TREE_OPERAND (arg1, 1);
2245 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
2247 common = TREE_OPERAND (arg0, 0);
2248 left = TREE_OPERAND (arg0, 1);
2249 right = TREE_OPERAND (arg1, 0);
2251 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
2253 common = TREE_OPERAND (arg0, 1);
2254 left = TREE_OPERAND (arg0, 0);
2255 right = TREE_OPERAND (arg1, 1);
2257 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
2259 common = TREE_OPERAND (arg0, 1);
2260 left = TREE_OPERAND (arg0, 0);
2261 right = TREE_OPERAND (arg1, 0);
/* Rebuild as COMMON op (LEFT code RIGHT) and fold. */
2266 return fold (build (TREE_CODE (arg0), type, common,
2267 fold (build (code, type, left, right))));
2270 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
2271 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
2274 make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
2277 int bitsize, bitpos;
2280 tree result = build (BIT_FIELD_REF, type, inner,
2281 size_int (bitsize), size_int (bitpos));
/* Record the requested signedness on the reference node itself. */
2283 TREE_UNSIGNED (result) = unsignedp;
2288 /* Optimize a bit-field compare.
2290 There are two cases: First is a compare against a constant and the
2291 second is a comparison of two items where the fields are at the same
2292 bit position relative to the start of a chunk (byte, halfword, word)
2293 large enough to contain it. In these cases we can avoid the shift
2294 implicit in bitfield extractions.
2296 For constants, we emit a compare of the shifted constant with the
2297 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2298 compared. For two fields at the same position, we do the ANDs with the
2299 similar mask and compare the result of the ANDs.
2301 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2302 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2303 are the left and right operands of the comparison, respectively.
2305 If the optimization described above can be done, we return the resulting
2306 tree. Otherwise we return zero. */
2309 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2310 enum tree_code code;
2314 int lbitpos, lbitsize, rbitpos, rbitsize;
2315 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
2316 tree type = TREE_TYPE (lhs);
2317 tree signed_type, unsigned_type;
2318 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2319 enum machine_mode lmode, rmode, lnmode, rnmode;
2320 int lunsignedp, runsignedp;
2321 int lvolatilep = 0, rvolatilep = 0;
2322 tree linner, rinner;
2326 /* Get all the information about the extractions being done. If the bit size
2327 if the same as the size of the underlying object, we aren't doing an
2328 extraction at all and so can do nothing. */
2329 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2330 &lunsignedp, &lvolatilep);
2331 if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2337 /* If this is not a constant, we can only do something if bit positions,
2338 sizes, and signedness are the same. */
2339 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
2340 &rmode, &runsignedp, &rvolatilep);
2342 if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
2343 || lunsignedp != runsignedp || offset != 0)
2347 /* See if we can find a mode to refer to this field. We should be able to,
2348 but fail if we can't. */
2349 lnmode = get_best_mode (lbitsize, lbitpos,
2350 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
2352 if (lnmode == VOIDmode)
2355 /* Set signed and unsigned types of the precision of this mode for the
/* ... shifts below (sentence continuation elided in this listing). */
2357 signed_type = type_for_mode (lnmode, 0);
2358 unsigned_type = type_for_mode (lnmode, 1);
/* Same mode search for the right-hand field (non-constant case). */
2362 rnmode = get_best_mode (rbitsize, rbitpos,
2363 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
2365 if (rnmode == VOIDmode)
2369 /* Compute the bit position and size for the new reference and our offset
2370 within it. If the new reference is the same size as the original, we
2371 won't optimize anything, so return zero. */
2372 lnbitsize = GET_MODE_BITSIZE (lnmode);
2373 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2374 lbitpos -= lnbitpos;
2375 if (lnbitsize == lbitsize)
2380 rnbitsize = GET_MODE_BITSIZE (rnmode);
2381 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2382 rbitpos -= rnbitpos;
2383 if (rnbitsize == rbitsize)
2387 #if BYTES_BIG_ENDIAN
/* On big-endian targets, bit 0 of the chunk is at the other end. */
2388 lbitpos = lnbitsize - lbitsize - lbitpos;
2391 /* Make the mask to be used against the extracted field. */
/* Start from all-ones, then shift left and back right so only the
   field's bits within the chunk remain set. */
2392 mask = build_int_2 (~0, ~0);
2393 TREE_TYPE (mask) = unsigned_type;
2394 force_fit_type (mask, 0);
2395 mask = convert (unsigned_type, mask);
2396 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2397 mask = const_binop (RSHIFT_EXPR, mask,
2398 size_int (lnbitsize - lbitsize - lbitpos), 0);
2401 /* If not comparing with constant, just rework the comparison
/* ... using the masked chunk loads on both sides. */
2403 return build (code, compare_type,
2404 build (BIT_AND_EXPR, unsigned_type,
2405 make_bit_field_ref (linner, unsigned_type,
2406 lnbitsize, lnbitpos, 1),
2408 build (BIT_AND_EXPR, unsigned_type,
2409 make_bit_field_ref (rinner, unsigned_type,
2410 rnbitsize, rnbitpos, 1),
2413 /* Otherwise, we are handling the constant case. See if the constant is too
2414 big for the field. Warn and return a tree for 0 (false) if so. We do
2415 this not only for its own sake, but to avoid having to test for this
2416 error case below. If we didn't, we might generate wrong code.
2418 For unsigned fields, the constant shifted right by the field length should
2419 be all zero. For signed fields, the high-order bits should agree with
/* ... the sign bit (continuation elided in this listing). */
2424 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2425 convert (unsigned_type, rhs),
2426 size_int (lbitsize), 0)))
2428 warning ("comparison is always %s due to width of bitfield",
2429 code == NE_EXPR ? "one" : "zero");
2430 return convert (compare_type,
2432 ? integer_one_node : integer_zero_node));
/* Signed-field variant of the same width check. */
2437 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2438 size_int (lbitsize - 1), 0);
2439 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2441 warning ("comparison is always %s due to width of bitfield",
2442 code == NE_EXPR ? "one" : "zero");
2443 return convert (compare_type,
2445 ? integer_one_node : integer_zero_node));
2449 /* Single-bit compares should always be against zero. */
2450 if (lbitsize == 1 && ! integer_zerop (rhs))
2452 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2453 rhs = convert (type, integer_zero_node);
2456 /* Make a new bitfield reference, shift the constant over the
2457 appropriate number of bits and mask it with the computed mask
2458 (in case this was a signed field). If we changed it, make a new one. */
2459 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
/* Preserve volatility semantics on the widened reference. */
2462 TREE_SIDE_EFFECTS (lhs) = 1;
2463 TREE_THIS_VOLATILE (lhs) = 1;
2466 rhs = fold (const_binop (BIT_AND_EXPR,
2467 const_binop (LSHIFT_EXPR,
2468 convert (unsigned_type, rhs),
2469 size_int (lbitpos), 0),
2472 return build (code, compare_type,
2473 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2477 /* Subroutine for fold_truthop: decode a field reference.
2479 If EXP is a comparison reference, we return the innermost reference.
2481 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2482 set to the starting bit number.
2484 If the innermost field can be completely contained in a mode-sized
2485 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2487 *PVOLATILEP is set to 1 if the any expression encountered is volatile;
2488 otherwise it is not changed.
2490 *PUNSIGNEDP is set to the signedness of the field.
2492 *PMASK is set to the mask used. This is either contained in a
2493 BIT_AND_EXPR or derived from the width of the field.
2495 Return 0 if this is not a component reference or is one that we can't
2496 do anything with. */
2499 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2502 int *pbitsize, *pbitpos;
2503 enum machine_mode *pmode;
2504 int *punsignedp, *pvolatilep;
2511 /* All the optimizations using this function assume integer fields.
2512 There are problems with FP fields since the type_for_size call
2513 below can fail for, e.g., XFmode. */
2514 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
/* Peel off an explicit AND mask, if present. */
2519 if (TREE_CODE (exp) == BIT_AND_EXPR)
2521 mask = TREE_OPERAND (exp, 1);
2522 exp = TREE_OPERAND (exp, 0);
2523 STRIP_NOPS (exp); STRIP_NOPS (mask);
2524 if (TREE_CODE (mask) != INTEGER_CST)
2528 if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
2529 && TREE_CODE (exp) != BIT_FIELD_REF)
2532 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2533 punsignedp, pvolatilep);
2534 if (inner == exp || *pbitsize < 0 || offset != 0)
/* No explicit mask: derive one covering the field's width. */
2539 tree unsigned_type = type_for_size (*pbitsize, 1);
2540 int precision = TYPE_PRECISION (unsigned_type);
2542 mask = build_int_2 (~0, ~0);
2543 TREE_TYPE (mask) = unsigned_type;
2544 force_fit_type (mask, 0);
2545 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2546 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2553 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
/* ... bits (continuation elided in this listing). */
2557 all_ones_mask_p (mask, size)
2561 tree type = TREE_TYPE (mask);
2562 int precision = TYPE_PRECISION (type);
/* Build the reference mask: all ones, arithmetic-shifted so exactly
   SIZE low-order bits survive, then compare structurally with MASK. */
2565 tmask = build_int_2 (~0, ~0);
2566 TREE_TYPE (tmask) = signed_type (type);
2567 force_fit_type (tmask, 0);
2569 operand_equal_p (mask,
2570 const_binop (RSHIFT_EXPR,
2571 const_binop (LSHIFT_EXPR, tmask,
2572 size_int (precision - size), 0),
2573 size_int (precision - size), 0),
2577 /* Subroutine for fold_truthop: determine if an operand is simple enough
2578 to be evaluated unconditionally. */
2581 simple_operand_p (exp)
2584 /* Strip any conversions that don't change the machine mode. */
2585 while ((TREE_CODE (exp) == NOP_EXPR
2586 || TREE_CODE (exp) == CONVERT_EXPR)
2587 && (TYPE_MODE (TREE_TYPE (exp))
2588 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2589 exp = TREE_OPERAND (exp, 0);
/* Simple means: a constant ('c'), or a local, non-volatile,
   non-addressable declaration ('d') that is cheap to load. */
2591 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2592 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2593 && ! TREE_ADDRESSABLE (exp)
2594 && ! TREE_THIS_VOLATILE (exp)
2595 && ! DECL_NONLOCAL (exp)
2596 /* Don't regard global variables as simple. They may be
2597 allocated in ways unknown to the compiler (shared memory,
2598 #pragma weak, etc). */
2599 && ! TREE_PUBLIC (exp)
2600 && ! DECL_EXTERNAL (exp)
2601 /* Loading a static variable is unduly expensive, but global
2602 registers aren't expensive. */
2603 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2606 /* Subroutine for fold_truthop: try to optimize a range test.
2608 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2610 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2611 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2612 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of
/* ... the result (continuation elided in this listing). */
2615 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2616 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2617 larger than HI_CST (they may be equal).
2619 We return the simplified tree or 0 if no optimization is possible. */
2622 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2623 enum tree_code jcode, lo_code, hi_code;
2624 tree type, var, lo_cst, hi_cst;
2627 enum tree_code rcode;
2629 /* See if this is a range test and normalize the constant terms. */
2631 if (jcode == TRUTH_AND_EXPR)
/* AND case: normalize toward VAR >= LO_CST && VAR <= HI_CST. */
2636 /* See if we have VAR != CST && VAR != CST+1. */
2637 if (! (hi_code == NE_EXPR
2638 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2639 && tree_int_cst_equal (integer_one_node,
2640 const_binop (MINUS_EXPR,
2641 hi_cst, lo_cst, 0))))
/* Convert strict bounds into inclusive bounds. */
2649 if (hi_code == LT_EXPR)
2650 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2651 else if (hi_code != LE_EXPR)
2654 if (lo_code == GT_EXPR)
2655 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2657 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
/* OR case: normalize toward VAR < LO_CST || VAR > HI_CST. */
2670 /* See if we have VAR == CST || VAR == CST+1. */
2671 if (! (hi_code == EQ_EXPR
2672 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2673 && tree_int_cst_equal (integer_one_node,
2674 const_binop (MINUS_EXPR,
2675 hi_cst, lo_cst, 0))))
2683 if (hi_code == GE_EXPR)
2684 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2685 else if (hi_code != GT_EXPR)
2688 if (lo_code == LE_EXPR)
2689 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2691 /* We now have VAR < LO_CST || VAR > HI_CST. */
2700 /* When normalizing, it is possible to both increment the smaller constant
2701 and decrement the larger constant. See if they are still ordered. */
2702 if (tree_int_cst_lt (hi_cst, lo_cst))
2705 /* Fail if VAR isn't an integer. */
2706 utype = TREE_TYPE (var);
2707 if (! INTEGRAL_TYPE_P (utype))
2710 /* The range test is invalid if subtracting the two constants results
2711 in overflow. This can happen in traditional mode. */
2712 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2713 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
/* Do the subtraction and compare in the unsigned type so wraparound
   turns the two-sided range test into a single comparison. */
2716 if (! TREE_UNSIGNED (utype))
2718 utype = unsigned_type (utype);
2719 var = convert (utype, var);
2720 lo_cst = convert (utype, lo_cst);
2721 hi_cst = convert (utype, hi_cst);
2724 return fold (convert (type,
2725 build (rcode, utype,
2726 build (MINUS_EXPR, utype, var, lo_cst),
2727 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2730 /* Find ways of folding logical expressions of LHS and RHS:
2731 Try to merge two comparisons to the same innermost item.
2732 Look for range tests like "ch >= '0' && ch <= '9'".
2733 Look for combinations of simple terms on machines with expensive branches
2734 and evaluate the RHS unconditionally.
2736 For example, if we have p->a == 2 && p->b == 4 and we can make an
2737 object large enough to span both A and B, we can do this with a comparison
2738 against the object ANDed with the a mask.
2740 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2741 operations to do this with one comparison.
2743 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2744 function and the one above.
2746 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2747 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2749 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
2752 We return the simplified tree or 0 if no optimization is possible. */
/* NOTE(review): as elsewhere in this listing, lines are elided (the
   original line numbers at the left jump), so the return type, braces,
   `else` arms, and several `return 0;` failure paths are not shown.
   CODE joins LHS and RHS (see the block comment above this function);
   TRUTH_TYPE is the type of the combined truth value.  */
2755 fold_truthop (code, truth_type, lhs, rhs)
2756 enum tree_code code;
2757 tree truth_type, lhs, rhs;
2759 /* If this is the "or" of two comparisons, we can do something if
2760 the comparisons are NE_EXPR. If this is the "and", we can do something
2761 if the comparisons are EQ_EXPR. I.e.,
2762 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2764 WANTED_CODE is this operation code. For single bit fields, we can
2765 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2766 comparison for one-bit fields. */
2768 enum tree_code wanted_code;
2769 enum tree_code lcode, rcode;
2770 tree ll_arg, lr_arg, rl_arg, rr_arg;
2771 tree ll_inner, lr_inner, rl_inner, rr_inner;
2772 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2773 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2774 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2775 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2776 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2777 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2778 enum machine_mode lnmode, rnmode;
2779 tree ll_mask, lr_mask, rl_mask, rr_mask;
2780 tree l_const, r_const;
2782 int first_bit, end_bit;
2785 /* Start by getting the comparison codes and seeing if this looks like
2786 a range test. Fail if anything is volatile. If one operand is a
2787 BIT_AND_EXPR with the constant one, treat it as if it were surrounded
2790 if (TREE_SIDE_EFFECTS (lhs)
2791 || TREE_SIDE_EFFECTS (rhs))
2794 lcode = TREE_CODE (lhs);
2795 rcode = TREE_CODE (rhs);
/* Rewrite (x & 1) as the comparison (x & 1) != 0 so it can be merged
   like any other comparison.  */
2797 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2798 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2800 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2801 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
2803 if (TREE_CODE_CLASS (lcode) != '<'
2804 || TREE_CODE_CLASS (rcode) != '<')
/* Canonicalize the short-circuit codes to their unconditional forms.  */
2807 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2808 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2810 ll_arg = TREE_OPERAND (lhs, 0);
2811 lr_arg = TREE_OPERAND (lhs, 1);
2812 rl_arg = TREE_OPERAND (rhs, 0);
2813 rr_arg = TREE_OPERAND (rhs, 1);
/* Same variable compared against two constants: hand off to range_test,
   passing the smaller constant as LO_CST.  */
2815 if (TREE_CODE (lr_arg) == INTEGER_CST
2816 && TREE_CODE (rr_arg) == INTEGER_CST
2817 && operand_equal_p (ll_arg, rl_arg, 0))
2819 if (tree_int_cst_lt (lr_arg, rr_arg))
2820 result = range_test (code, truth_type, lcode, rcode,
2821 ll_arg, lr_arg, rr_arg);
2823 result = range_test (code, truth_type, rcode, lcode,
2824 ll_arg, rr_arg, lr_arg);
2826 /* If this isn't a range test, it also isn't a comparison that
2827 can be merged. However, it wins to evaluate the RHS unconditionally
2828 on machines with expensive branches. */
2830 if (result == 0 && BRANCH_COST >= 2)
2832 if (TREE_CODE (ll_arg) != VAR_DECL
2833 && TREE_CODE (ll_arg) != PARM_DECL)
2835 /* Avoid evaluating the variable part twice. */
2836 ll_arg = save_expr (ll_arg);
2837 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2838 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2840 return build (code, truth_type, lhs, rhs);
2845 /* If the RHS can be evaluated unconditionally and its operands are
2846 simple, it wins to evaluate the RHS unconditionally on machines
2847 with expensive branches. In this case, this isn't a comparison
2848 that can be merged. */
2850 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2851 are with zero (tmw). */
2853 if (BRANCH_COST >= 2
2854 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2855 && simple_operand_p (rl_arg)
2856 && simple_operand_p (rr_arg))
2857 return build (code, truth_type, lhs, rhs);
2859 /* See if the comparisons can be merged. Then get all the parameters for
2862 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2863 || (rcode != EQ_EXPR && rcode != NE_EXPR))
/* Decompose all four operands into inner object, bit position/size,
   mode, signedness, and mask (NULL inner means "not a field ref").  */
2867 ll_inner = decode_field_reference (ll_arg,
2868 &ll_bitsize, &ll_bitpos, &ll_mode,
2869 &ll_unsignedp, &volatilep, &ll_mask);
2870 lr_inner = decode_field_reference (lr_arg,
2871 &lr_bitsize, &lr_bitpos, &lr_mode,
2872 &lr_unsignedp, &volatilep, &lr_mask);
2873 rl_inner = decode_field_reference (rl_arg,
2874 &rl_bitsize, &rl_bitpos, &rl_mode,
2875 &rl_unsignedp, &volatilep, &rl_mask);
2876 rr_inner = decode_field_reference (rr_arg,
2877 &rr_bitsize, &rr_bitpos, &rr_mode,
2878 &rr_unsignedp, &volatilep, &rr_mask);
2880 /* It must be true that the inner operation on the lhs of each
2881 comparison must be the same if we are to be able to do anything.
2882 Then see if we have constants. If not, the same must be true for
2884 if (volatilep || ll_inner == 0 || rl_inner == 0
2885 || ! operand_equal_p (ll_inner, rl_inner, 0))
2888 if (TREE_CODE (lr_arg) == INTEGER_CST
2889 && TREE_CODE (rr_arg) == INTEGER_CST)
2890 l_const = lr_arg, r_const = rr_arg;
2891 else if (lr_inner == 0 || rr_inner == 0
2892 || ! operand_equal_p (lr_inner, rr_inner, 0))
2895 l_const = r_const = 0;
2897 /* If either comparison code is not correct for our logical operation,
2898 fail. However, we can convert a one-bit comparison against zero into
2899 the opposite comparison against that bit being set in the field. */
2901 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2902 if (lcode != wanted_code)
2904 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2910 if (rcode != wanted_code)
2912 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2918 /* See if we can find a mode that contains both fields being compared on
2919 the left. If we can't, fail. Otherwise, update all constants and masks
2920 to be relative to a field of that size. */
2921 first_bit = MIN (ll_bitpos, rl_bitpos);
2922 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2923 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2924 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2926 if (lnmode == VOIDmode)
2929 lnbitsize = GET_MODE_BITSIZE (lnmode);
/* Round the start position down to a multiple of the mode width
   (assumes lnbitsize is a power of two -- true of machine modes).  */
2930 lnbitpos = first_bit & ~ (lnbitsize - 1);
2931 type = type_for_size (lnbitsize, 1);
2932 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2934 #if BYTES_BIG_ENDIAN
2935 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2936 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2939 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2940 size_int (xll_bitpos), 0);
2941 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2942 size_int (xrl_bitpos), 0);
2944 /* Make sure the constants are interpreted as unsigned, so we
2945 don't have sign bits outside the range of their type. */
2949 l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
2950 l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
2951 size_int (xll_bitpos), 0);
2955 r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
2956 r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
2957 size_int (xrl_bitpos), 0);
2960 /* If the right sides are not constant, do the same for it. Also,
2961 disallow this optimization if a size or signedness mismatch occurs
2962 between the left and right sides. */
2965 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2966 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2967 /* Make sure the two fields on the right
2968 correspond to the left without being swapped. */
2969 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2972 first_bit = MIN (lr_bitpos, rr_bitpos);
2973 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2974 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2975 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2977 if (rnmode == VOIDmode)
2980 rnbitsize = GET_MODE_BITSIZE (rnmode);
2981 rnbitpos = first_bit & ~ (rnbitsize - 1);
2982 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2984 #if BYTES_BIG_ENDIAN
2985 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2986 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2989 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2990 size_int (xlr_bitpos), 0);
2991 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2992 size_int (xrr_bitpos), 0);
2994 /* Make a mask that corresponds to both fields being compared.
2995 Do this for both items being compared. If the masks agree,
2996 we can do this by masking both and comparing the masked
2998 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2999 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
3000 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
3002 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
3003 ll_unsignedp || rl_unsignedp)
3004 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
3005 lr_unsignedp || rr_unsignedp);
3006 if (! all_ones_mask_p (ll_mask, lnbitsize))
3008 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
3009 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
3011 return build (wanted_code, truth_type, lhs, rhs);
3014 /* There is still another way we can do something: If both pairs of
3015 fields being compared are adjacent, we may be able to make a wider
3016 field containing them both. */
3017 if ((ll_bitsize + ll_bitpos == rl_bitpos
3018 && lr_bitsize + lr_bitpos == rr_bitpos)
3019 || (ll_bitpos == rl_bitpos + rl_bitsize
3020 && lr_bitpos == rr_bitpos + rr_bitsize))
3021 return build (wanted_code, truth_type,
3022 make_bit_field_ref (ll_inner, type,
3023 ll_bitsize + rl_bitsize,
3024 MIN (ll_bitpos, rl_bitpos),
3026 make_bit_field_ref (lr_inner, type,
3027 lr_bitsize + rr_bitsize,
3028 MIN (lr_bitpos, rr_bitpos),
3034 /* Handle the case of comparisons with constants. If there is something in
3035 common between the masks, those bits of the constants must be the same.
3036 If not, the condition is always false. Test for this to avoid generating
3037 incorrect code below. */
3038 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
3039 if (! integer_zerop (result)
3040 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
3041 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
3043 if (wanted_code == NE_EXPR)
3045 warning ("`or' of unmatched not-equal tests is always 1");
3046 return convert (truth_type, integer_one_node);
3050 warning ("`and' of mutually exclusive equal-tests is always zero");
3051 return convert (truth_type, integer_zero_node);
3055 /* Construct the expression we will return. First get the component
3056 reference we will make. Unless the mask is all ones the width of
3057 that field, perform the mask operation. Then compare with the
3059 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
3060 ll_unsignedp || rl_unsignedp);
3062 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
3063 if (! all_ones_mask_p (ll_mask, lnbitsize))
3064 result = build (BIT_AND_EXPR, type, result, ll_mask);
3066 return build (wanted_code, truth_type, result,
3067 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
3070 /* Perform constant folding and related simplification of EXPR.
3071 The related simplifications include x*1 => x, x*0 => 0, etc.,
3072 and application of the associative law.
3073 NOP_EXPR conversions may be removed freely (as long as we
3074 are careful not to change the C type of the overall expression)
3075 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3076 but we can constant-fold them if they have constant operands. */
3082 register tree t = expr;
3083 tree t1 = NULL_TREE;
3085 tree type = TREE_TYPE (expr);
3086 register tree arg0, arg1;
3087 register enum tree_code code = TREE_CODE (t);
3091 /* WINS will be nonzero when the switch is done
3092 if all operands are constant. */
3096 /* Don't try to process an RTL_EXPR since its operands aren't trees. */
3097 if (code == RTL_EXPR)
3100 /* Return right away if already constant. */
3101 if (TREE_CONSTANT (t))
3103 if (code == CONST_DECL)
3104 return DECL_INITIAL (t);
3108 kind = TREE_CODE_CLASS (code);
3109 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3113 /* Special case for conversion ops that can have fixed point args. */
3114 arg0 = TREE_OPERAND (t, 0);
3116 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3118 STRIP_TYPE_NOPS (arg0);
3120 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3121 subop = TREE_REALPART (arg0);
3125 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3126 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3127 && TREE_CODE (subop) != REAL_CST
3128 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3130 /* Note that TREE_CONSTANT isn't enough:
3131 static var addresses are constant but we can't
3132 do arithmetic on them. */
3135 else if (kind == 'e' || kind == '<'
3136 || kind == '1' || kind == '2' || kind == 'r')
3138 register int len = tree_code_length[(int) code];
3140 for (i = 0; i < len; i++)
3142 tree op = TREE_OPERAND (t, i);
3146 continue; /* Valid for CALL_EXPR, at least. */
3148 if (kind == '<' || code == RSHIFT_EXPR)
3150 /* Signedness matters here. Perhaps we can refine this
3152 STRIP_TYPE_NOPS (op);
3156 /* Strip any conversions that don't change the mode. */
3160 if (TREE_CODE (op) == COMPLEX_CST)
3161 subop = TREE_REALPART (op);
3165 if (TREE_CODE (subop) != INTEGER_CST
3166 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3167 && TREE_CODE (subop) != REAL_CST
3168 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3170 /* Note that TREE_CONSTANT isn't enough:
3171 static var addresses are constant but we can't
3172 do arithmetic on them. */
3182 /* If this is a commutative operation, and ARG0 is a constant, move it
3183 to ARG1 to reduce the number of tests below. */
3184 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3185 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3186 || code == BIT_AND_EXPR)
3187 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3189 tem = arg0; arg0 = arg1; arg1 = tem;
3191 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3192 TREE_OPERAND (t, 1) = tem;
3195 /* Now WINS is set as described above,
3196 ARG0 is the first operand of EXPR,
3197 and ARG1 is the second operand (if it has more than one operand).
3199 First check for cases where an arithmetic operation is applied to a
3200 compound, conditional, or comparison operation. Push the arithmetic
3201 operation inside the compound or conditional to see if any folding
3202 can then be done. Convert comparison to conditional for this purpose.
3203 This also optimizes non-constant cases that used to be done in
3206 Before we do that, see if this is a BIT_AND_EXPR or a BIT_OR_EXPR,
3207 one of the operands is a comparison and the other is a comparison, a
3208 BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
3209 code below would make the expression more complex. Change it to a
3210 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
3211 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
3213 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
3214 || code == EQ_EXPR || code == NE_EXPR)
3215 && ((truth_value_p (TREE_CODE (arg0))
3216 && (truth_value_p (TREE_CODE (arg1))
3217 || (TREE_CODE (arg1) == BIT_AND_EXPR
3218 && integer_onep (TREE_OPERAND (arg1, 1)))))
3219 || (truth_value_p (TREE_CODE (arg1))
3220 && (truth_value_p (TREE_CODE (arg0))
3221 || (TREE_CODE (arg0) == BIT_AND_EXPR
3222 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3224 t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
3225 : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
3229 if (code == EQ_EXPR)
3230 t = invert_truthvalue (t);
3235 if (TREE_CODE_CLASS (code) == '1')
3237 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3238 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3239 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3240 else if (TREE_CODE (arg0) == COND_EXPR)
3242 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3243 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3244 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3246 /* If this was a conversion, and all we did was to move into
3247 inside the COND_EXPR, bring it back out. Then return so we
3248 don't get into an infinite recursion loop taking the conversion
3249 out and then back in. */
3251 if ((code == NOP_EXPR || code == CONVERT_EXPR
3252 || code == NON_LVALUE_EXPR)
3253 && TREE_CODE (t) == COND_EXPR
3254 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3255 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3256 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3257 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0))))
3258 t = build1 (code, type,
3260 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3261 TREE_OPERAND (t, 0),
3262 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3263 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3266 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3267 return fold (build (COND_EXPR, type, arg0,
3268 fold (build1 (code, type, integer_one_node)),
3269 fold (build1 (code, type, integer_zero_node))));
3271 else if (TREE_CODE_CLASS (code) == '2'
3272 || TREE_CODE_CLASS (code) == '<')
3274 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3275 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3276 fold (build (code, type,
3277 arg0, TREE_OPERAND (arg1, 1))));
3278 else if (TREE_CODE (arg1) == COND_EXPR
3279 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3281 tree test, true_value, false_value;
3283 if (TREE_CODE (arg1) == COND_EXPR)
3285 test = TREE_OPERAND (arg1, 0);
3286 true_value = TREE_OPERAND (arg1, 1);
3287 false_value = TREE_OPERAND (arg1, 2);
3292 true_value = integer_one_node;
3293 false_value = integer_zero_node;
3296 /* If ARG0 is complex we want to make sure we only evaluate
3297 it once. Though this is only required if it is volatile, it
3298 might be more efficient even if it is not. However, if we
3299 succeed in folding one part to a constant, we do not need
3300 to make this SAVE_EXPR. Since we do this optimization
3301 primarily to see if we do end up with constant and this
3302 SAVE_EXPR interferes with later optimizations, suppressing
3303 it when we can is important. */
3305 if ((TREE_CODE (arg0) != VAR_DECL && TREE_CODE (arg0) != PARM_DECL)
3306 || TREE_SIDE_EFFECTS (arg0))
3308 tree lhs = fold (build (code, type, arg0, true_value));
3309 tree rhs = fold (build (code, type, arg0, false_value));
3311 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3312 return fold (build (COND_EXPR, type, test, lhs, rhs));
3314 arg0 = save_expr (arg0);
3317 test = fold (build (COND_EXPR, type, test,
3318 fold (build (code, type, arg0, true_value)),
3319 fold (build (code, type, arg0, false_value))));
3320 if (TREE_CODE (arg0) == SAVE_EXPR)
3321 return build (COMPOUND_EXPR, type,
3322 convert (void_type_node, arg0), test);
3324 return convert (type, test);
3327 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3328 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3329 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3330 else if (TREE_CODE (arg0) == COND_EXPR
3331 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3333 tree test, true_value, false_value;
3335 if (TREE_CODE (arg0) == COND_EXPR)
3337 test = TREE_OPERAND (arg0, 0);
3338 true_value = TREE_OPERAND (arg0, 1);
3339 false_value = TREE_OPERAND (arg0, 2);
3344 true_value = integer_one_node;
3345 false_value = integer_zero_node;
3348 if ((TREE_CODE (arg1) != VAR_DECL && TREE_CODE (arg1) != PARM_DECL)
3349 || TREE_SIDE_EFFECTS (arg1))
3351 tree lhs = fold (build (code, type, true_value, arg1));
3352 tree rhs = fold (build (code, type, false_value, arg1));
3354 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3355 return fold (build (COND_EXPR, type, test, lhs, rhs));
3357 arg1 = save_expr (arg1);
3360 test = fold (build (COND_EXPR, type, test,
3361 fold (build (code, type, true_value, arg1)),
3362 fold (build (code, type, false_value, arg1))));
3363 if (TREE_CODE (arg1) == SAVE_EXPR)
3364 return build (COMPOUND_EXPR, type,
3365 convert (void_type_node, arg1), test);
3367 return convert (type, test);
3370 else if (TREE_CODE_CLASS (code) == '<'
3371 && TREE_CODE (arg0) == COMPOUND_EXPR)
3372 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3373 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3374 else if (TREE_CODE_CLASS (code) == '<'
3375 && TREE_CODE (arg1) == COMPOUND_EXPR)
3376 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3377 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3389 return fold (DECL_INITIAL (t));
3394 case FIX_TRUNC_EXPR:
3395 /* Other kinds of FIX are not handled properly by fold_convert. */
3397 /* In addition to the cases of two conversions in a row
3398 handled below, if we are converting something to its own
3399 type via an object of identical or wider precision, neither
3400 conversion is needed. */
3401 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3402 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3403 && TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == TREE_TYPE (t)
3404 && ((INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
3405 && INTEGRAL_TYPE_P (TREE_TYPE (t)))
3406 || (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0)))
3407 && FLOAT_TYPE_P (TREE_TYPE (t))))
3408 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3409 >= TYPE_PRECISION (TREE_TYPE (t))))
3410 return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
3412 /* Two conversions in a row are not needed unless:
3413 - the intermediate type is narrower than both initial and final, or
3414 - the intermediate type and innermost type differ in signedness,
3415 and the outermost type is wider than the intermediate, or
3416 - the initial type is a pointer type and the precisions of the
3417 intermediate and final types differ, or
3418 - the final type is a pointer type and the precisions of the
3419 initial and intermediate types differ. */
3420 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3421 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3422 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3423 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3425 TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3426 > TYPE_PRECISION (TREE_TYPE (t)))
3427 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3429 && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
3431 && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3432 != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3433 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3434 < TYPE_PRECISION (TREE_TYPE (t))))
3435 && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3436 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3437 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
3439 (TREE_UNSIGNED (TREE_TYPE (t))
3440 && (TYPE_PRECISION (TREE_TYPE (t))
3441 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3442 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3444 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3445 != TYPE_PRECISION (TREE_TYPE (t))))
3446 && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
3447 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3448 != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3449 return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3451 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3452 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3453 /* Detect assigning a bitfield. */
3454 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3455 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3457 /* Don't leave an assignment inside a conversion
3458 unless assigning a bitfield. */
3459 tree prev = TREE_OPERAND (t, 0);
3460 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3461 /* First do the assignment, then return converted constant. */
3462 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3468 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3471 return fold_convert (t, arg0);
3473 #if 0 /* This loses on &"foo"[0]. */
3478 /* Fold an expression like: "foo"[2] */
3479 if (TREE_CODE (arg0) == STRING_CST
3480 && TREE_CODE (arg1) == INTEGER_CST
3481 && !TREE_INT_CST_HIGH (arg1)
3482 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3484 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3485 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3486 force_fit_type (t, 0);
3493 TREE_CONSTANT (t) = wins;
3499 if (TREE_CODE (arg0) == INTEGER_CST)
3501 HOST_WIDE_INT low, high;
3502 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3503 TREE_INT_CST_HIGH (arg0),
3505 t = build_int_2 (low, high);
3506 TREE_TYPE (t) = type;
3508 = (TREE_OVERFLOW (arg0)
3509 | force_fit_type (t, overflow));
3510 TREE_CONSTANT_OVERFLOW (t)
3511 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3513 else if (TREE_CODE (arg0) == REAL_CST)
3514 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3515 TREE_TYPE (t) = type;
3517 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3518 return TREE_OPERAND (arg0, 0);
3520 /* Convert - (a - b) to (b - a) for non-floating-point. */
3521 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3522 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3523 TREE_OPERAND (arg0, 0));
3530 if (TREE_CODE (arg0) == INTEGER_CST)
3532 if (! TREE_UNSIGNED (type)
3533 && TREE_INT_CST_HIGH (arg0) < 0)
3535 HOST_WIDE_INT low, high;
3536 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3537 TREE_INT_CST_HIGH (arg0),
3539 t = build_int_2 (low, high);
3540 TREE_TYPE (t) = type;
3542 = (TREE_OVERFLOW (arg0)
3543 | force_fit_type (t, overflow));
3544 TREE_CONSTANT_OVERFLOW (t)
3545 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3548 else if (TREE_CODE (arg0) == REAL_CST)
3550 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3551 t = build_real (type,
3552 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3554 TREE_TYPE (t) = type;
3556 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3557 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3561 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
3563 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
3564 return build (COMPLEX_EXPR, TREE_TYPE (arg0),
3565 TREE_OPERAND (arg0, 0),
3566 fold (build1 (NEGATE_EXPR,
3567 TREE_TYPE (TREE_TYPE (arg0)),
3568 TREE_OPERAND (arg0, 1))));
3569 else if (TREE_CODE (arg0) == COMPLEX_CST)
3570 return build_complex (TREE_OPERAND (arg0, 0),
3571 fold (build1 (NEGATE_EXPR,
3572 TREE_TYPE (TREE_TYPE (arg0)),
3573 TREE_OPERAND (arg0, 1))));
3574 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
3575 return fold (build (TREE_CODE (arg0), type,
3576 fold (build1 (CONJ_EXPR, type,
3577 TREE_OPERAND (arg0, 0))),
3578 fold (build1 (CONJ_EXPR,
3579 type, TREE_OPERAND (arg0, 1)))));
3580 else if (TREE_CODE (arg0) == CONJ_EXPR)
3581 return TREE_OPERAND (arg0, 0);
3587 if (TREE_CODE (arg0) == INTEGER_CST)
3588 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3589 ~ TREE_INT_CST_HIGH (arg0));
3590 TREE_TYPE (t) = type;
3591 force_fit_type (t, 0);
3592 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
3593 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3595 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3596 return TREE_OPERAND (arg0, 0);
3600 /* A + (-B) -> A - B */
3601 if (TREE_CODE (arg1) == NEGATE_EXPR)
3602 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3603 else if (! FLOAT_TYPE_P (type))
3605 if (integer_zerop (arg1))
3606 return non_lvalue (convert (type, arg0));
3608 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3609 with a constant, and the two constants have no bits in common,
3610 we should treat this as a BIT_IOR_EXPR since this may produce more
3612 if (TREE_CODE (arg0) == BIT_AND_EXPR
3613 && TREE_CODE (arg1) == BIT_AND_EXPR
3614 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3615 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3616 && integer_zerop (const_binop (BIT_AND_EXPR,
3617 TREE_OPERAND (arg0, 1),
3618 TREE_OPERAND (arg1, 1), 0)))
3620 code = BIT_IOR_EXPR;
3624 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3625 about the case where C is a constant, just try one of the
3626 four possibilities. */
3628 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3629 && operand_equal_p (TREE_OPERAND (arg0, 1),
3630 TREE_OPERAND (arg1, 1), 0))
3631 return fold (build (MULT_EXPR, type,
3632 fold (build (PLUS_EXPR, type,
3633 TREE_OPERAND (arg0, 0),
3634 TREE_OPERAND (arg1, 0))),
3635 TREE_OPERAND (arg0, 1)));
3637 /* In IEEE floating point, x+0 may not equal x. */
3638 else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3640 && real_zerop (arg1))
3641 return non_lvalue (convert (type, arg0));
3643 /* In most languages, can't associate operations on floats
3644 through parentheses. Rather than remember where the parentheses
3645 were, we don't associate floats at all. It shouldn't matter much. */
3646 if (FLOAT_TYPE_P (type))
3648 /* The varsign == -1 cases happen only for addition and subtraction.
3649 It says that the arg that was split was really CON minus VAR.
3650 The rest of the code applies to all associative operations. */
3656 if (split_tree (arg0, code, &var, &con, &varsign))
3660 /* EXPR is (CON-VAR) +- ARG1. */
3661 /* If it is + and VAR==ARG1, return just CONST. */
3662 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3663 return convert (TREE_TYPE (t), con);
3665 /* If ARG0 is a constant, don't change things around;
3666 instead keep all the constant computations together. */
3668 if (TREE_CONSTANT (arg0))
3671 /* Otherwise return (CON +- ARG1) - VAR. */
3672 TREE_SET_CODE (t, MINUS_EXPR);
3673 TREE_OPERAND (t, 1) = var;
3675 = fold (build (code, TREE_TYPE (t), con, arg1));
3679 /* EXPR is (VAR+CON) +- ARG1. */
3680 /* If it is - and VAR==ARG1, return just CONST. */
3681 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3682 return convert (TREE_TYPE (t), con);
3684 /* If ARG0 is a constant, don't change things around;
3685 instead keep all the constant computations together. */
3687 if (TREE_CONSTANT (arg0))
3690 /* Otherwise return VAR +- (ARG1 +- CON). */
3691 TREE_OPERAND (t, 1) = tem
3692 = fold (build (code, TREE_TYPE (t), arg1, con));
3693 TREE_OPERAND (t, 0) = var;
3694 if (integer_zerop (tem)
3695 && (code == PLUS_EXPR || code == MINUS_EXPR))
3696 return convert (type, var);
3697 /* If we have x +/- (c - d) [c an explicit integer]
3698 change it to x -/+ (d - c) since if d is relocatable
3699 then the latter can be a single immediate insn
3700 and the former cannot. */
3701 if (TREE_CODE (tem) == MINUS_EXPR
3702 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3704 tree tem1 = TREE_OPERAND (tem, 1);
3705 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3706 TREE_OPERAND (tem, 0) = tem1;
3708 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3714 if (split_tree (arg1, code, &var, &con, &varsign))
3716 if (TREE_CONSTANT (arg1))
3721 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3723 /* EXPR is ARG0 +- (CON +- VAR). */
3724 if (TREE_CODE (t) == MINUS_EXPR
3725 && operand_equal_p (var, arg0, 0))
3727 /* If VAR and ARG0 cancel, return just CON or -CON. */
3728 if (code == PLUS_EXPR)
3729 return convert (TREE_TYPE (t), con);
3730 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3731 convert (TREE_TYPE (t), con)));
3735 = fold (build (code, TREE_TYPE (t), arg0, con));
3736 TREE_OPERAND (t, 1) = var;
3737 if (integer_zerop (TREE_OPERAND (t, 0))
3738 && TREE_CODE (t) == PLUS_EXPR)
3739 return convert (TREE_TYPE (t), var);
3744 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3745 if (TREE_CODE (arg1) == REAL_CST)
3747 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3749 t1 = const_binop (code, arg0, arg1, 0);
3750 if (t1 != NULL_TREE)
3752 /* The return value should always have
3753 the same type as the original expression. */
3754 TREE_TYPE (t1) = TREE_TYPE (t);
3760 if (! FLOAT_TYPE_P (type))
3762 if (! wins && integer_zerop (arg0))
3763 return build1 (NEGATE_EXPR, type, arg1);
3764 if (integer_zerop (arg1))
3765 return non_lvalue (convert (type, arg0));
3767 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3768 about the case where C is a constant, just try one of the
3769 four possibilities. */
3771 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3772 && operand_equal_p (TREE_OPERAND (arg0, 1),
3773 TREE_OPERAND (arg1, 1), 0))
3774 return fold (build (MULT_EXPR, type,
3775 fold (build (MINUS_EXPR, type,
3776 TREE_OPERAND (arg0, 0),
3777 TREE_OPERAND (arg1, 0))),
3778 TREE_OPERAND (arg0, 1)));
3780 /* Convert A - (-B) to A + B. */
3781 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3782 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3784 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3787 /* Except with IEEE floating point, 0-x equals -x. */
3788 if (! wins && real_zerop (arg0))
3789 return build1 (NEGATE_EXPR, type, arg1);
3790 /* Except with IEEE floating point, x-0 equals x. */
3791 if (real_zerop (arg1))
3792 return non_lvalue (convert (type, arg0));
3795 /* Fold &x - &x. This can happen from &x.foo - &x.
3796 This is unsafe for certain floats even in non-IEEE formats.
3797 In IEEE, it is unsafe because it does wrong for NaNs.
3798 Also note that operand_equal_p is always false if an operand
3801 if (operand_equal_p (arg0, arg1,
3802 FLOAT_TYPE_P (type) && ! flag_fast_math))
3803 return convert (type, integer_zero_node);
3808 if (! FLOAT_TYPE_P (type))
3810 if (integer_zerop (arg1))
3811 return omit_one_operand (type, arg1, arg0);
3812 if (integer_onep (arg1))
3813 return non_lvalue (convert (type, arg0));
3815 /* (a * (1 << b)) is (a << b) */
3816 if (TREE_CODE (arg1) == LSHIFT_EXPR
3817 && integer_onep (TREE_OPERAND (arg1, 0)))
3818 return fold (build (LSHIFT_EXPR, type, arg0,
3819 TREE_OPERAND (arg1, 1)));
3820 if (TREE_CODE (arg0) == LSHIFT_EXPR
3821 && integer_onep (TREE_OPERAND (arg0, 0)))
3822 return fold (build (LSHIFT_EXPR, type, arg1,
3823 TREE_OPERAND (arg0, 1)));
3827 /* x*0 is 0, except for IEEE floating point. */
3828 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3830 && real_zerop (arg1))
3831 return omit_one_operand (type, arg1, arg0);
3832 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3833 However, ANSI says we can drop signals,
3834 so we can do this anyway. */
3835 if (real_onep (arg1))
3836 return non_lvalue (convert (type, arg0));
3838 if (! wins && real_twop (arg1))
3840 tree arg = save_expr (arg0);
3841 return build (PLUS_EXPR, type, arg, arg);
3848 if (integer_all_onesp (arg1))
3849 return omit_one_operand (type, arg1, arg0);
3850 if (integer_zerop (arg1))
3851 return non_lvalue (convert (type, arg0));
3852 t1 = distribute_bit_expr (code, type, arg0, arg1);
3853 if (t1 != NULL_TREE)
3856 /* (a << C1) | (a >> C2) if A is unsigned and C1+C2 is the size of A
3857 is a rotate of A by C1 bits. */
3859 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3860 || TREE_CODE (arg0) == LSHIFT_EXPR)
3861 && (TREE_CODE (arg1) == RSHIFT_EXPR
3862 || TREE_CODE (arg1) == LSHIFT_EXPR)
3863 && TREE_CODE (arg0) != TREE_CODE (arg1)
3864 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3865 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3866 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3867 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3868 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3869 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3870 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3871 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3872 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3873 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3874 TREE_CODE (arg0) == LSHIFT_EXPR
3875 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
3880 if (integer_zerop (arg1))
3881 return non_lvalue (convert (type, arg0));
3882 if (integer_all_onesp (arg1))
3883 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3888 if (integer_all_onesp (arg1))
3889 return non_lvalue (convert (type, arg0));
3890 if (integer_zerop (arg1))
3891 return omit_one_operand (type, arg1, arg0);
3892 t1 = distribute_bit_expr (code, type, arg0, arg1);
3893 if (t1 != NULL_TREE)
3895 /* Simplify ((int)c & 0x377) into (int)c, if c is unsigned char. */
3896 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3897 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3899 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3900 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3901 && (~TREE_INT_CST_LOW (arg0)
3902 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3903 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3905 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3906 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3908 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3909 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3910 && (~TREE_INT_CST_LOW (arg1)
3911 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3912 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3916 case BIT_ANDTC_EXPR:
3917 if (integer_all_onesp (arg0))
3918 return non_lvalue (convert (type, arg1));
3919 if (integer_zerop (arg0))
3920 return omit_one_operand (type, arg0, arg1);
3921 if (TREE_CODE (arg1) == INTEGER_CST)
3923 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3924 code = BIT_AND_EXPR;
3929 case TRUNC_DIV_EXPR:
3930 case ROUND_DIV_EXPR:
3931 case FLOOR_DIV_EXPR:
3933 case EXACT_DIV_EXPR:
3935 if (integer_onep (arg1))
3936 return non_lvalue (convert (type, arg0));
3937 if (integer_zerop (arg1))
3940 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
3941 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
3942 expressions, which often appear in the offsets or sizes of
3943 objects with a varying size. Only deal with positive divisors
3944 and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
3946 Look for NOPs and SAVE_EXPRs inside. */
3948 if (TREE_CODE (arg1) == INTEGER_CST
3949 && tree_int_cst_lt (integer_zero_node, arg1))
3951 int have_save_expr = 0;
3952 tree c2 = integer_zero_node;
3955 if (TREE_CODE (xarg0) == SAVE_EXPR)
3956 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3960 if (TREE_CODE (xarg0) == PLUS_EXPR
3961 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3962 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
3963 else if (TREE_CODE (xarg0) == MINUS_EXPR
3964 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
3965 /* If we are doing this computation unsigned, the negate
3967 && ! TREE_UNSIGNED (type))
3969 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
3970 xarg0 = TREE_OPERAND (xarg0, 0);
3973 if (TREE_CODE (xarg0) == SAVE_EXPR)
3974 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3978 if (TREE_CODE (xarg0) == MULT_EXPR
3979 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
3980 && tree_int_cst_lt (integer_zero_node, TREE_OPERAND (xarg0, 1))
3981 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
3982 TREE_OPERAND (xarg0, 1), arg1, 1))
3983 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
3984 TREE_OPERAND (xarg0, 1), 1)))
3985 && (tree_int_cst_lt (integer_zero_node, c2)
3986 || integer_zerop (const_binop (TRUNC_MOD_EXPR, c2,
3989 tree outer_div = integer_one_node;
3990 tree c1 = TREE_OPERAND (xarg0, 1);
3993 /* If C3 > C1, set them equal and do a divide by
3994 C3/C1 at the end of the operation. */
3995 if (tree_int_cst_lt (c1, c3))
3996 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
3998 /* The result is A * (C1/C3) + (C2/C3). */
3999 t = fold (build (PLUS_EXPR, type,
4000 fold (build (MULT_EXPR, type,
4001 TREE_OPERAND (xarg0, 0),
4002 const_binop (code, c1, c3, 1))),
4003 const_binop (code, c2, c3, 1)));
4005 if (! integer_onep (outer_div))
4006 t = fold (build (code, type, t, outer_div));
4015 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
4016 #ifndef REAL_INFINITY
4017 if (TREE_CODE (arg1) == REAL_CST
4018 && real_zerop (arg1))
4021 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
4026 case FLOOR_MOD_EXPR:
4027 case ROUND_MOD_EXPR:
4028 case TRUNC_MOD_EXPR:
4029 if (integer_onep (arg1))
4030 return omit_one_operand (type, integer_zero_node, arg0);
4031 if (integer_zerop (arg1))
4034 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
4035 where C1 % C3 == 0. Handle similarly to the division case,
4036 but don't bother with SAVE_EXPRs. */
4038 if (TREE_CODE (arg1) == INTEGER_CST
4039 && ! integer_zerop (arg1))
4041 tree c2 = integer_zero_node;
4044 if (TREE_CODE (xarg0) == PLUS_EXPR
4045 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4046 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4047 else if (TREE_CODE (xarg0) == MINUS_EXPR
4048 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4049 && ! TREE_UNSIGNED (type))
4051 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4052 xarg0 = TREE_OPERAND (xarg0, 0);
4057 if (TREE_CODE (xarg0) == MULT_EXPR
4058 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4059 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
4060 TREE_OPERAND (xarg0, 1),
4062 && tree_int_cst_lt (integer_zero_node, c2))
4063 /* The result is (C2%C3). */
4064 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
4065 TREE_OPERAND (xarg0, 0));
4074 if (integer_zerop (arg1))
4075 return non_lvalue (convert (type, arg0));
4076 /* Since negative shift count is not well-defined,
4077 don't try to compute it in the compiler. */
4078 if (tree_int_cst_lt (arg1, integer_zero_node))
4083 if (operand_equal_p (arg0, arg1, 0))
4085 if (INTEGRAL_TYPE_P (type)
4086 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
4087 return omit_one_operand (type, arg1, arg0);
4091 if (operand_equal_p (arg0, arg1, 0))
4093 if (INTEGRAL_TYPE_P (type)
4094 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
4095 return omit_one_operand (type, arg1, arg0);
4098 case TRUTH_NOT_EXPR:
4099 /* Note that the operand of this must be an int
4100 and its values must be 0 or 1.
4101 ("true" is a fixed value perhaps depending on the language,
4102 but we don't handle values other than 1 correctly yet.) */
4103 return invert_truthvalue (arg0);
4105 case TRUTH_ANDIF_EXPR:
4106 /* Note that the operands of this must be ints
4107 and their values must be 0 or 1.
4108 ("true" is a fixed value perhaps depending on the language.) */
4109 /* If first arg is constant zero, return it. */
4110 if (integer_zerop (arg0))
4112 case TRUTH_AND_EXPR:
4113 /* If either arg is constant true, drop it. */
4114 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4115 return non_lvalue (arg1);
4116 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4117 return non_lvalue (arg0);
4118 /* If second arg is constant zero, result is zero, but first arg
4119 must be evaluated. */
4120 if (integer_zerop (arg1))
4121 return omit_one_operand (type, arg1, arg0);
4124 /* Check for the possibility of merging component references. If our
4125 lhs is another similar operation, try to merge its rhs with our
4126 rhs. Then try to merge our lhs and rhs. */
4129 if (TREE_CODE (arg0) == code)
4131 tem = fold_truthop (code, type,
4132 TREE_OPERAND (arg0, 1), arg1);
4134 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
4137 /* Check for things like (A || B) && (A || C). We can convert
4138 this to A || (B && C). Note that either operator can be any of
4139 the four truth and/or operations and the transformation will
4141 if (TREE_CODE (arg0) == TREE_CODE (arg1)
4142 && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
4143 || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
4144 || TREE_CODE (arg0) == TRUTH_AND_EXPR
4145 || TREE_CODE (arg0) == TRUTH_OR_EXPR))
4147 tree a00 = TREE_OPERAND (arg0, 0);
4148 tree a01 = TREE_OPERAND (arg0, 1);
4149 tree a10 = TREE_OPERAND (arg1, 0);
4150 tree a11 = TREE_OPERAND (arg1, 1);
4151 tree common = 0, op0, op1;
4153 if (operand_equal_p (a00, a10, 0))
4154 common = a00, op0 = a01, op1 = a11;
4155 else if (operand_equal_p (a00, a11, 0))
4156 common = a00, op0 = a01, op1 = a10;
4157 else if (operand_equal_p (a01, a10, 0))
4158 common = a01, op0 = a00, op1 = a11;
4159 else if (operand_equal_p (a01, a11, 0))
4160 common = a01, op0 = a00, op1 = a10;
4163 return fold (build (TREE_CODE (arg0), type, common,
4164 fold (build (code, type, op0, op1))));
4167 tem = fold_truthop (code, type, arg0, arg1);
4173 case TRUTH_ORIF_EXPR:
4174 /* Note that the operands of this must be ints
4175 and their values must be 0 or true.
4176 ("true" is a fixed value perhaps depending on the language.) */
4177 /* If first arg is constant true, return it. */
4178 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4181 /* If either arg is constant zero, drop it. */
4182 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
4183 return non_lvalue (arg1);
4184 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
4185 return non_lvalue (arg0);
4186 /* If second arg is constant true, result is true, but we must
4187 evaluate first arg. */
4188 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4189 return omit_one_operand (type, arg1, arg0);
4192 case TRUTH_XOR_EXPR:
4193 /* If either arg is constant zero, drop it. */
4194 if (integer_zerop (arg0))
4195 return non_lvalue (arg1);
4196 if (integer_zerop (arg1))
4197 return non_lvalue (arg0);
4198 /* If either arg is constant true, this is a logical inversion. */
4199 if (integer_onep (arg0))
4200 return non_lvalue (invert_truthvalue (arg1));
4201 if (integer_onep (arg1))
4202 return non_lvalue (invert_truthvalue (arg0));
4211 /* If one arg is a constant integer, put it last. */
4212 if (TREE_CODE (arg0) == INTEGER_CST
4213 && TREE_CODE (arg1) != INTEGER_CST)
4215 TREE_OPERAND (t, 0) = arg1;
4216 TREE_OPERAND (t, 1) = arg0;
4217 arg0 = TREE_OPERAND (t, 0);
4218 arg1 = TREE_OPERAND (t, 1);
4219 code = swap_tree_comparison (code);
4220 TREE_SET_CODE (t, code);
4223 /* Convert foo++ == CONST into ++foo == CONST + INCR.
4224 First, see if one arg is constant; find the constant arg
4225 and the other one. */
4227 tree constop = 0, varop;
4230 if (TREE_CONSTANT (arg1))
4231 constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
4232 if (TREE_CONSTANT (arg0))
4233 constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
4235 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
4237 /* This optimization is invalid for ordered comparisons
4238 if CONST+INCR overflows or if foo+incr might overflow.
4239 This optimization is invalid for floating point due to rounding.
4240 For pointer types we assume overflow doesn't happen. */
4241 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4242 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4243 && (code == EQ_EXPR || code == NE_EXPR)))
4246 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
4247 constop, TREE_OPERAND (varop, 1)));
4248 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
4249 *constoploc = newconst;
4253 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
4255 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4256 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4257 && (code == EQ_EXPR || code == NE_EXPR)))
4260 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
4261 constop, TREE_OPERAND (varop, 1)));
4262 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
4263 *constoploc = newconst;
4269 /* Change X >= CST to X > (CST - 1) if CST is positive. */
4270 if (TREE_CODE (arg1) == INTEGER_CST
4271 && TREE_CODE (arg0) != INTEGER_CST
4272 && ! tree_int_cst_lt (arg1, integer_one_node))
4274 switch (TREE_CODE (t))
4278 TREE_SET_CODE (t, code);
4279 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4280 TREE_OPERAND (t, 1) = arg1;
4285 TREE_SET_CODE (t, code);
4286 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4287 TREE_OPERAND (t, 1) = arg1;
4291 /* If this is an EQ or NE comparison with zero and ARG0 is
4292 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
4293 two operations, but the latter can be done in one less insn
4294 one machine that have only two-operand insns or on which a
4295 constant cannot be the first operand. */
4296 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
4297 && TREE_CODE (arg0) == BIT_AND_EXPR)
4299 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
4300 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
4302 fold (build (code, type,
4303 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4305 TREE_TYPE (TREE_OPERAND (arg0, 0)),
4306 TREE_OPERAND (arg0, 1),
4307 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
4308 convert (TREE_TYPE (arg0),
4311 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
4312 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
4314 fold (build (code, type,
4315 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4317 TREE_TYPE (TREE_OPERAND (arg0, 1)),
4318 TREE_OPERAND (arg0, 0),
4319 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
4320 convert (TREE_TYPE (arg0),
4325 /* If this is an NE or EQ comparison of zero against the result of a
4326 signed MOD operation whose second operand is a power of 2, make
4327 the MOD operation unsigned since it is simpler and equivalent. */
4328 if ((code == NE_EXPR || code == EQ_EXPR)
4329 && integer_zerop (arg1)
4330 && ! TREE_UNSIGNED (TREE_TYPE (arg0))
4331 && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
4332 || TREE_CODE (arg0) == CEIL_MOD_EXPR
4333 || TREE_CODE (arg0) == FLOOR_MOD_EXPR
4334 || TREE_CODE (arg0) == ROUND_MOD_EXPR)
4335 && integer_pow2p (TREE_OPERAND (arg0, 1)))
4337 tree newtype = unsigned_type (TREE_TYPE (arg0));
4338 tree newmod = build (TREE_CODE (arg0), newtype,
4339 convert (newtype, TREE_OPERAND (arg0, 0)),
4340 convert (newtype, TREE_OPERAND (arg0, 1)));
4342 return build (code, type, newmod, convert (newtype, arg1));
4345 /* If this is an NE comparison of zero with an AND of one, remove the
4346 comparison since the AND will give the correct value. */
4347 if (code == NE_EXPR && integer_zerop (arg1)
4348 && TREE_CODE (arg0) == BIT_AND_EXPR
4349 && integer_onep (TREE_OPERAND (arg0, 1)))
4350 return convert (type, arg0);
4352 /* If we have (A & C) == C where C is a power of 2, convert this into
4353 (A & C) != 0. Similarly for NE_EXPR. */
4354 if ((code == EQ_EXPR || code == NE_EXPR)
4355 && TREE_CODE (arg0) == BIT_AND_EXPR
4356 && integer_pow2p (TREE_OPERAND (arg0, 1))
4357 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
4358 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
4359 arg0, integer_zero_node);
4361 /* Simplify comparison of something with itself. (For IEEE
4362 floating-point, we can only do some of these simplifications.) */
4363 if (operand_equal_p (arg0, arg1, 0))
4370 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4372 t = build_int_2 (1, 0);
4373 TREE_TYPE (t) = type;
4377 TREE_SET_CODE (t, code);
4381 /* For NE, we can only do this simplification if integer. */
4382 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4384 /* ... fall through ... */
4387 t = build_int_2 (0, 0);
4388 TREE_TYPE (t) = type;
4393 /* An unsigned comparison against 0 can be simplified. */
4394 if (integer_zerop (arg1)
4395 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
4396 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
4397 && TREE_UNSIGNED (TREE_TYPE (arg1)))
4399 switch (TREE_CODE (t))
4403 TREE_SET_CODE (t, NE_EXPR);
4407 TREE_SET_CODE (t, EQ_EXPR);
4410 return omit_one_operand (type,
4411 convert (type, integer_one_node),
4414 return omit_one_operand (type,
4415 convert (type, integer_zero_node),
4420 /* If we are comparing an expression that just has comparisons
4421 of two integer values, arithmetic expressions of those comparisons,
4422 and constants, we can simplify it. There are only three cases
4423 to check: the two values can either be equal, the first can be
4424 greater, or the second can be greater. Fold the expression for
4425 those three values. Since each value must be 0 or 1, we have
4426 eight possibilities, each of which corresponds to the constant 0
4427 or 1 or one of the six possible comparisons.
4429 This handles common cases like (a > b) == 0 but also handles
4430 expressions like ((x > y) - (y > x)) > 0, which supposedly
4431 occur in macroized code. */
4433 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
4435 tree cval1 = 0, cval2 = 0;
4438 if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
4439 /* Don't handle degenerate cases here; they should already
4440 have been handled anyway. */
4441 && cval1 != 0 && cval2 != 0
4442 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
4443 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
4444 && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
4445 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
4446 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
4448 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
4449 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
4451 /* We can't just pass T to eval_subst in case cval1 or cval2
4452 was the same as ARG1. */
4455 = fold (build (code, type,
4456 eval_subst (arg0, cval1, maxval, cval2, minval),
4459 = fold (build (code, type,
4460 eval_subst (arg0, cval1, maxval, cval2, maxval),
4463 = fold (build (code, type,
4464 eval_subst (arg0, cval1, minval, cval2, maxval),
4467 /* All three of these results should be 0 or 1. Confirm they
4468 are. Then use those values to select the proper code
4471 if ((integer_zerop (high_result)
4472 || integer_onep (high_result))
4473 && (integer_zerop (equal_result)
4474 || integer_onep (equal_result))
4475 && (integer_zerop (low_result)
4476 || integer_onep (low_result)))
4478 /* Make a 3-bit mask with the high-order bit being the
4479 value for `>', the next for '=', and the low for '<'. */
4480 switch ((integer_onep (high_result) * 4)
4481 + (integer_onep (equal_result) * 2)
4482 + integer_onep (low_result))
4486 return omit_one_operand (type, integer_zero_node, arg0);
4507 return omit_one_operand (type, integer_one_node, arg0);
4510 t = build (code, type, cval1, cval2);
4512 return save_expr (t);
4519 /* If this is a comparison of a field, we may be able to simplify it. */
4520 if ((TREE_CODE (arg0) == COMPONENT_REF
4521 || TREE_CODE (arg0) == BIT_FIELD_REF)
4522 && (code == EQ_EXPR || code == NE_EXPR)
4523 /* Handle the constant case even without -O
4524 to make sure the warnings are given. */
4525 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
4527 t1 = optimize_bit_field_compare (code, type, arg0, arg1);
4531 /* If this is a comparison of complex values and either or both
4532 sizes are a COMPLEX_EXPR, it is best to split up the comparisons
4533 and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR. This
4534 may prevent needless evaluations. */
4535 if ((code == EQ_EXPR || code == NE_EXPR)
4536 && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
4537 && (TREE_CODE (arg0) == COMPLEX_EXPR
4538 || TREE_CODE (arg1) == COMPLEX_EXPR))
4540 tree subtype = TREE_TYPE (TREE_TYPE (arg0));
4541 tree real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
4542 tree imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
4543 tree real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
4544 tree imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));
4546 return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
4549 fold (build (code, type, real0, real1)),
4550 fold (build (code, type, imag0, imag1))));
4553 /* From here on, the only cases we handle are when the result is
4554 known to be a constant.
4556 To compute GT, swap the arguments and do LT.
4557 To compute GE, do LT and invert the result.
4558 To compute LE, swap the arguments, do LT and invert the result.
4559 To compute NE, do EQ and invert the result.
4561 Therefore, the code below must handle only EQ and LT. */
4563 if (code == LE_EXPR || code == GT_EXPR)
4565 tem = arg0, arg0 = arg1, arg1 = tem;
4566 code = swap_tree_comparison (code);
4569 /* Note that it is safe to invert for real values here because we
4570 will check below in the one case that it matters. */
4573 if (code == NE_EXPR || code == GE_EXPR)
4576 code = invert_tree_comparison (code);
4579 /* Compute a result for LT or EQ if args permit;
4580 otherwise return T. */
4581 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
4583 if (code == EQ_EXPR)
4584 t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
4585 == TREE_INT_CST_LOW (arg1))
4586 && (TREE_INT_CST_HIGH (arg0)
4587 == TREE_INT_CST_HIGH (arg1)),
4590 t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
4591 ? INT_CST_LT_UNSIGNED (arg0, arg1)
4592 : INT_CST_LT (arg0, arg1)),
4596 /* Assume a nonexplicit constant cannot equal an explicit one,
4597 since such code would be undefined anyway.
4598 Exception: on sysvr4, using #pragma weak,
4599 a label can come out as 0. */
4600 else if (TREE_CODE (arg1) == INTEGER_CST
4601 && !integer_zerop (arg1)
4602 && TREE_CONSTANT (arg0)
4603 && TREE_CODE (arg0) == ADDR_EXPR
4605 t1 = build_int_2 (0, 0);
4607 /* Two real constants can be compared explicitly. */
4608 else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
4610 /* If either operand is a NaN, the result is false with two
4611 exceptions: First, an NE_EXPR is true on NaNs, but that case
4612 is already handled correctly since we will be inverting the
4613 result for NE_EXPR. Second, if we had inverted a LE_EXPR
4614 or a GE_EXPR into a LT_EXPR, we must return true so that it
4615 will be inverted into false. */
4617 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
4618 || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
4619 t1 = build_int_2 (invert && code == LT_EXPR, 0);
4621 else if (code == EQ_EXPR)
4622 t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
4623 TREE_REAL_CST (arg1)),
4626 t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
4627 TREE_REAL_CST (arg1)),
4631 if (t1 == NULL_TREE)
4635 TREE_INT_CST_LOW (t1) ^= 1;
4637 TREE_TYPE (t1) = type;
4641 /* Pedantic ANSI C says that a conditional expression is never an lvalue,
4642 so all simple results must be passed through pedantic_non_lvalue. */
4643 if (TREE_CODE (arg0) == INTEGER_CST)
4644 return pedantic_non_lvalue
4645 (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
4646 else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
4647 return pedantic_non_lvalue (omit_one_operand (type, arg1, arg0));
4649 /* If the second operand is zero, invert the comparison and swap
4650 the second and third operands. Likewise if the second operand
4651 is constant and the third is not or if the third operand is
4652 equivalent to the first operand of the comparison. */
4654 if (integer_zerop (arg1)
4655 || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
4656 || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4657 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4658 TREE_OPERAND (t, 2),
4659 TREE_OPERAND (arg0, 1))))
4661 /* See if this can be inverted. If it can't, possibly because
4662 it was a floating-point inequality comparison, don't do
4664 tem = invert_truthvalue (arg0);
4666 if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
4668 arg0 = TREE_OPERAND (t, 0) = tem;
4669 TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
4670 TREE_OPERAND (t, 2) = arg1;
4671 arg1 = TREE_OPERAND (t, 1);
4675 /* If we have A op B ? A : C, we may be able to convert this to a
4676 simpler expression, depending on the operation and the values
4677 of B and C. IEEE floating point prevents this though,
4678 because A or B might be -0.0 or a NaN. */
4680 if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
4681 && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
4682 || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
4684 && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
4685 arg1, TREE_OPERAND (arg0, 1)))
4687 tree arg2 = TREE_OPERAND (t, 2);
4688 enum tree_code comp_code = TREE_CODE (arg0);
4690 /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
4691 depending on the comparison operation. */
4692 if (integer_zerop (TREE_OPERAND (arg0, 1))
4693 && TREE_CODE (arg2) == NEGATE_EXPR
4694 && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
4698 return pedantic_non_lvalue
4699 (fold (build1 (NEGATE_EXPR, type, arg1)));
4701 return pedantic_non_lvalue (convert (type, arg1));
4704 return pedantic_non_lvalue
4705 (fold (build1 (ABS_EXPR, type, arg1)));
4708 return pedantic_non_lvalue
4709 (fold (build1 (NEGATE_EXPR, type,
4710 fold (build1 (ABS_EXPR, type, arg1)))));
4713 /* If this is A != 0 ? A : 0, this is simply A. For ==, it is
4716 if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
4718 if (comp_code == NE_EXPR)
4719 return pedantic_non_lvalue (convert (type, arg1));
4720 else if (comp_code == EQ_EXPR)
4721 return pedantic_non_lvalue (convert (type, integer_zero_node));
4724 /* If this is A op B ? A : B, this is either A, B, min (A, B),
4725 or max (A, B), depending on the operation. */
4727 if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
4728 arg2, TREE_OPERAND (arg0, 0)))
4732 return pedantic_non_lvalue (convert (type, arg2));
4734 return pedantic_non_lvalue (convert (type, arg1));
4737 return pedantic_non_lvalue
4738 (fold (build (MIN_EXPR, type, arg1, arg2)));
4741 return pedantic_non_lvalue
4742 (fold (build (MAX_EXPR, type, arg1, arg2)));
4745 /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
4746 we might still be able to simplify this. For example,
4747 if C1 is one less or one more than C2, this might have started
4748 out as a MIN or MAX and been transformed by this function.
4749 Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE. */
4751 if (INTEGRAL_TYPE_P (type)
4752 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
4753 && TREE_CODE (arg2) == INTEGER_CST)
4757 /* We can replace A with C1 in this case. */
4758 arg1 = TREE_OPERAND (t, 1)
4759 = convert (type, TREE_OPERAND (arg0, 1));
4763 /* If C1 is C2 + 1, this is min(A, C2). */
4764 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4765 && operand_equal_p (TREE_OPERAND (arg0, 1),
4766 const_binop (PLUS_EXPR, arg2,
4767 integer_one_node, 0), 1))
4768 return pedantic_non_lvalue
4769 (fold (build (MIN_EXPR, type, arg1, arg2)));
4773 /* If C1 is C2 - 1, this is min(A, C2). */
4774 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4775 && operand_equal_p (TREE_OPERAND (arg0, 1),
4776 const_binop (MINUS_EXPR, arg2,
4777 integer_one_node, 0), 1))
4778 return pedantic_non_lvalue
4779 (fold (build (MIN_EXPR, type, arg1, arg2)));
4783 /* If C1 is C2 - 1, this is max(A, C2). */
4784 if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
4785 && operand_equal_p (TREE_OPERAND (arg0, 1),
4786 const_binop (MINUS_EXPR, arg2,
4787 integer_one_node, 0), 1))
4788 return pedantic_non_lvalue
4789 (fold (build (MAX_EXPR, type, arg1, arg2)));
4793 /* If C1 is C2 + 1, this is max(A, C2). */
4794 if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
4795 && operand_equal_p (TREE_OPERAND (arg0, 1),
4796 const_binop (PLUS_EXPR, arg2,
4797 integer_one_node, 0), 1))
4798 return pedantic_non_lvalue
4799 (fold (build (MAX_EXPR, type, arg1, arg2)));
4804 /* Convert A ? 1 : 0 to simply A. */
4805 if (integer_onep (TREE_OPERAND (t, 1))
4806 && integer_zerop (TREE_OPERAND (t, 2))
4807 /* If we try to convert TREE_OPERAND (t, 0) to our type, the
4808 call to fold will try to move the conversion inside
4809 a COND, which will recurse. In that case, the COND_EXPR
4810 is probably the best choice, so leave it alone. */
4811 && type == TREE_TYPE (arg0))
4812 return pedantic_non_lvalue (arg0);
4815 /* Look for expressions of the form A & 2 ? 2 : 0. The result of this
4816 operation is simply A & 2. */
4818 if (integer_zerop (TREE_OPERAND (t, 2))
4819 && TREE_CODE (arg0) == NE_EXPR
4820 && integer_zerop (TREE_OPERAND (arg0, 1))
4821 && integer_pow2p (arg1)
4822 && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
4823 && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
4825 return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));
4830 /* When pedantic, a compound expression can be neither an lvalue
4831 nor an integer constant expression. */
4832 if (TREE_SIDE_EFFECTS (arg0) || pedantic)
4834 /* Don't let (0, 0) be null pointer constant. */
4835 if (integer_zerop (arg1))
4836 return non_lvalue (arg1);
4841 return build_complex (arg0, arg1);
4845 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
4847 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
4848 return omit_one_operand (type, TREE_OPERAND (arg0, 0),
4849 TREE_OPERAND (arg0, 1));
4850 else if (TREE_CODE (arg0) == COMPLEX_CST)
4851 return TREE_REALPART (arg0);
4852 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
4853 return fold (build (TREE_CODE (arg0), type,
4854 fold (build1 (REALPART_EXPR, type,
4855 TREE_OPERAND (arg0, 0))),
4856 fold (build1 (REALPART_EXPR,
4857 type, TREE_OPERAND (arg0, 1)))));
4861 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
4862 return convert (type, integer_zero_node);
4863 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
4864 return omit_one_operand (type, TREE_OPERAND (arg0, 1),
4865 TREE_OPERAND (arg0, 0));
4866 else if (TREE_CODE (arg0) == COMPLEX_CST)
4867 return TREE_IMAGPART (arg0);
4868 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
4869 return fold (build (TREE_CODE (arg0), type,
4870 fold (build1 (IMAGPART_EXPR, type,
4871 TREE_OPERAND (arg0, 0))),
4872 fold (build1 (IMAGPART_EXPR, type,
4873 TREE_OPERAND (arg0, 1)))));
4878 } /* switch (code) */