/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 1988, 1992, 1993 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
/*@@ Fix lossage on folding division of big integers.  */

/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */
/* The entry points in this file are fold, size_int and size_binop.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.  */
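
/* Illustrative sketch (for orientation only, not part of GNU CC proper):
   how a caller might use these entry points.  The tree codes and
   constructors are the real ones declared in tree.h; the variable names
   are invented for the example, which is compiled out.  */
#if 0
{
  /* Build the constant 4 with type `sizetype'.  */
  tree four = size_int (4);

  /* Fold 4 + 8 at compile time; both operands are constants,
     so the result is the constant size_int (12).  */
  tree twelve = size_binop (PLUS_EXPR, four, size_int (8));

  /* fold simplifies a general tree, e.g. (exp + 0) becomes exp.  */
  tree simplified = fold (build (PLUS_EXPR, sizetype,
                                 twelve, size_int (0)));
}
#endif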
/* Handle floating overflow for `const_binop'.  */
static jmp_buf float_error;

static void encode	PROTO((short *, HOST_WIDE_INT, HOST_WIDE_INT));
static void decode	PROTO((short *, HOST_WIDE_INT *, HOST_WIDE_INT *));
static int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
				       HOST_WIDE_INT, HOST_WIDE_INT,
				       HOST_WIDE_INT, HOST_WIDE_INT *,
				       HOST_WIDE_INT *, HOST_WIDE_INT *,
				       HOST_WIDE_INT *));
static int split_tree	PROTO((tree, enum tree_code, tree *, tree *, int *));
static tree const_binop PROTO((enum tree_code, tree, tree, int));
static tree fold_convert PROTO((tree, tree));
static enum tree_code invert_tree_comparison PROTO((enum tree_code));
static enum tree_code swap_tree_comparison PROTO((enum tree_code));
static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
static int twoval_comparison_p PROTO((tree, tree *, tree *));
static tree eval_subst	PROTO((tree, tree, tree, tree, tree));
static tree omit_one_operand PROTO((tree, tree, tree));
static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
					      tree, tree));
static tree decode_field_reference PROTO((tree, int *, int *,
					  enum machine_mode *, int *,
					  int *, tree *));
static int all_ones_mask_p PROTO((tree, int));
static int simple_operand_p PROTO((tree));
static tree range_test	PROTO((enum tree_code, tree, enum tree_code,
			       enum tree_code, tree, tree, tree));
static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
/* Yield nonzero if a signed left shift of A by B bits overflows.  */
#define left_shift_overflows(a, b)  ((a) != ((a) << (b)) >> (b))
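
/* Worked example (for illustration): with 8-bit signed values,
   left_shift_overflows (0x50, 2) is nonzero, because 0x50 << 2 wraps
   to 0x40, and shifting that back right gives 0x10 != 0x50.  With a
   small operand, 0x05 << 2 == 0x14 shifts back to 0x05, so no
   overflow is reported.  (The macro of course works at the full width
   of its operands; 8 bits just keeps the arithmetic readable.)  */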
/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
   Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
   Then this yields nonzero if overflow occurred during the addition.
   Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
   Use `^' to test whether signs differ, and `< 0' to isolate the sign.  */
#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
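
/* Worked example (for illustration), again in 8 bits: adding
   a = 0x70 (112) and b = 0x30 (48) wraps to sum = 0xA0 (-96).
   a ^ b = 0x40, so ~(a ^ b) has its sign bit set (the signs agree);
   a ^ sum = 0xD0 also has its sign bit set (a and sum disagree); the
   AND of the two is negative, so the macro reports overflow.  Adding
   0x70 and 0xD0 (-48) instead gives 0x40 with no overflow: the signs
   of the addends differ, so ~(a ^ b) has a clear sign bit and the
   macro yields 0.  */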
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer as MAX_SHORTS shorts,
   with only 8 bits stored in each short, as a positive number.  */

/* Unpack a two-word integer into MAX_SHORTS shorts.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   SHORTS points to the array of shorts.  */
static void
encode (shorts, low, hi)
     short *shorts;
     HOST_WIDE_INT low, hi;
{
  register int i;

  for (i = 0; i < MAX_SHORTS / 2; i++)
    {
      shorts[i] = (low >> (i * 8)) & 0xff;
      shorts[i + MAX_SHORTS / 2] = (hi >> (i * 8)) & 0xff;
    }
}

/* Pack an array of MAX_SHORTS shorts into a two-word integer.
   SHORTS points to the array of shorts.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (shorts, low, hi)
     short *shorts;
     HOST_WIDE_INT *low, *hi;
{
  register int i;
  HOST_WIDE_INT lv = 0, hv = 0;

  for (i = 0; i < MAX_SHORTS / 2; i++)
    {
      lv |= (HOST_WIDE_INT) shorts[i] << (i * 8);
      hv |= (HOST_WIDE_INT) shorts[i + MAX_SHORTS / 2] << (i * 8);
    }

  *low = lv, *hi = hv;
}
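
/* Round-trip sketch (for illustration; assumes a 32-bit HOST_WIDE_INT,
   so MAX_SHORTS == 8).  The pair low = 0x01020304, hi = 0x0A0B0C0D
   encodes as the byte array
       { 0x04, 0x03, 0x02, 0x01, 0x0D, 0x0C, 0x0B, 0x0A }
   -- least significant byte first, low word in the first half -- and
   decode reassembles exactly the original pair.  */
#if 0
{
  short s[MAX_SHORTS];
  HOST_WIDE_INT lo, hi;
  encode (s, 0x01020304, 0x0A0B0C0D);
  decode (s, &lo, &hi);   /* lo == 0x01020304, hi == 0x0A0B0C0D */
}
#endif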
/* Make the integer constant T valid for its type
   by setting to 0 or 1 all the bits in the constant
   that don't belong in the type.
   Yield 1 if a signed overflow occurs, 0 otherwise.
   If OVERFLOW is nonzero, a signed overflow has already occurred
   in calculating T, so propagate it.  */
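
/* Worked example (for illustration): suppose T holds the value 0x1FF
   but its type is a signed 8-bit type (prec == 8).  The bits above
   bit 7 are cleared, leaving 0xFF; since bit 7 (the type's sign bit)
   is set, the value is then sign-extended to -1.  The function
   reports signed overflow because the stored value no longer equals
   the original 0x1FF.  */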
int
force_fit_type (t, overflow)
     tree t;
     int overflow;
{
  HOST_WIDE_INT low, high;
  register int prec;

  if (TREE_CODE (t) != INTEGER_CST)
    return overflow;

  low = TREE_INT_CST_LOW (t);
  high = TREE_INT_CST_HIGH (t);

  if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
    prec = POINTER_SIZE;
  else
    prec = TYPE_PRECISION (TREE_TYPE (t));

  /* First clear all bits that are beyond the type's precision.  */

  if (prec == 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      TREE_INT_CST_HIGH (t)
	&= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
    }
  else
    {
      TREE_INT_CST_HIGH (t) = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
	TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  /* Unsigned types do not suffer sign extension or overflow.  */
  if (TREE_UNSIGNED (TREE_TYPE (t)))
    return 0;

  /* If the value's sign bit is set, extend the sign.  */
  if (prec != 2 * HOST_BITS_PER_WIDE_INT
      && (prec > HOST_BITS_PER_WIDE_INT
	  ? (TREE_INT_CST_HIGH (t)
	     & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
	  : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
    {
      /* Value is negative:
	 set to 1 all the bits that are outside this type's precision.  */
      if (prec > HOST_BITS_PER_WIDE_INT)
	{
	  TREE_INT_CST_HIGH (t)
	    |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
	}
      else
	{
	  TREE_INT_CST_HIGH (t) = -1;
	  if (prec < HOST_BITS_PER_WIDE_INT)
	    TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
	}
    }

  /* Yield nonzero if signed overflow occurred.  */
  return
    ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
     != 0);
}
/* Add two doubleword integers with doubleword result.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */
int
add_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  short arg1[MAX_SHORTS];
  short arg2[MAX_SHORTS];
  register int carry = 0;
  register int i;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  for (i = 0; i < MAX_SHORTS; i++)
    {
      carry += arg1[i] + arg2[i];
      arg1[i] = carry & 0xff;
      carry >>= 8;
    }

  decode (arg1, lv, hv);
  return overflow_sum_sign (h1, h2, *hv);
}
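
/* Usage sketch (for illustration; the values assume a 32-bit
   HOST_WIDE_INT).  Adding {low = 0xFFFFFFFF, high = 0x7FFFFFFF} and
   {low = 1, high = 0} produces {low = 0, high = 0x80000000}; both
   high words were non-negative but the result's high word is
   negative, so the return value is nonzero, flagging signed
   overflow.  */
#if 0
{
  HOST_WIDE_INT lo, hi;
  int overflowed = add_double (0xFFFFFFFF, 0x7FFFFFFF, 1, 0, &lo, &hi);
  /* overflowed != 0; lo == 0; hi == 0x80000000 (negative).  */
}
#endif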
/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */
int
neg_double (l1, h1, lv, hv)
     HOST_WIDE_INT l1, h1;
     HOST_WIDE_INT *lv, *hv;
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = - l1;
      *hv = ~ h1;
      return 0;
    }
}
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.
   We use the 8-shorts representation internally.  */
int
mul_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  short arg1[MAX_SHORTS];
  short arg2[MAX_SHORTS];
  short prod[MAX_SHORTS * 2];
  register int carry = 0;
  register int i, j, k;
  HOST_WIDE_INT toplow, tophigh, neglow, neghigh;

  /* These cases are used extensively, arising from pointer combinations.  */
  if (h2 == 0)
    {
      if (l2 == 2)
	{
	  int overflow = left_shift_overflows (h1, 1);
	  unsigned HOST_WIDE_INT temp = l1 + l1;
	  *hv = (h1 << 1) + (temp < l1);
	  *lv = temp;
	  return overflow;
	}
      if (l2 == 4)
	{
	  int overflow = left_shift_overflows (h1, 2);
	  unsigned HOST_WIDE_INT temp = l1 + l1;
	  h1 = (h1 << 2) + ((temp < l1) << 1);
	  l1 = temp;
	  temp += temp;
	  h1 += (temp < l1);
	  *lv = temp;
	  *hv = h1;
	  return overflow;
	}
      if (l2 == 8)
	{
	  int overflow = left_shift_overflows (h1, 3);
	  unsigned HOST_WIDE_INT temp = l1 + l1;
	  h1 = (h1 << 3) + ((temp < l1) << 2);
	  l1 = temp;
	  temp += temp;
	  h1 += (temp < l1) << 1;
	  l1 = temp;
	  temp += temp;
	  h1 += (temp < l1);
	  *lv = temp;
	  *hv = h1;
	  return overflow;
	}
    }

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  bzero (prod, sizeof prod);

  for (i = 0; i < MAX_SHORTS; i++)
    for (j = 0; j < MAX_SHORTS; j++)
      {
	k = i + j;
	carry = arg1[i] * arg2[j];
	while (carry)
	  {
	    carry += prod[k];
	    prod[k] = carry & 0xff;
	    carry >>= 8;
	    k++;
	  }
      }

  decode (prod, lv, hv);	/* This ignores
				   prod[MAX_SHORTS] -> prod[MAX_SHORTS*2-1] */

  /* Check for overflow by calculating the top half of the answer in full;
     it should agree with the low half's sign bit.  */
  decode (prod + MAX_SHORTS, &toplow, &tophigh);
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
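
/* Note on the final overflow test (added commentary): after the
   multiply, TOPLOW/TOPHIGH hold the exact high half of the product,
   adjusted for the signed interpretation of the operands.  If the
   doubleword result *LV/*HV is non-negative, the high half must be
   all zero bits (toplow | tophigh == 0); if it is negative, the high
   half must be all one bits (~(toplow & tophigh) == 0).  Anything
   else means the true product did not fit in two words, so signed
   overflow is reported.  */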
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
void
lshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  short arg1[MAX_SHORTS];
  register int i;
  register int carry;

  if (count < 0)
    {
      rshift_double (l1, h1, - count, prec, lv, hv, arith);
      return;
    }

  encode (arg1, l1, h1);

  if (count > prec)
    count = prec;

  while (count > 0)
    {
      carry = 0;
      for (i = 0; i < MAX_SHORTS; i++)
	{
	  carry += arg1[i] << 1;
	  arg1[i] = carry & 0xff;
	  carry >>= 8;
	}
      count--;
    }

  decode (arg1, lv, hv);
}
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
void
rshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  short arg1[MAX_SHORTS];
  register int i;
  register int carry;

  encode (arg1, l1, h1);

  if (count > prec)
    count = prec;

  while (count > 0)
    {
      carry = arith && arg1[MAX_SHORTS - 1] >> 7;
      for (i = MAX_SHORTS - 1; i >= 0; i--)
	{
	  carry <<= 8;
	  carry += arg1[i];
	  arg1[i] = (carry >> 1) & 0xff;
	}
      count--;
    }

  decode (arg1, lv, hv);
}
/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
void
lrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  short arg1[MAX_SHORTS];
  register int i;
  register int carry;

  if (count < 0)
    {
      rrotate_double (l1, h1, - count, prec, lv, hv);
      return;
    }

  encode (arg1, l1, h1);

  if (count > prec)
    count = prec;

  carry = arg1[MAX_SHORTS - 1] >> 7;
  while (count > 0)
    {
      for (i = 0; i < MAX_SHORTS; i++)
	{
	  carry += arg1[i] << 1;
	  arg1[i] = carry & 0xff;
	  carry >>= 8;
	}
      count--;
    }

  decode (arg1, lv, hv);
}
/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */
void
rrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  short arg1[MAX_SHORTS];
  register int i;
  register int carry;

  encode (arg1, l1, h1);

  if (count > prec)
    count = prec;

  carry = arg1[0] & 1;
  while (count > 0)
    {
      for (i = MAX_SHORTS - 1; i >= 0; i--)
	{
	  carry <<= 8;
	  carry += arg1[i];
	  arg1[i] = (carry >> 1) & 0xff;
	}
      count--;
    }

  decode (arg1, lv, hv);
}
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */
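
/* Worked example of the rounding modes (for illustration), dividing
   -7 by 2:
     TRUNC_DIV_EXPR  rounds toward zero:              quo -3, rem -1
     FLOOR_DIV_EXPR  rounds toward negative infinity: quo -4, rem  1
     CEIL_DIV_EXPR   rounds toward positive infinity: quo -3, rem -1
     ROUND_DIV_EXPR  rounds to the closest integer; -3.5 is exactly
		     half way, and the code below rounds it away from
		     zero, giving quo -4, rem 1.
   In every case num == quo * den + rem holds.  */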
static int
div_and_round_double (code, uns,
		      lnum_orig, hnum_orig, lden_orig, hden_orig,
		      lquo, hquo, lrem, hrem)
     enum tree_code code;
     int uns;
     HOST_WIDE_INT lnum_orig, hnum_orig;	/* num == numerator == dividend */
     HOST_WIDE_INT lden_orig, hden_orig;	/* den == denominator == divisor */
     HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
{
  int quo_neg = 0;
  short num[MAX_SHORTS + 1];	/* extra element for scaling.  */
  short den[MAX_SHORTS], quo[MAX_SHORTS];
  register int i, j, work;
  register int carry = 0;
  HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  if ((hden == 0) && (lden == 0))
    abort ();

  /* calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
	{
	  quo_neg = ~ quo_neg;
	  /* (minimum integer) / (-1) is the only overflow case.  */
	  if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
	    overflow = 1;
	}
      if (hden < 0)
	{
	  quo_neg = ~ quo_neg;
	  neg_double (lden, hden, &lden, &hden);
	}
    }

  if (hnum == 0 && hden == 0)
    {				/* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
      goto finish_up;
    }

  if (hnum == 0)
    {				/* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  bzero (quo, sizeof quo);

  bzero (num, sizeof num);	/* to zero 9th element */
  bzero (den, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* This code requires more than just hden == 0.
     We also have to require that we don't need more than three bytes
     to hold CARRY.  If we ever did need four bytes to hold it, we
     would lose part of it when computing WORK on the next round.  */
  if (hden == 0 && (((unsigned HOST_WIDE_INT) lden << 8) >> 8) == lden)
    {				/* simpler algorithm */
      /* hnum != 0 already checked.  */
      for (i = MAX_SHORTS - 1; i >= 0; i--)
	{
	  work = num[i] + (carry << 8);
	  quo[i] = work / (unsigned HOST_WIDE_INT) lden;
	  carry = work % (unsigned HOST_WIDE_INT) lden;
	}
    }
  else {			/* full double precision,
				   with thanks to Don Knuth's
				   "Seminumerical Algorithms".  */

    int quo_est, scale, num_hi_sig, den_hi_sig, quo_hi_sig;

    /* Find the highest non-zero divisor digit.  */
    for (i = MAX_SHORTS - 1; ; i--)
      if (den[i] != 0) {
	den_hi_sig = i;
	break;
      }
    for (i = MAX_SHORTS - 1; ; i--)
      if (num[i] != 0) {
	num_hi_sig = i;
	break;
      }
    quo_hi_sig = num_hi_sig - den_hi_sig + 1;

    /* Ensure that the first digit of the divisor is at least BASE/2.
       This is required by the quotient digit estimation algorithm.  */

    scale = BASE / (den[den_hi_sig] + 1);
    if (scale > 1) {		/* scale divisor and dividend */
      carry = 0;
      for (i = 0; i <= MAX_SHORTS - 1; i++) {
	work = (num[i] * scale) + carry;
	num[i] = work & 0xff;
	carry = work >> 8;
	if (num[i] != 0) num_hi_sig = i;
      }
      carry = 0;
      for (i = 0; i <= MAX_SHORTS - 1; i++) {
	work = (den[i] * scale) + carry;
	den[i] = work & 0xff;
	carry = work >> 8;
	if (den[i] != 0) den_hi_sig = i;
      }
    }

    for (i = quo_hi_sig; i > 0; i--) {
      /* guess the next quotient digit, quo_est, by dividing the first
	 two remaining dividend digits by the high order quotient digit.
	 quo_est is never low and is at most 2 high.  */

      int num_hi;		/* index of highest remaining dividend digit */

      num_hi = i + den_hi_sig;

      work = (num[num_hi] * BASE) + (num_hi > 0 ? num[num_hi - 1] : 0);
      if (num[num_hi] != den[den_hi_sig]) {
	quo_est = work / den[den_hi_sig];
      }
      else {
	quo_est = BASE - 1;
      }

      /* refine quo_est so it's usually correct, and at most one high.  */
      while ((den[den_hi_sig - 1] * quo_est)
	     > (((work - (quo_est * den[den_hi_sig])) * BASE)
		+ ((num_hi - 1) > 0 ? num[num_hi - 2] : 0)))
	quo_est--;

      /* Try QUO_EST as the quotient digit, by multiplying the
	 divisor by QUO_EST and subtracting from the remaining dividend.
	 Keep in mind that QUO_EST is the I - 1st digit.  */

      carry = 0;
      for (j = 0; j <= den_hi_sig; j++)
	{
	  int digit;

	  work = num[i + j - 1] - (quo_est * den[j]) + carry;
	  digit = work & 0xff;
	  carry = work >> 8;
	  num[i + j - 1] = digit;
	}

      /* if quo_est was high by one, then num[i] went negative and
	 we need to correct things.  */

      if (num[num_hi] < 0)
	{
	  quo_est--;
	  carry = 0;		/* add divisor back in */
	  for (j = 0; j <= den_hi_sig; j++)
	    {
	      work = num[i + j - 1] + den[j] + carry;
	      if (work >= BASE)
		{
		  work -= BASE;
		  carry = 1;
		}
	      else
		carry = 0;
	      num[i + j - 1] = work;
	    }
	  num [num_hi] += carry;
	}

      /* store the quotient digit.  */
      quo[i - 1] = quo_est;
    }
  }

  decode (quo, lquo, hquo);

 finish_up:
  /* if result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:	/* round toward zero */
    case EXACT_DIV_EXPR:	/* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:	/* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
	{
	  /* quo = quo - 1;  */
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
		      lquo, hquo);
	}
      else return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:		/* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
	{
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
		      lquo, hquo);
	}
      else return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:	/* round to closest integer */
      {
	HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
	HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;

	/* get absolute values */
	if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
	if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);

	/* if (2 * abs (lrem) >= abs (lden)) */
	mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
		    labs_rem, habs_rem, &ltwice, &htwice);
	if (((unsigned HOST_WIDE_INT) habs_den
	     < (unsigned HOST_WIDE_INT) htwice)
	    || (((unsigned HOST_WIDE_INT) habs_den
		 == (unsigned HOST_WIDE_INT) htwice)
		&& ((unsigned HOST_WIDE_INT) labs_den
		    < (unsigned HOST_WIDE_INT) ltwice)))
	  {
	    if (quo_neg)
	      /* quo = quo - 1;  */
	      add_double (*lquo, *hquo,
			  (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
	    else
	      /* quo = quo + 1;  */
	      add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
			  lquo, hquo);
	  }
	else return overflow;
	break;
      }

    default:
      abort ();
    }

  /* compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
#ifndef REAL_ARITHMETIC
/* Effectively truncate a real value to represent
   the nearest possible value in a narrower mode.
   The result is actually represented in the same data type as the argument,
   but its value is usually different.  */

REAL_VALUE_TYPE
real_value_truncate (mode, arg)
     enum machine_mode mode;
     REAL_VALUE_TYPE arg;
{
#ifdef __STDC__
  /* Make sure the value is actually stored in memory before we turn off
     the handler.  */
  volatile
#endif
    REAL_VALUE_TYPE value;
  jmp_buf handler, old_handler;
  int handled;

  if (setjmp (handler))
    {
      error ("floating overflow");
      return arg;
    }
  handled = push_float_handler (handler, old_handler);
  value = REAL_VALUE_TRUNCATE (mode, arg);
  pop_float_handler (handled, old_handler);
  return value;
}
#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT

/* Check for infinity in an IEEE double precision number.  */

int
target_isinf (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign      :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign      :  1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
	      && u.big_endian.mantissa1 == 0
	      && u.big_endian.mantissa2 == 0);
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
	      && u.little_endian.mantissa1 == 0
	      && u.little_endian.mantissa2 == 0);
    }
}

/* Check whether an IEEE double precision number is a NaN.  */

int
target_isnan (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign      :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign      :  1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
	      && (u.big_endian.mantissa1 != 0
		  || u.big_endian.mantissa2 != 0));
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
	      && (u.little_endian.mantissa1 != 0
		  || u.little_endian.mantissa2 != 0));
    }
}

/* Check for a negative IEEE double precision number.  */

int
target_negative (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign      :  1;
      unsigned exponent  : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent  : 11;
      unsigned sign      :  1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return u.big_endian.sign;
    }
  else
    {
      u.d = x;
      return u.little_endian.sign;
    }
}
#else /* Target not IEEE */

/* Let's assume other float formats don't have infinity.
   (This can be overridden by redefining REAL_VALUE_ISINF.)  */

target_isinf (x)
     REAL_VALUE_TYPE x;
{
  return 0;
}

/* Let's assume other float formats don't have NaNs.
   (This can be overridden by redefining REAL_VALUE_ISNAN.)  */

target_isnan (x)
     REAL_VALUE_TYPE x;
{
  return 0;
}

/* Let's assume other float formats don't have minus zero.
   (This can be overridden by redefining REAL_VALUE_NEGATIVE.)  */

target_negative (x)
     REAL_VALUE_TYPE x;
{
  return x < 0;
}
#endif /* Target not IEEE */
#endif /* no REAL_ARITHMETIC */
/* Split a tree IN into a constant and a variable part
   that could be combined with CODE to make IN.
   CODE must be a commutative arithmetic operation.
   Store the constant part into *CONP and the variable in *VARP.
   Return 1 if this was done; zero means the tree IN did not decompose
   this way.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
   Therefore, we must tell the caller whether the variable part
   was subtracted.  We do this by storing 1 or -1 into *VARSIGNP.
   The value stored is the coefficient for the variable term.
   The constant term we return should always be added;
   we negate it if necessary.  */
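
/* Worked example (for illustration): splitting IN = `x - 5' with
   CODE = PLUS_EXPR decomposes the tree as x + (-5): *VARP is set to x
   and *VARSIGNP to 1, and the constant 5 is negated so that *CONP
   holds -5, keeping the "always add the constant" contract.
   Splitting `5 - x' instead sets *CONP = 5, *VARP = x and
   *VARSIGNP = -1.  */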
static int
split_tree (in, code, varp, conp, varsignp)
     tree in;
     enum tree_code code;
     tree *varp, *conp;
     int *varsignp;
{
  register tree outtype = TREE_TYPE (in);
  *varp = 0;
  *conp = 0;

  /* Strip any conversions that don't change the machine mode.  */
  while ((TREE_CODE (in) == NOP_EXPR
	  || TREE_CODE (in) == CONVERT_EXPR)
	 && (TYPE_MODE (TREE_TYPE (in))
	     == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
    in = TREE_OPERAND (in, 0);

  if (TREE_CODE (in) == code
      || (! FLOAT_TYPE_P (TREE_TYPE (in))
	  /* We can associate addition and subtraction together
	     (even though the C standard doesn't say so)
	     for integers because the value is not affected.
	     For reals, the value might be affected, so we can't.  */
	  && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
	      || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
    {
      enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
      if (code == INTEGER_CST)
	{
	  *conp = TREE_OPERAND (in, 0);
	  *varp = TREE_OPERAND (in, 1);
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
	  return 1;
	}
      if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
	{
	  *conp = TREE_OPERAND (in, 1);
	  *varp = TREE_OPERAND (in, 0);
	  *varsignp = 1;
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  if (TREE_CODE (in) == MINUS_EXPR)
	    {
	      /* If operation is subtraction and constant is second,
		 must negate it to get an additive constant.
		 And this cannot be done unless it is a manifest constant.
		 It could also be the address of a static variable.
		 We cannot negate that, so give up.  */
	      if (TREE_CODE (*conp) == INTEGER_CST)
		/* Subtracting from integer_zero_node loses for long long.  */
		*conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
	      else
		return 0;
	    }
	  return 1;
	}
      if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
	{
	  *conp = TREE_OPERAND (in, 0);
	  *varp = TREE_OPERAND (in, 1);
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
	  return 1;
	}
    }
  return 0;
}
/* Combine two constants ARG1 and ARG2 under operation CODE
   to produce a new constant.
   We assume ARG1 and ARG2 have the same data type,
   or at least are the same kind of constant and the same machine mode.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */
static tree
const_binop (code, arg1, arg2, notrunc)
     enum tree_code code;
     register tree arg1, arg2;
     int notrunc;
{
  if (TREE_CODE (arg1) == INTEGER_CST)
    {
      register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
      register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
      HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
      HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
      HOST_WIDE_INT low, hi;
      HOST_WIDE_INT garbagel, garbageh;
      register tree t;
      int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
      int overflow = 0;

      switch (code)
	{
	case BIT_IOR_EXPR:
	  t = build_int_2 (int1l | int2l, int1h | int2h);
	  break;

	case BIT_XOR_EXPR:
	  t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
	  break;

	case BIT_AND_EXPR:
	  t = build_int_2 (int1l & int2l, int1h & int2h);
	  break;

	case BIT_ANDTC_EXPR:
	  t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
	  break;

	case RSHIFT_EXPR:
	  int2l = - int2l;
	case LSHIFT_EXPR:
	  /* It's unclear from the C standard whether shifts can overflow.
	     The following code ignores overflow; perhaps a C standard
	     interpretation ruling is needed.  */
	  lshift_double (int1l, int1h, int2l,
			 TYPE_PRECISION (TREE_TYPE (arg1)),
			 &low, &hi,
			 !uns);
	  t = build_int_2 (low, hi);
	  TREE_TYPE (t) = TREE_TYPE (arg1);
	  if (!notrunc)
	    force_fit_type (t, 0);
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
	  return t;

	case RROTATE_EXPR:
	  int2l = - int2l;
	case LROTATE_EXPR:
	  lrotate_double (int1l, int1h, int2l,
			  TYPE_PRECISION (TREE_TYPE (arg1)),
			  &low, &hi);
	  t = build_int_2 (low, hi);
	  break;

	case PLUS_EXPR:
	  if (int1h == 0)
	    {
	      int2l += int1l;
	      if ((unsigned HOST_WIDE_INT) int2l < int1l)
		{
		  hi = int2h + 1;
		  overflow = hi < int2h;
		  int2h = hi;
		}
	      t = build_int_2 (int2l, int2h);
	      break;
	    }
	  if (int2h == 0)
	    {
	      int1l += int2l;
	      if ((unsigned HOST_WIDE_INT) int1l < int2l)
		{
		  hi = int1h + 1;
		  overflow = hi < int1h;
		  int1h = hi;
		}
	      t = build_int_2 (int1l, int1h);
	      break;
	    }
	  overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
	  t = build_int_2 (low, hi);
	  break;

	case MINUS_EXPR:
	  if (int2h == 0 && int2l == 0)
	    {
	      t = build_int_2 (int1l, int1h);
	      break;
	    }
	  neg_double (int2l, int2h, &low, &hi);
	  add_double (int1l, int1h, low, hi, &low, &hi);
	  overflow = overflow_sum_sign (hi, int2h, int1h);
	  t = build_int_2 (low, hi);
	  break;

	case MULT_EXPR:
	  /* Optimize simple cases.  */
	  if (int1h == 0)
	    {
	      unsigned HOST_WIDE_INT temp;

	      switch (int1l)
		{
		case 0:
		  t = build_int_2 (0, 0);
		  goto got_it;
		case 1:
		  t = build_int_2 (int2l, int2h);
		  goto got_it;
		case 2:
		  overflow = left_shift_overflows (int2h, 1);
		  temp = int2l + int2l;
		  int2h = (int2h << 1) + (temp < int2l);
		  t = build_int_2 (temp, int2h);
		  goto got_it;
#if 0 /* This code can lose carries.  */
		case 3:
		  temp = int2l + int2l + int2l;
		  int2h = int2h * 3 + (temp < int2l);
		  t = build_int_2 (temp, int2h);
		  goto got_it;
#endif
		case 4:
		  overflow = left_shift_overflows (int2h, 2);
		  temp = int2l + int2l;
		  int2h = (int2h << 2) + ((temp < int2l) << 1);
		  int2l = temp;
		  temp += temp;
		  int2h += (temp < int2l);
		  t = build_int_2 (temp, int2h);
		  goto got_it;
		case 8:
		  overflow = left_shift_overflows (int2h, 3);
		  temp = int2l + int2l;
		  int2h = (int2h << 3) + ((temp < int2l) << 2);
		  int2l = temp;
		  temp += temp;
		  int2h += (temp < int2l) << 1;
		  int2l = temp;
		  temp += temp;
		  int2h += (temp < int2l);
		  t = build_int_2 (temp, int2h);
		  goto got_it;
		default:
		  break;
		}
	    }

	  if (int2h == 0)
	    {
	      if (int2l == 0)
		{
		  t = build_int_2 (0, 0);
		  break;
		}
	      if (int2l == 1)
		{
		  t = build_int_2 (int1l, int1h);
		  break;
		}
	    }

	  overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
	  t = build_int_2 (low, hi);
	  break;

	case TRUNC_DIV_EXPR:
	case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
	case EXACT_DIV_EXPR:
	  /* This is a shortcut for a common special case.
	     It reduces the number of tree nodes generated
	     and saves time.  */
	  if (int2h == 0 && int2l > 0
	      && TREE_TYPE (arg1) == sizetype
	      && int1h == 0 && int1l >= 0)
	    {
	      if (code == CEIL_DIV_EXPR)
		int1l += int2l - 1;
	      return size_int (int1l / int2l);
	    }
	case ROUND_DIV_EXPR:
	  if (int2h == 0 && int2l == 1)
	    {
	      t = build_int_2 (int1l, int1h);
	      break;
	    }
	  if (int1l == int2l && int1h == int2h)
	    {
	      if ((int1l | int1h) == 0)
		abort ();
	      t = build_int_2 (1, 0);
	      break;
	    }
	  overflow = div_and_round_double (code, uns,
					   int1l, int1h, int2l, int2h,
					   &low, &hi, &garbagel, &garbageh);
	  t = build_int_2 (low, hi);
	  break;

	case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
	case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
	  overflow = div_and_round_double (code, uns,
					   int1l, int1h, int2l, int2h,
					   &garbagel, &garbageh, &low, &hi);
	  t = build_int_2 (low, hi);
	  break;

	case MIN_EXPR:
	case MAX_EXPR:
	  if (uns)
	    low = (((unsigned HOST_WIDE_INT) int1h
		    < (unsigned HOST_WIDE_INT) int2h)
		   || (((unsigned HOST_WIDE_INT) int1h
			== (unsigned HOST_WIDE_INT) int2h)
		       && ((unsigned HOST_WIDE_INT) int1l
			   < (unsigned HOST_WIDE_INT) int2l)));
	  else
	    low = ((int1h < int2h)
		   || ((int1h == int2h)
		       && ((unsigned HOST_WIDE_INT) int1l
			   < (unsigned HOST_WIDE_INT) int2l)));

	  if (low == (code == MIN_EXPR))
	    t = build_int_2 (int1l, int1h);
	  else
	    t = build_int_2 (int2l, int2h);
	  break;

	default:
	  abort ();
	}

    got_it:
      TREE_TYPE (t) = TREE_TYPE (arg1);
      TREE_CONSTANT_OVERFLOW (t)
	= ((notrunc ? !uns && overflow : force_fit_type (t, overflow))
	   | TREE_CONSTANT_OVERFLOW (arg1)
	   | TREE_CONSTANT_OVERFLOW (arg2));
      return t;
    }
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (TREE_CODE (arg1) == REAL_CST)
    {
      register REAL_VALUE_TYPE d1;
      register REAL_VALUE_TYPE d2;
      REAL_VALUE_TYPE value;
      tree t;

      d1 = TREE_REAL_CST (arg1);
      d2 = TREE_REAL_CST (arg2);
      if (setjmp (float_error))
	{
	  pedwarn ("floating overflow in constant expression");
	  return build (code, TREE_TYPE (arg1), arg1, arg2);
	}
      set_float_handler (float_error);

#ifdef REAL_ARITHMETIC
      REAL_ARITHMETIC (value, code, d1, d2);
#else
      switch (code)
	{
	case PLUS_EXPR:
	  value = d1 + d2;
	  break;

	case MINUS_EXPR:
	  value = d1 - d2;
	  break;

	case MULT_EXPR:
	  value = d1 * d2;
	  break;

	case RDIV_EXPR:
#ifndef REAL_INFINITY
	  if (d2 == 0)
	    abort ();
#endif
	  value = d1 / d2;
	  break;

	case MIN_EXPR:
	  value = MIN (d1, d2);
	  break;

	case MAX_EXPR:
	  value = MAX (d1, d2);
	  break;

	default:
	  abort ();
	}
#endif /* no REAL_ARITHMETIC */
      t = build_real (TREE_TYPE (arg1),
		      real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
      set_float_handler (NULL_PTR);
      return t;
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  if (TREE_CODE (arg1) == COMPLEX_CST)
    {
      register tree r1 = TREE_REALPART (arg1);
      register tree i1 = TREE_IMAGPART (arg1);
      register tree r2 = TREE_REALPART (arg2);
      register tree i2 = TREE_IMAGPART (arg2);
      register tree t;

      switch (code)
	{
	case PLUS_EXPR:
	  t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
			     const_binop (PLUS_EXPR, i1, i2, notrunc));
	  break;

	case MINUS_EXPR:
	  t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
			     const_binop (MINUS_EXPR, i1, i2, notrunc));
	  break;

	case MULT_EXPR:
	  t = build_complex (const_binop (MINUS_EXPR,
					  const_binop (MULT_EXPR,
						       r1, r2, notrunc),
					  const_binop (MULT_EXPR,
						       i1, i2, notrunc),
					  notrunc),
			     const_binop (PLUS_EXPR,
					  const_binop (MULT_EXPR,
						       r1, i2, notrunc),
					  const_binop (MULT_EXPR,
						       i1, r2, notrunc),
					  notrunc));
	  break;

	case RDIV_EXPR:
	  {
	    register tree magsquared
	      = const_binop (PLUS_EXPR,
			     const_binop (MULT_EXPR, r2, r2, notrunc),
			     const_binop (MULT_EXPR, i2, i2, notrunc),
			     notrunc);
	    t = build_complex (const_binop (RDIV_EXPR,
					    const_binop (PLUS_EXPR,
							 const_binop (MULT_EXPR, r1, r2, notrunc),
							 const_binop (MULT_EXPR, i1, i2, notrunc),
							 notrunc),
					    magsquared, notrunc),
			       const_binop (RDIV_EXPR,
					    const_binop (MINUS_EXPR,
							 const_binop (MULT_EXPR, i1, r2, notrunc),
							 const_binop (MULT_EXPR, r1, i2, notrunc),
							 notrunc),
					    magsquared, notrunc));
	  }
	  break;

	default:
	  abort ();
	}
      TREE_TYPE (t) = TREE_TYPE (arg1);
      return t;
    }
  return 0;
}
/* Return an INTEGER_CST with value V and type from `sizetype'.  */

tree
size_int (number)
     unsigned int number;
{
  register tree t;
  /* Type-size nodes already made for small sizes.  */
  static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];

  if (number < 2*HOST_BITS_PER_WIDE_INT + 1
      && size_table[number] != 0)
    return size_table[number];
  if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
    {
      push_obstacks_nochange ();
      /* Make this a permanent node.  */
      end_temporary_allocation ();
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
      size_table[number] = t;
      pop_obstacks ();
    }
  else
    {
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
    }
  return t;
}
/* Combine operands OP1 and OP2 with arithmetic operation CODE.
   CODE is a tree code.  Data type is taken from `sizetype'.
   If the operands are constant, so is the result.  */
tree
size_binop (code, arg0, arg1)
     enum tree_code code;
     tree arg0, arg1;
{
  /* Handle the special case of two integer constants faster.  */
  if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
    {
      /* And some specific cases even faster than that.  */
      if (code == PLUS_EXPR
	  && TREE_INT_CST_LOW (arg0) == 0
	  && TREE_INT_CST_HIGH (arg0) == 0)
	return arg1;
      if (code == MINUS_EXPR
	  && TREE_INT_CST_LOW (arg1) == 0
	  && TREE_INT_CST_HIGH (arg1) == 0)
	return arg0;
      if (code == MULT_EXPR
	  && TREE_INT_CST_LOW (arg0) == 1
	  && TREE_INT_CST_HIGH (arg0) == 0)
	return arg1;
      /* Handle general case of two integer constants.  */
      return const_binop (code, arg0, arg1, 1);
    }

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return error_mark_node;

  return fold (build (code, sizetype, arg0, arg1));
}
/* Given T, a tree representing type conversion of ARG1, a constant,
   return a constant tree representing the result of conversion.  */
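
/* Worked example (for illustration): converting the REAL_CST 3.7 to
   a signed integer type truncates toward zero and yields the
   INTEGER_CST 3; converting the INTEGER_CST 300 to an 8-bit unsigned
   type wraps modulo 256 and yields 44, with force_fit_type doing the
   truncation.  A real constant outside the target type's range draws
   the "real constant out of range" pedwarn below.  */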
static tree
fold_convert (t, arg1)
     register tree t;
     register tree arg1;
{
  register tree type = TREE_TYPE (t);

  if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
	{
	  /* Given an integer constant, make new constant with new type,
	     appropriately sign-extended or truncated.  */
	  t = build_int_2 (TREE_INT_CST_LOW (arg1),
			   TREE_INT_CST_HIGH (arg1));
	  TREE_TYPE (t) = type;
	  /* Indicate an overflow if (1) ARG1 already overflowed,
	     or (2) ARG1 is a too-large unsigned value and T is signed,
	     or (3) force_fit_type indicates an overflow.
	     force_fit_type can't detect (2), since it sees only T's type.  */
	  TREE_CONSTANT_OVERFLOW (t) =
	    (TREE_CONSTANT_OVERFLOW (arg1)
	     | (TREE_INT_CST_HIGH (arg1) < 0
		& TREE_UNSIGNED (type) < TREE_UNSIGNED (TREE_TYPE (arg1)))
	     | force_fit_type (t, 0));
	}
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      else if (TREE_CODE (arg1) == REAL_CST)
	{
	  REAL_VALUE_TYPE l, x, u;

	  l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
	  x = TREE_REAL_CST (arg1);
	  u = real_value_from_int_cst (TYPE_MAX_VALUE (type));

	  /* See if X will be in range after truncation towards 0.
	     To compensate for truncation, move the bounds away from 0,
	     but reject if X exactly equals the adjusted bounds.  */
#ifdef REAL_ARITHMETIC
	  REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
	  REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
#else
	  l--;
	  u++;
#endif
	  if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
	    {
	      pedwarn ("real constant out of range for integer conversion");
	      return t;
	    }
#ifndef REAL_ARITHMETIC
	  {
	    REAL_VALUE_TYPE d;
	    HOST_WIDE_INT low, high;
	    HOST_WIDE_INT half_word
	      = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);

	    d = TREE_REAL_CST (arg1);
	    if (d < 0)
	      d = -d;

	    high = (HOST_WIDE_INT) (d / half_word / half_word);
	    d -= (REAL_VALUE_TYPE) high * half_word * half_word;
	    if (d >= (REAL_VALUE_TYPE) half_word * half_word / 2)
	      {
		low = d - (REAL_VALUE_TYPE) half_word * half_word / 2;
		low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
	      }
	    else
	      low = (HOST_WIDE_INT) d;
	    if (TREE_REAL_CST (arg1) < 0)
	      neg_double (low, high, &low, &high);
	    t = build_int_2 (low, high);
	  }
#else
	  {
	    HOST_WIDE_INT low, high;
	    REAL_VALUE_TO_INT (&low, &high, (TREE_REAL_CST (arg1)));
	    t = build_int_2 (low, high);
	  }
#endif
	  TREE_TYPE (t) = type;
	  force_fit_type (t, 0);
	}
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      TREE_TYPE (t) = type;
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      if (TREE_CODE (arg1) == INTEGER_CST)
	return build_real_from_int_cst (type, arg1);
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      if (TREE_CODE (arg1) == REAL_CST)
	{
	  if (setjmp (float_error))
	    {
	      pedwarn ("floating overflow in constant expression");
	      return t;
	    }
	  set_float_handler (float_error);

	  t = build_real (type, real_value_truncate (TYPE_MODE (type),
						     TREE_REAL_CST (arg1)));
	  set_float_handler (NULL_PTR);
	  return t;
	}
    }
  TREE_CONSTANT (t) = 1;
  return t;
}
/* Return an expr equal to X but certainly not valid as an lvalue.
   Also make sure it is not valid as a null pointer constant.  */

tree
non_lvalue (x)
     tree x;
{
  register tree result;

  /* These things are certainly not lvalues.  */
  if (TREE_CODE (x) == NON_LVALUE_EXPR
      || TREE_CODE (x) == INTEGER_CST
      || TREE_CODE (x) == REAL_CST
      || TREE_CODE (x) == STRING_CST
      || TREE_CODE (x) == ADDR_EXPR)
    {
      if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
	{
	  /* Use NOP_EXPR instead of NON_LVALUE_EXPR
	     so convert_for_assignment won't strip it.
	     This is so this 0 won't be treated as a null pointer constant.  */
	  result = build1 (NOP_EXPR, TREE_TYPE (x), x);
	  TREE_CONSTANT (result) = TREE_CONSTANT (x);
	  return result;
	}
      return x;
    }

  result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
  TREE_CONSTANT (result) = TREE_CONSTANT (x);
  return result;
}
/* Given a tree comparison code, return the code that is the logical inverse
   of the given code.  It is not safe to do this for floating-point
   comparisons, except for NE_EXPR and EQ_EXPR.  */

static enum tree_code
invert_tree_comparison (code)
     enum tree_code code;
{
  switch (code)
    {
    case EQ_EXPR:
      return NE_EXPR;
    case NE_EXPR:
      return EQ_EXPR;
    case GT_EXPR:
      return LE_EXPR;
    case GE_EXPR:
      return LT_EXPR;
    case LT_EXPR:
      return GE_EXPR;
    case LE_EXPR:
      return GT_EXPR;
    default:
      abort ();
    }
}

/* Similar, but return the comparison that results if the operands are
   swapped.  This is safe for floating-point.  */

static enum tree_code
swap_tree_comparison (code)
     enum tree_code code;
{
  switch (code)
    {
    case EQ_EXPR:
    case NE_EXPR:
      return code;
    case GT_EXPR:
      return LT_EXPR;
    case GE_EXPR:
      return LE_EXPR;
    case LT_EXPR:
      return GT_EXPR;
    case LE_EXPR:
      return GE_EXPR;
    default:
      abort ();
    }
}
/* Return nonzero if two operands are necessarily equal.
   If ONLY_CONST is non-zero, only return non-zero for constants.
   This function tests whether the operands are indistinguishable;
   it does not test whether they are equal using C's == operation.
   The distinction is important for IEEE floating point, because
   (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
   (2) two NaNs may be indistinguishable, but NaN!=NaN.  */

int
operand_equal_p (arg0, arg1, only_const)
     tree arg0, arg1;
     int only_const;
{
  /* If both types don't have the same signedness, then we can't consider
     them equal.  We must check this before the STRIP_NOPS calls
     because they may change the signedness of the arguments.  */
  if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
    return 0;

  STRIP_NOPS (arg0);
  STRIP_NOPS (arg1);

  /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
     We don't care about side effects in that case because the SAVE_EXPR
     takes care of that for us.  */
  if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
    return ! only_const;

  if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
    return 0;

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == ADDR_EXPR
      && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
    return 1;

  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == INTEGER_CST
      && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
      && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
    return 1;

  /* Detect when real constants are equal.  */
  if (TREE_CODE (arg0) == TREE_CODE (arg1)
      && TREE_CODE (arg0) == REAL_CST)
    return !bcmp (&TREE_REAL_CST (arg0), &TREE_REAL_CST (arg1),
		  sizeof (REAL_VALUE_TYPE));

  if (only_const)
    return 0;

  if (TREE_CODE (arg0) != TREE_CODE (arg1))
    return 0;
  /* This is needed for conversions and for COMPONENT_REF.
     Might as well play it safe and always test this.  */
  if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
    return 0;

  switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
    {
    case '1':
      /* Two conversions are equal only if signedness and modes match.  */
      if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
	  && (TREE_UNSIGNED (TREE_TYPE (arg0))
	      != TREE_UNSIGNED (TREE_TYPE (arg1))))
	return 0;

      return operand_equal_p (TREE_OPERAND (arg0, 0),
			      TREE_OPERAND (arg1, 0), 0);

    case '<':
    case '2':
      return (operand_equal_p (TREE_OPERAND (arg0, 0),
			       TREE_OPERAND (arg1, 0), 0)
	      && operand_equal_p (TREE_OPERAND (arg0, 1),
				  TREE_OPERAND (arg1, 1), 0));

    case 'r':
      switch (TREE_CODE (arg0))
	{
	case INDIRECT_REF:
	  return operand_equal_p (TREE_OPERAND (arg0, 0),
				  TREE_OPERAND (arg1, 0), 0);

	case COMPONENT_REF:
	case ARRAY_REF:
	  return (operand_equal_p (TREE_OPERAND (arg0, 0),
				   TREE_OPERAND (arg1, 0), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 1),
				      TREE_OPERAND (arg1, 1), 0));

	case BIT_FIELD_REF:
	  return (operand_equal_p (TREE_OPERAND (arg0, 0),
				   TREE_OPERAND (arg1, 0), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 1),
				      TREE_OPERAND (arg1, 1), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 2),
				      TREE_OPERAND (arg1, 2), 0));
	}
    }

  return 0;
}
/* Similar to operand_equal_p, but see if ARG0 might have been made by
   shorten_compare from ARG1 when ARG1 was being compared with OTHER.

   When in doubt, return 0.  */

static int
operand_equal_for_comparison_p (arg0, arg1, other)
     tree arg0, arg1;
     tree other;
{
  int unsignedp1, unsignedpo;
  tree primarg1, primother;
  int correct_width;

  if (operand_equal_p (arg0, arg1, 0))
    return 1;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
    return 0;

  /* Duplicate what shorten_compare does to ARG1 and see if that gives the
     actual comparison operand, ARG0.

     First throw away any conversions to wider types
     already present in the operands.  */

  primarg1 = get_narrower (arg1, &unsignedp1);
  primother = get_narrower (other, &unsignedpo);

  correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
  if (unsignedp1 == unsignedpo
      && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
      && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
    {
      tree type = TREE_TYPE (arg0);

      /* Make sure shorter operand is extended the right way
	 to match the longer operand.  */
      primarg1 = convert (signed_or_unsigned_type (unsignedp1,
						   TREE_TYPE (primarg1)),
			  primarg1);

      if (operand_equal_p (arg0, convert (type, primarg1), 0))
	return 1;
    }

  return 0;
}
/* See if ARG is an expression that is either a comparison or is performing
   arithmetic on comparisons.  The comparisons must only be comparing
   two different values, which will be stored in *CVAL1 and *CVAL2; if
   they are non-zero it means that some operands have already been found.
   No variables may be used anywhere else in the expression except in the
   comparisons.

   If this is true, return 1.  Otherwise, return zero.  */

static int
twoval_comparison_p (arg, cval1, cval2)
     tree arg;
     tree *cval1, *cval2;
{
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e'
      && (code == TRUTH_NOT_EXPR
	  || (code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)))
    class = '1';
  else if (class == 'e'
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
	       || code == COMPOUND_EXPR))
    class = '2';

  switch (class)
    {
    case '1':
      return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2);

    case '2':
      return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2)
	      && twoval_comparison_p (TREE_OPERAND (arg, 1), cval1, cval2));

    case 'e':
      if (code == COND_EXPR)
	return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2)
		&& twoval_comparison_p (TREE_OPERAND (arg, 1), cval1, cval2)
		&& twoval_comparison_p (TREE_OPERAND (arg, 2),
					cval1, cval2));
      return 0;

    case '<':
      /* First see if we can handle the first operand, then the second.  For
	 the second operand, we know *CVAL1 can't be zero.  It must be that
	 one side of the comparison is each of the values; test for the
	 case where this isn't true by failing if the two operands
	 are the same.  */

      if (operand_equal_p (TREE_OPERAND (arg, 0),
			   TREE_OPERAND (arg, 1), 0))
	return 0;

      if (*cval1 == 0)
	*cval1 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
	;
      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
	;
      else
	return 0;

      if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
	;
      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 1);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
	;
      else
	return 0;

      return 1;
    }

  return 0;
}
/* ARG is a tree that is known to contain just arithmetic operations and
   comparisons.  Evaluate the operations in the tree substituting NEW0 for
   any occurrence of OLD0 as an operand of a comparison and likewise for
   NEW1 and OLD1.  */

static tree
eval_subst (arg, old0, new0, old1, new1)
     tree arg;
     tree old0, new0, old1, new1;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
    class = '2';

  switch (class)
    {
    case '1':
      return fold (build1 (code, type,
			   eval_subst (TREE_OPERAND (arg, 0),
				       old0, new0, old1, new1)));

    case '2':
      return fold (build (code, type,
			  eval_subst (TREE_OPERAND (arg, 0),
				      old0, new0, old1, new1),
			  eval_subst (TREE_OPERAND (arg, 1),
				      old0, new0, old1, new1)));

    case 'e':
      switch (code)
	{
	case SAVE_EXPR:
	  return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);

	case COMPOUND_EXPR:
	  return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);

	case COND_EXPR:
	  return fold (build (code, type,
			      eval_subst (TREE_OPERAND (arg, 0),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 1),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 2),
					  old0, new0, old1, new1)));
	}

    case '<':
      {
	tree arg0 = TREE_OPERAND (arg, 0);
	tree arg1 = TREE_OPERAND (arg, 1);

	/* We need to check both for exact equality and tree equality.  The
	   former will be true if the operand has a side-effect.  In that
	   case, we know the operand occurred exactly once.  */

	if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
	  arg0 = new0;
	else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
	  arg0 = new1;

	if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
	  arg1 = new0;
	else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
	  arg1 = new1;

	return fold (build (code, type, arg0, arg1));
      }
    }

  return arg;
}
/* Return a tree for the case when the result of an expression is RESULT
   converted to TYPE and OMITTED was previously an operand of the expression
   but is now not needed (e.g., we folded OMITTED * 0).

   If OMITTED has side effects, we must evaluate it.  Otherwise, just do
   the conversion of RESULT to TYPE.  */

static tree
omit_one_operand (type, result, omitted)
     tree type, result, omitted;
{
  tree t = convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build (COMPOUND_EXPR, type, omitted, t);

  return non_lvalue (t);
}
/* Return a simplified tree node for the truth-negation of ARG.  This
   never alters ARG itself.  We assume that ARG is an operation that
   returns a truth value (0 or 1).  */
tree
invert_truthvalue (arg)
     tree arg;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);

  /* If this is a comparison, we can simply invert it, except for
     floating-point non-equality comparisons, in which case we just
     enclose a TRUTH_NOT_EXPR around what we have.  */

  if (TREE_CODE_CLASS (code) == '<')
    {
      if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	  && code != NE_EXPR && code != EQ_EXPR)
	return build1 (TRUTH_NOT_EXPR, type, arg);
      else
	return build (invert_tree_comparison (code), type,
		      TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
    }

  switch (code)
    {
    case INTEGER_CST:
      return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
					 && TREE_INT_CST_HIGH (arg) == 0, 0));

    case TRUTH_AND_EXPR:
      return build (TRUTH_OR_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_OR_EXPR:
      return build (TRUTH_AND_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_XOR_EXPR:
      /* Here we can invert either operand.  We invert the first operand
	 unless the second operand is a TRUTH_NOT_EXPR in which case our
	 result is the XOR of the first operand with the inside of the
	 negation of the second operand.  */

      if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
	return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
		      TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
      else
	return build (TRUTH_XOR_EXPR, type,
		      invert_truthvalue (TREE_OPERAND (arg, 0)),
		      TREE_OPERAND (arg, 1));

    case TRUTH_ANDIF_EXPR:
      return build (TRUTH_ORIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_ORIF_EXPR:
      return build (TRUTH_ANDIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_NOT_EXPR:
      return TREE_OPERAND (arg, 0);

    case COND_EXPR:
      return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)),
		    invert_truthvalue (TREE_OPERAND (arg, 2)));

    case COMPOUND_EXPR:
      return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case NON_LVALUE_EXPR:
      return invert_truthvalue (TREE_OPERAND (arg, 0));

    case NOP_EXPR:
    case CONVERT_EXPR:
    case FLOAT_EXPR:
      return build1 (TREE_CODE (arg), type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)));

    case BIT_AND_EXPR:
      if (! integer_onep (TREE_OPERAND (arg, 1)))
	break;
      return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
    }

  abort ();
}
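
/* Worked example (for illustration): inverting the tree for `a && b'
   applies De Morgan's law and yields `!a || !b', with the negation
   pushed recursively into each operand; inverting `x < y' simply
   flips the comparison code to `x >= y'.  The flip is only done for
   non-float operands: with IEEE floating point, x and y might be
   unordered, so the code above wraps a TRUTH_NOT_EXPR around float
   non-equality comparisons instead.  */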
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
	(A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.

   If this optimization cannot be done, 0 will be returned.  */
static tree
distribute_bit_expr (code, type, arg0, arg1)
     enum tree_code code;
     tree type;
     tree arg0, arg1;
{
  tree common;
  tree left, right;

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      || TREE_CODE (arg0) == code
      || (TREE_CODE (arg0) != BIT_AND_EXPR
	  && TREE_CODE (arg0) != BIT_IOR_EXPR))
    return 0;

  if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 0);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 0);
    }
  else
    return 0;

  return fold (build (TREE_CODE (arg0), type, common,
		      fold (build (code, type, left, right))));
}
/* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
   starting at BITPOS.  The field is unsigned if UNSIGNEDP is non-zero.  */

static tree
make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
     tree inner;
     tree type;
     int bitsize, bitpos;
     int unsignedp;
{
  tree result = build (BIT_FIELD_REF, type, inner,
		       size_int (bitsize), size_int (bitpos));

  TREE_UNSIGNED (result) = unsignedp;

  return result;
}
/* Optimize a bit-field compare.

   There are two cases:  First is a compare against a constant and the
   second is a comparison of two items where the fields are at the same
   bit position relative to the start of a chunk (byte, halfword, word)
   large enough to contain it.  In these cases we can avoid the shift
   implicit in bitfield extractions.

   For constants, we emit a compare of the shifted constant with the
   BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
   compared.  For two fields at the same position, we do the ANDs with the
   similar mask and compare the result of the ANDs.

   CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
   COMPARE_TYPE is the type of the comparison, and LHS and RHS
   are the left and right operands of the comparison, respectively.

   If the optimization described above can be done, we return the resulting
   tree.  Otherwise we return zero.  */
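
/* Worked example (for illustration): given
       struct s { unsigned int a : 3; unsigned int b : 5; } x;
   the test `x.b == 7' would normally extract the 5-bit field (load,
   shift, mask) and then compare.  The transformation below instead
   loads the containing byte or word once, masks it with b's bits left
   in place, and compares against the constant 7 shifted up to b's bit
   position -- the shift moves to the constant, where it folds away at
   compile time.  */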
static tree
optimize_bit_field_compare (code, compare_type, lhs, rhs)
     enum tree_code code;
     tree compare_type;
     tree lhs, rhs;
{
  int lbitpos, lbitsize, rbitpos, rbitsize;
  int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
  tree type = TREE_TYPE (lhs);
  tree signed_type, unsigned_type;
  int const_p = TREE_CODE (rhs) == INTEGER_CST;
  enum machine_mode lmode, rmode, lnmode, rnmode;
  int lunsignedp, runsignedp;
  int lvolatilep = 0, rvolatilep = 0;
  tree linner, rinner;
  tree mask;
  tree offset;

  /* Get all the information about the extractions being done.  If the bit size
     is the same as the size of the underlying object, we aren't doing an
     extraction at all and so can do nothing.  */
  linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
				&lunsignedp, &lvolatilep);
  if (lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
      || offset != 0)
    return 0;

  if (!const_p)
    {
      /* If this is not a constant, we can only do something if bit positions,
	 sizes, and signedness are the same.  */
      rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
				    &rmode, &runsignedp, &rvolatilep);

      if (lbitpos != rbitpos || lbitsize != rbitsize
	  || lunsignedp != runsignedp || offset != 0)
	return 0;
    }

  /* See if we can find a mode to refer to this field.  We should be able to,
     but fail if we can't.  */
  lnmode = get_best_mode (lbitsize, lbitpos,
			  TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
			  lvolatilep);
  if (lnmode == VOIDmode)
    return 0;

  /* Set signed and unsigned types of the precision of this mode for the
     shifts below.  */
  signed_type = type_for_mode (lnmode, 0);
  unsigned_type = type_for_mode (lnmode, 1);

  if (! const_p)
    {
      rnmode = get_best_mode (rbitsize, rbitpos,
			      TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
			      rvolatilep);
      if (rnmode == VOIDmode)
	return 0;
    }

  /* Compute the bit position and size for the new reference and our offset
     within it.  If the new reference is the same size as the original, we
     won't optimize anything, so return zero.  */
  lnbitsize = GET_MODE_BITSIZE (lnmode);
  lnbitpos = lbitpos & ~ (lnbitsize - 1);
  lbitpos -= lnbitpos;
  if (lnbitsize == lbitsize)
    return 0;

  if (! const_p)
    {
      rnbitsize = GET_MODE_BITSIZE (rnmode);
      rnbitpos = rbitpos & ~ (rnbitsize - 1);
      rbitpos -= rnbitpos;
      if (rnbitsize == rbitsize)
	return 0;
    }

#if BYTES_BIG_ENDIAN
  lbitpos = lnbitsize - lbitsize - lbitpos;
#endif

  /* Make the mask to be used against the extracted field.  */
  mask = build_int_2 (~0, ~0);
  TREE_TYPE (mask) = unsigned_type;
  force_fit_type (mask, 0);
  mask = convert (unsigned_type, mask);
  mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
  mask = const_binop (RSHIFT_EXPR, mask,
		      size_int (lnbitsize - lbitsize - lbitpos), 0);

  if (! const_p)
    /* If not comparing with constant, just rework the comparison
       and return.  */
    return build (code, compare_type,
		  build (BIT_AND_EXPR, unsigned_type,
			 make_bit_field_ref (linner, unsigned_type,
					     lnbitsize, lnbitpos, 1),
			 mask),
		  build (BIT_AND_EXPR, unsigned_type,
			 make_bit_field_ref (rinner, unsigned_type,
					     rnbitsize, rnbitpos, 1),
			 mask));

  /* Otherwise, we are handling the constant case.  See if the constant is too
     big for the field.  Warn and return a tree for 0 (false) if so.  We do
     this not only for its own sake, but to avoid having to test for this
     error case below.  If we didn't, we might generate wrong code.

     For unsigned fields, the constant shifted right by the field length should
     be all zero.  For signed fields, the high-order bits should agree with
     the sign bit.  */

  if (lunsignedp)
    {
      if (! integer_zerop (const_binop (RSHIFT_EXPR,
					convert (unsigned_type, rhs),
					size_int (lbitsize), 0)))
	{
	  warning ("comparison is always %s due to width of bitfield",
		   code == NE_EXPR ? "one" : "zero");
	  return convert (compare_type,
			  (code == NE_EXPR
			   ? integer_one_node : integer_zero_node));
	}
    }
  else
    {
      tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
			      size_int (lbitsize - 1), 0);
      if (! integer_zerop (tem) && ! integer_all_onesp (tem))
	{
	  warning ("comparison is always %s due to width of bitfield",
		   code == NE_EXPR ? "one" : "zero");
	  return convert (compare_type,
			  (code == NE_EXPR
			   ? integer_one_node : integer_zero_node));
	}
    }

  /* Single-bit compares should always be against zero.  */
  if (lbitsize == 1 && ! integer_zerop (rhs))
    {
      code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
      rhs = convert (type, integer_zero_node);
    }

  /* Make a new bitfield reference, shift the constant over the
     appropriate number of bits and mask it with the computed mask
     (in case this was a signed field).  If we changed it, make a new one.  */
  lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);

  rhs = fold (const_binop (BIT_AND_EXPR,
			   const_binop (LSHIFT_EXPR,
					convert (unsigned_type, rhs),
					size_int (lbitpos), 0),
			   mask, 0));

  return build (code, compare_type,
		build (BIT_AND_EXPR, unsigned_type, lhs, mask),
		rhs);
}
/* Subroutine for fold_truthop: decode a field reference.

   If EXP is a comparison reference, we return the innermost reference.

   *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
   set to the starting bit number.

   If the innermost field can be completely contained in a mode-sized
   unit, *PMODE is set to that mode.  Otherwise, it is set to VOIDmode.

   *PVOLATILEP is set to 1 if any expression encountered is volatile;
   otherwise it is not changed.

   *PUNSIGNEDP is set to the signedness of the field.

   *PMASK is set to the mask used.  This is either contained in a
   BIT_AND_EXPR or derived from the width of the field.

   Return 0 if this is not a component reference or is one that we can't
   do anything with.  */
2435 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2438 int *pbitsize, *pbitpos;
2439 enum machine_mode *pmode;
2440 int *punsignedp, *pvolatilep;
2447 /* All the optimizations using this function assume integer fields.
2448 There are problems with FP fields since the type_for_size call
2449 below can fail for, e.g., XFmode. */
2450 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
2455 if (TREE_CODE (exp) == BIT_AND_EXPR)
2457 mask = TREE_OPERAND (exp, 1);
2458 exp = TREE_OPERAND (exp, 0);
2459 STRIP_NOPS (exp); STRIP_NOPS (mask);
2460 if (TREE_CODE (mask) != INTEGER_CST)
2464 if (TREE_CODE (exp) != COMPONENT_REF && TREE_CODE (exp) != ARRAY_REF
2465 && TREE_CODE (exp) != BIT_FIELD_REF)
2468 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2469 punsignedp, pvolatilep);
2470 if (*pbitsize < 0 || offset != 0)
2475 tree unsigned_type = type_for_size (*pbitsize, 1);
2476 int precision = TYPE_PRECISION (unsigned_type);
2478 mask = build_int_2 (~0, ~0);
2479 TREE_TYPE (mask) = unsigned_type;
2480 force_fit_type (mask, 0);
2481 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2482 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2489 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order bits of its type. */
2493 all_ones_mask_p (mask, size)
2497 tree type = TREE_TYPE (mask);
2498 int precision = TYPE_PRECISION (type);
2501 tmask = build_int_2 (~0, ~0);
2502 TREE_TYPE (tmask) = signed_type (type);
2503 force_fit_type (tmask, 0);
2505 operand_equal_p (mask,
2506 const_binop (RSHIFT_EXPR,
2507 const_binop (LSHIFT_EXPR, tmask,
2508 size_int (precision - size), 0),
2509 size_int (precision - size), 0),
2513 /* Subroutine for fold_truthop: determine if an operand is simple enough
2514 to be evaluated unconditionally. */
2517 simple_operand_p (exp)
2520 /* Strip any conversions that don't change the machine mode. */
2521 while ((TREE_CODE (exp) == NOP_EXPR
2522 || TREE_CODE (exp) == CONVERT_EXPR)
2523 && (TYPE_MODE (TREE_TYPE (exp))
2524 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2525 exp = TREE_OPERAND (exp, 0);
2527 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2528 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2529 && ! TREE_ADDRESSABLE (exp)
2530 && ! TREE_THIS_VOLATILE (exp)
2531 && ! DECL_NONLOCAL (exp)
2532 /* Don't regard global variables as simple. They may be
2533 allocated in ways unknown to the compiler (shared memory,
2534 #pragma weak, etc). */
2535 && ! TREE_PUBLIC (exp)
2536 && ! DECL_EXTERNAL (exp)
2537 /* Loading a static variable is unduly expensive, but global
2538 registers aren't expensive. */
2539 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2542 /* Subroutine for fold_truthop: try to optimize a range test.
2544 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2546 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2547 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2548 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of
2551 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2552 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2553 larger than HI_CST (they may be equal).
2555 We return the simplified tree or 0 if no optimization is possible. */
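/* Illustrative results (added): under TRUTH_AND_EXPR, "i >= 2 && i <= 9"
   folds to "(unsigned) (i - 2) <= 7"; under TRUTH_OR_EXPR, "i < 2 || i > 9"
   folds to "(unsigned) (i - 2) > 7".  The unsigned subtraction lets one
   comparison cover both bounds, since values below the low bound wrap
   around to large unsigned values.  */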
2558 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2559 enum tree_code jcode, lo_code, hi_code;
2560 tree type, var, lo_cst, hi_cst;
2563 enum tree_code rcode;
2565 /* See if this is a range test and normalize the constant terms. */
2567 if (jcode == TRUTH_AND_EXPR)
2572 /* See if we have VAR != CST && VAR != CST+1. */
2573 if (! (hi_code == NE_EXPR
2574 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2575 && tree_int_cst_equal (integer_one_node,
2576 const_binop (MINUS_EXPR,
2577 hi_cst, lo_cst, 0))))
2585 if (hi_code == LT_EXPR)
2586 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2587 else if (hi_code != LE_EXPR)
2590 if (lo_code == GT_EXPR)
2591 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2593 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
2606 /* See if we have VAR == CST || VAR == CST+1. */
2607 if (! (hi_code == EQ_EXPR
2608 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2609 && tree_int_cst_equal (integer_one_node,
2610 const_binop (MINUS_EXPR,
2611 hi_cst, lo_cst, 0))))
2619 if (hi_code == GE_EXPR)
2620 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2621 else if (hi_code != GT_EXPR)
2624 if (lo_code == LE_EXPR)
2625 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2627 /* We now have VAR < LO_CST || VAR > HI_CST. */
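/* Example (added): the adjacent-constant cases work the same way:
   "i == 5 || i == 6" becomes "(unsigned) (i - 5) <= 1", and its dual
   "i != 5 && i != 6" becomes "(unsigned) (i - 5) > 1".  */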
2636 /* When normalizing, it is possible to both increment the smaller constant
2637 and decrement the larger constant. See if they are still ordered. */
2638 if (tree_int_cst_lt (hi_cst, lo_cst))
2641 /* Fail if VAR isn't an integer. */
2642 utype = TREE_TYPE (var);
2643 if (! INTEGRAL_TYPE_P (utype))
2646 /* The range test is invalid if subtracting the two constants results
2647 in overflow. This can happen in traditional mode. */
2648 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2649 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2652 if (! TREE_UNSIGNED (utype))
2654 utype = unsigned_type (utype);
2655 var = convert (utype, var);
2656 lo_cst = convert (utype, lo_cst);
2657 hi_cst = convert (utype, hi_cst);
2660 return fold (convert (type,
2661 build (rcode, utype,
2662 build (MINUS_EXPR, utype, var, lo_cst),
2663 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2666 /* Find ways of folding logical expressions of LHS and RHS:
2667 Try to merge two comparisons to the same innermost item.
2668 Look for range tests like "ch >= '0' && ch <= '9'".
2669 Look for combinations of simple terms on machines with expensive branches
2670 and evaluate the RHS unconditionally.
2672 For example, if we have p->a == 2 && p->b == 4 and we can make an
2673 object large enough to span both A and B, we can do this with a comparison
2674 against the object ANDed with the a mask.
2676 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2677 operations to do this with one comparison.
2679 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2680 function and the one above.
2682 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2683 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2685 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its two operands.
2688 We return the simplified tree or 0 if no optimization is possible. */
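/* Illustration (added): given
     struct s { unsigned a : 4; unsigned b : 4; } *p;
   the test "p->a == 2 && p->b == 3" can fetch the byte holding both
   fields once and compare it, roughly
     (fetched_byte & 0xff) == (2 | (3 << 4))
   on a little-endian target.  The code below builds this kind of masked
   compare out of bit-field references.  */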
2691 fold_truthop (code, truth_type, lhs, rhs)
2692 enum tree_code code;
2693 tree truth_type, lhs, rhs;
2695 /* If this is the "or" of two comparisons, we can do something if
2696 the comparisons are NE_EXPR. If this is the "and", we can do something
2697 if the comparisons are EQ_EXPR. I.e.,
2698 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2700 WANTED_CODE is this operation code. For single bit fields, we can
2701 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2702 comparison for one-bit fields. */
2704 enum tree_code wanted_code;
2705 enum tree_code lcode, rcode;
2706 tree ll_arg, lr_arg, rl_arg, rr_arg;
2707 tree ll_inner, lr_inner, rl_inner, rr_inner;
2708 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2709 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2710 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2711 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2712 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2713 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2714 enum machine_mode lnmode, rnmode;
2715 tree ll_mask, lr_mask, rl_mask, rr_mask;
2716 tree l_const, r_const;
2718 int first_bit, end_bit;
2721 /* Start by getting the comparison codes and seeing if this looks like
2722 a range test. Fail if anything is volatile. If one operand is a
2723 BIT_AND_EXPR with the constant one, treat it as if it were surrounded by a NE_EXPR. */
2726 if (TREE_SIDE_EFFECTS (lhs)
2727 || TREE_SIDE_EFFECTS (rhs))
2730 lcode = TREE_CODE (lhs);
2731 rcode = TREE_CODE (rhs);
2733 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2734 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2736 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2737 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
2739 if (TREE_CODE_CLASS (lcode) != '<'
2740 || TREE_CODE_CLASS (rcode) != '<')
2743 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2744 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2746 ll_arg = TREE_OPERAND (lhs, 0);
2747 lr_arg = TREE_OPERAND (lhs, 1);
2748 rl_arg = TREE_OPERAND (rhs, 0);
2749 rr_arg = TREE_OPERAND (rhs, 1);
2751 if (TREE_CODE (lr_arg) == INTEGER_CST
2752 && TREE_CODE (rr_arg) == INTEGER_CST
2753 && operand_equal_p (ll_arg, rl_arg, 0))
2755 if (tree_int_cst_lt (lr_arg, rr_arg))
2756 result = range_test (code, truth_type, lcode, rcode,
2757 ll_arg, lr_arg, rr_arg);
2759 result = range_test (code, truth_type, rcode, lcode,
2760 ll_arg, rr_arg, lr_arg);
2762 /* If this isn't a range test, it also isn't a comparison that
2763 can be merged. However, it wins to evaluate the RHS unconditionally
2764 on machines with expensive branches. */
2766 if (result == 0 && BRANCH_COST >= 2)
2768 if (TREE_CODE (ll_arg) != VAR_DECL
2769 && TREE_CODE (ll_arg) != PARM_DECL)
2771 /* Avoid evaluating the variable part twice. */
2772 ll_arg = save_expr (ll_arg);
2773 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2774 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2776 return build (code, truth_type, lhs, rhs);
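/* Example (added): for "p->i != 3 && p->i != 7" the range test fails
   (the constants are not adjacent), but p->i is wrapped in a SAVE_EXPR
   so it is loaded only once, and both cheap comparisons are evaluated
   unconditionally, avoiding a branch between them.  */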
2781 /* If the RHS can be evaluated unconditionally and its operands are
2782 simple, it wins to evaluate the RHS unconditionally on machines
2783 with expensive branches. In this case, this isn't a comparison
2784 that can be merged. */
2786 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2787 are with zero (tmw). */
2789 if (BRANCH_COST >= 2
2790 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2791 && simple_operand_p (rl_arg)
2792 && simple_operand_p (rr_arg))
2793 return build (code, truth_type, lhs, rhs);
2795 /* See if the comparisons can be merged. Then get all the parameters for
2798 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2799 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2803 ll_inner = decode_field_reference (ll_arg,
2804 &ll_bitsize, &ll_bitpos, &ll_mode,
2805 &ll_unsignedp, &volatilep, &ll_mask);
2806 lr_inner = decode_field_reference (lr_arg,
2807 &lr_bitsize, &lr_bitpos, &lr_mode,
2808 &lr_unsignedp, &volatilep, &lr_mask);
2809 rl_inner = decode_field_reference (rl_arg,
2810 &rl_bitsize, &rl_bitpos, &rl_mode,
2811 &rl_unsignedp, &volatilep, &rl_mask);
2812 rr_inner = decode_field_reference (rr_arg,
2813 &rr_bitsize, &rr_bitpos, &rr_mode,
2814 &rr_unsignedp, &volatilep, &rr_mask);
2816 /* The inner operation on the lhs of each comparison must be the
2817 same if we are to be able to do anything. Then see if we have
2818 constants. If not, the same must be true for the rhs operands. */
2820 if (volatilep || ll_inner == 0 || rl_inner == 0
2821 || ! operand_equal_p (ll_inner, rl_inner, 0))
2824 if (TREE_CODE (lr_arg) == INTEGER_CST
2825 && TREE_CODE (rr_arg) == INTEGER_CST)
2826 l_const = lr_arg, r_const = rr_arg;
2827 else if (lr_inner == 0 || rr_inner == 0
2828 || ! operand_equal_p (lr_inner, rr_inner, 0))
2831 l_const = r_const = 0;
2833 /* If either comparison code is not correct for our logical operation,
2834 fail. However, we can convert a one-bit comparison against zero into
2835 the opposite comparison against that bit being set in the field. */
2837 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2838 if (lcode != wanted_code)
2840 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2846 if (rcode != wanted_code)
2848 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2854 /* See if we can find a mode that contains both fields being compared on
2855 the left. If we can't, fail. Otherwise, update all constants and masks
2856 to be relative to a field of that size. */
2857 first_bit = MIN (ll_bitpos, rl_bitpos);
2858 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2859 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2860 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2862 if (lnmode == VOIDmode)
2865 lnbitsize = GET_MODE_BITSIZE (lnmode);
2866 lnbitpos = first_bit & ~ (lnbitsize - 1);
2867 type = type_for_size (lnbitsize, 1);
2868 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2870 #if BYTES_BIG_ENDIAN
2871 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2872 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
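/* Illustration of the adjustment above (added): with lnbitsize == 32, a
   field of ll_bitsize == 8 whose big-endian xll_bitpos was 0 becomes
   32 - 0 - 8 == 24, i.e. the shift count that reaches the high-order
   byte of the fetched word.  */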
2875 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2876 size_int (xll_bitpos), 0);
2877 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2878 size_int (xrl_bitpos), 0);
2880 /* Make sure the constants are interpreted as unsigned, so we
2881 don't have sign bits outside the range of their type. */
2885 l_const = convert (unsigned_type (TREE_TYPE (l_const)), l_const);
2886 l_const = const_binop (LSHIFT_EXPR, convert (type, l_const),
2887 size_int (xll_bitpos), 0);
2891 r_const = convert (unsigned_type (TREE_TYPE (r_const)), r_const);
2892 r_const = const_binop (LSHIFT_EXPR, convert (type, r_const),
2893 size_int (xrl_bitpos), 0);
2896 /* If the right sides are not constant, do the same for them. Also,
2897 disallow this optimization if a size or signedness mismatch occurs
2898 between the left and right sides. */
2901 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2902 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2903 /* Make sure the two fields on the right
2904 correspond to the left without being swapped. */
2905 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2908 first_bit = MIN (lr_bitpos, rr_bitpos);
2909 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2910 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2911 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2913 if (rnmode == VOIDmode)
2916 rnbitsize = GET_MODE_BITSIZE (rnmode);
2917 rnbitpos = first_bit & ~ (rnbitsize - 1);
2918 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2920 #if BYTES_BIG_ENDIAN
2921 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2922 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2925 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2926 size_int (xlr_bitpos), 0);
2927 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2928 size_int (xrr_bitpos), 0);
2930 /* Make a mask that corresponds to both fields being compared.
2931 Do this for both items being compared. If the masks agree,
2932 we can do this by masking both and comparing the masked results. */
2934 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2935 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2936 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2938 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2939 ll_unsignedp || rl_unsignedp);
2940 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2941 lr_unsignedp || rr_unsignedp);
2942 if (! all_ones_mask_p (ll_mask, lnbitsize))
2944 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2945 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2947 return build (wanted_code, truth_type, lhs, rhs);
2950 /* There is still another way we can do something: If both pairs of
2951 fields being compared are adjacent, we may be able to make a wider
2952 field containing them both. */
2953 if ((ll_bitsize + ll_bitpos == rl_bitpos
2954 && lr_bitsize + lr_bitpos == rr_bitpos)
2955 || (ll_bitpos == rl_bitpos + rl_bitsize
2956 && lr_bitpos == rr_bitpos + rr_bitsize))
2957 return build (wanted_code, truth_type,
2958 make_bit_field_ref (ll_inner, type,
2959 ll_bitsize + rl_bitsize,
2960 MIN (ll_bitpos, rl_bitpos),
2962 make_bit_field_ref (lr_inner, type,
2963 lr_bitsize + rr_bitsize,
2964 MIN (lr_bitpos, rr_bitpos),
2970 /* Handle the case of comparisons with constants. If there is something in
2971 common between the masks, those bits of the constants must be the same.
2972 If not, the condition is always false. Test for this to avoid generating
2973 incorrect code below. */
2974 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2975 if (! integer_zerop (result)
2976 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2977 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2979 if (wanted_code == NE_EXPR)
2981 warning ("`or' of unmatched not-equal tests is always 1");
2982 return convert (truth_type, integer_one_node);
2986 warning ("`and' of mutually exclusive equal-tests is always zero");
2987 return convert (truth_type, integer_zero_node);
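/* Example (added): "(p->x & 4) == 4 && (p->x & 4) == 0" shares mask bit
   4 but the two constants disagree on it, so the conjunction folds to
   constant zero here (with the warning above); the corresponding "!="
   pair under "||" folds to constant one.  */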
2991 /* Construct the expression we will return. First get the component
2992 reference we will make. Unless the mask is all ones the width of
2993 that field, perform the mask operation. Then compare with the merged constant. */
2995 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2996 ll_unsignedp || rl_unsignedp);
2998 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2999 if (! all_ones_mask_p (ll_mask, lnbitsize))
3000 result = build (BIT_AND_EXPR, type, result, ll_mask);
3002 return build (wanted_code, truth_type, result,
3003 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
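/* Trace of the constant case (added): for "p->a == 2 && p->b == 3" with
   two adjacent 4-bit fields, ll_mask | rl_mask is 0xff, the shifted
   constants OR together to 0x32 (2 | 3 << 4, little-endian), and the
   result compares the masked byte against 0x32 using wanted_code.  */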
3006 /* Perform constant folding and related simplification of EXPR.
3007 The related simplifications include x*1 => x, x*0 => 0, etc.,
3008 and application of the associative law.
3009 NOP_EXPR conversions may be removed freely (as long as we
3010 are careful not to change the C type of the overall expression).
3011 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3012 but we can constant-fold them if they have constant operands. */
3018 register tree t = expr;
3019 tree t1 = NULL_TREE;
3021 tree type = TREE_TYPE (expr);
3022 register tree arg0, arg1;
3023 register enum tree_code code = TREE_CODE (t);
3027 /* WINS will be nonzero when the switch is done
3028 if all operands are constant. */
3032 /* Return right away if already constant. */
3033 if (TREE_CONSTANT (t))
3035 if (code == CONST_DECL)
3036 return DECL_INITIAL (t);
3040 kind = TREE_CODE_CLASS (code);
3041 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3045 /* Special case for conversion ops that can have fixed point args. */
3046 arg0 = TREE_OPERAND (t, 0);
3048 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3050 STRIP_TYPE_NOPS (arg0);
3052 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3053 subop = TREE_REALPART (arg0);
3057 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3058 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3059 && TREE_CODE (subop) != REAL_CST
3060 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3062 /* Note that TREE_CONSTANT isn't enough:
3063 static var addresses are constant but we can't
3064 do arithmetic on them. */
3067 else if (kind == 'e' || kind == '<'
3068 || kind == '1' || kind == '2' || kind == 'r')
3070 register int len = tree_code_length[(int) code];
3072 for (i = 0; i < len; i++)
3074 tree op = TREE_OPERAND (t, i);
3078 continue; /* Valid for CALL_EXPR, at least. */
3080 if (kind == '<' || code == RSHIFT_EXPR)
3082 /* Signedness matters here. Perhaps we can refine this later. */
3084 STRIP_TYPE_NOPS (op);
3088 /* Strip any conversions that don't change the mode. */
3092 if (TREE_CODE (op) == COMPLEX_CST)
3093 subop = TREE_REALPART (op);
3097 if (TREE_CODE (subop) != INTEGER_CST
3098 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3099 && TREE_CODE (subop) != REAL_CST
3100 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3102 /* Note that TREE_CONSTANT isn't enough:
3103 static var addresses are constant but we can't
3104 do arithmetic on them. */
3114 /* If this is a commutative operation, and ARG0 is a constant, move it
3115 to ARG1 to reduce the number of tests below. */
3116 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3117 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3118 || code == BIT_AND_EXPR)
3119 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3121 tem = arg0; arg0 = arg1; arg1 = tem;
3123 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3124 TREE_OPERAND (t, 1) = tem;
3127 /* Now WINS is set as described above,
3128 ARG0 is the first operand of EXPR,
3129 and ARG1 is the second operand (if it has more than one operand).
3131 First check for cases where an arithmetic operation is applied to a
3132 compound, conditional, or comparison operation. Push the arithmetic
3133 operation inside the compound or conditional to see if any folding
3134 can then be done. Convert comparison to conditional for this purpose.
3135 This also optimizes non-constant cases that used to be done in expand_expr.
3138 Before we do that, see if this is a BIT_AND_EXPR or a BIT_OR_EXPR,
3139 one of the operands is a comparison and the other is either a comparison
3140 or a BIT_AND_EXPR with the constant 1. In that case, the code below
3141 would make the expression more complex. Change it to a
3142 TRUTH_{AND,OR}_EXPR. */
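/* Example (added): "(a < b) & (c == d)" is rebuilt here as a
   TRUTH_AND_EXPR of the two comparisons; both operands are known to be
   0 or 1, so the bitwise and truth forms agree, and the truth-op
   folding further below can then try to merge or simplify them.  */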
3144 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR)
3145 && ((TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
3146 && (TREE_CODE_CLASS (TREE_CODE (arg1)) == '<'
3147 || (TREE_CODE (arg1) == BIT_AND_EXPR
3148 && integer_onep (TREE_OPERAND (arg1, 1)))))
3149 || (TREE_CODE_CLASS (TREE_CODE (arg1)) == '<'
3150 && (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
3151 || (TREE_CODE (arg0) == BIT_AND_EXPR
3152 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3153 return fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
3156 if (TREE_CODE_CLASS (code) == '1')
3158 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3159 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3160 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3161 else if (TREE_CODE (arg0) == COND_EXPR)
3163 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3164 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3165 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3167 /* If this was a conversion, and all we did was to move it
3168 inside the COND_EXPR, bring it back out. Then return so we
3169 don't get into an infinite recursion loop taking the conversion
3170 out and then back in. */
3172 if ((code == NOP_EXPR || code == CONVERT_EXPR
3173 || code == NON_LVALUE_EXPR)
3174 && TREE_CODE (t) == COND_EXPR
3175 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3176 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3177 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3178 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0))))
3179 t = build1 (code, type,
3181 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3182 TREE_OPERAND (t, 0),
3183 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3184 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
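/* Example (added): folding "(int) (flag ? c1 : c2)" (c1, c2 of type
   char) first pushes the conversion inside, giving
   "flag ? (int) c1 : (int) c2"; both arms now start with the same
   conversion from the same type, so it is hoisted back out here, which
   stops the push/hoist pair from recursing forever.  */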
3187 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3188 return fold (build (COND_EXPR, type, arg0,
3189 fold (build1 (code, type, integer_one_node)),
3190 fold (build1 (code, type, integer_zero_node))));
3192 else if (TREE_CODE_CLASS (code) == '2'
3193 || TREE_CODE_CLASS (code) == '<')
3195 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3196 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3197 fold (build (code, type,
3198 arg0, TREE_OPERAND (arg1, 1))));
3199 else if (TREE_CODE (arg1) == COND_EXPR
3200 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3202 tree test, true_value, false_value;
3204 if (TREE_CODE (arg1) == COND_EXPR)
3206 test = TREE_OPERAND (arg1, 0);
3207 true_value = TREE_OPERAND (arg1, 1);
3208 false_value = TREE_OPERAND (arg1, 2);
3213 true_value = integer_one_node;
3214 false_value = integer_zero_node;
3217 /* If ARG0 is complex we want to make sure we only evaluate
3218 it once. Though this is only required if it is volatile, it
3219 might be more efficient even if it is not. However, if we
3220 succeed in folding one part to a constant, we do not need
3221 to make this SAVE_EXPR. Since we do this optimization
3222 primarily to see if we do end up with a constant and this
3223 SAVE_EXPR interferes with later optimizations, suppressing
3224 it when we can is important. */
3226 if ((TREE_CODE (arg0) != VAR_DECL && TREE_CODE (arg0) != PARM_DECL)
3227 || TREE_SIDE_EFFECTS (arg0))
3229 tree lhs = fold (build (code, type, arg0, true_value));
3230 tree rhs = fold (build (code, type, arg0, false_value));
3232 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3233 return fold (build (COND_EXPR, type, test, lhs, rhs));
3235 arg0 = save_expr (arg0);
3238 test = fold (build (COND_EXPR, type, test,
3239 fold (build (code, type, arg0, true_value)),
3240 fold (build (code, type, arg0, false_value))));
3241 if (TREE_CODE (arg0) == SAVE_EXPR)
3242 return build (COMPOUND_EXPR, type,
3243 convert (void_type_node, arg0), test);
3245 return convert (type, test);
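/* Example (added): folding "x + (flag ? y : z)" where x is not a simple
   decl yields "(void) SAVE_EXPR(x), (flag ? SAVE_EXPR(x) + y
   : SAVE_EXPR(x) + z)"; the leading COMPOUND_EXPR operand forces x to
   be evaluated exactly once, before the branch.  */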
3248 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3249 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3250 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3251 else if (TREE_CODE (arg0) == COND_EXPR
3252 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3254 tree test, true_value, false_value;
3256 if (TREE_CODE (arg0) == COND_EXPR)
3258 test = TREE_OPERAND (arg0, 0);
3259 true_value = TREE_OPERAND (arg0, 1);
3260 false_value = TREE_OPERAND (arg0, 2);
3265 true_value = integer_one_node;
3266 false_value = integer_zero_node;
3269 if ((TREE_CODE (arg1) != VAR_DECL && TREE_CODE (arg1) != PARM_DECL)
3270 || TREE_SIDE_EFFECTS (arg1))
3272 tree lhs = fold (build (code, type, true_value, arg1));
3273 tree rhs = fold (build (code, type, false_value, arg1));
3275 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3276 return fold (build (COND_EXPR, type, test, lhs, rhs));
3278 arg1 = save_expr (arg1);
3281 test = fold (build (COND_EXPR, type, test,
3282 fold (build (code, type, true_value, arg1)),
3283 fold (build (code, type, false_value, arg1))));
3284 if (TREE_CODE (arg1) == SAVE_EXPR)
3285 return build (COMPOUND_EXPR, type,
3286 convert (void_type_node, arg1), test);
3288 return convert (type, test);
3291 else if (TREE_CODE_CLASS (code) == '<'
3292 && TREE_CODE (arg0) == COMPOUND_EXPR)
3293 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3294 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3295 else if (TREE_CODE_CLASS (code) == '<'
3296 && TREE_CODE (arg1) == COMPOUND_EXPR)
3297 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3298 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3310 return fold (DECL_INITIAL (t));
3315 case FIX_TRUNC_EXPR:
3316 /* Other kinds of FIX are not handled properly by fold_convert. */
3317 /* Two conversions in a row are not needed unless:
3318 - the intermediate type is narrower than both initial and final, or
3319 - the intermediate type and innermost type differ in signedness,
3320 and the outermost type is wider than the intermediate, or
3321 - the initial type is a pointer type and the precisions of the
3322 intermediate and final types differ, or
3323 - the final type is a pointer type and the precisions of the
3324 initial and intermediate types differ. */
3325 if ((TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3326 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3327 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3328 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3330 TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3331 > TYPE_PRECISION (TREE_TYPE (t)))
3332 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3334 && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0)))
3336 && (TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3337 != TREE_UNSIGNED (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3338 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3339 < TYPE_PRECISION (TREE_TYPE (t))))
3340 && ((TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)))
3341 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3342 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))))
3344 (TREE_UNSIGNED (TREE_TYPE (t))
3345 && (TYPE_PRECISION (TREE_TYPE (t))
3346 > TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3347 && ! ((TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3349 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0)))
3350 != TYPE_PRECISION (TREE_TYPE (t))))
3351 && ! (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
3352 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)))
3353 != TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (t, 0))))))
3354 return convert (TREE_TYPE (t), TREE_OPERAND (TREE_OPERAND (t, 0), 0));
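/* Example (added): "(int) (long) i" (32-bit int, 64-bit long, i an int)
   reaches this return: the intermediate type is wider than both ends
   and agrees with them in signedness, so none of the exceptions above
   applies and the intermediate conversion is simply dropped.  */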
3356 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3357 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3358 /* Detect assigning a bitfield. */
3359 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3360 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3362 /* Don't leave an assignment inside a conversion
3363 unless assigning a bitfield. */
3364 tree prev = TREE_OPERAND (t, 0);
3365 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3366 /* First do the assignment, then return converted constant. */
3367 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
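/* Example (added): "(long) (i = 5)" becomes "(i = 5, (long) 5)": the
   store still happens first, and the conversion is then applied to the
   known constant instead of re-reading i.  */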
3373 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3376 return fold_convert (t, arg0);
3378 #if 0 /* This loses on &"foo"[0]. */
3383 /* Fold an expression like: "foo"[2] */
3384 if (TREE_CODE (arg0) == STRING_CST
3385 && TREE_CODE (arg1) == INTEGER_CST
3386 && !TREE_INT_CST_HIGH (arg1)
3387 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3389 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3390 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3391 force_fit_type (t, 0);
3398 TREE_CONSTANT (t) = wins;
3404 if (TREE_CODE (arg0) == INTEGER_CST)
3406 HOST_WIDE_INT low, high;
3407 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3408 TREE_INT_CST_HIGH (arg0),
3410 t = build_int_2 (low, high);
3411 TREE_TYPE (t) = type;
3412 TREE_CONSTANT_OVERFLOW (t)
3413 = (TREE_CONSTANT_OVERFLOW (arg0)
3414 | force_fit_type (t, overflow));
3416 else if (TREE_CODE (arg0) == REAL_CST)
3417 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3418 TREE_TYPE (t) = type;
3420 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3421 return TREE_OPERAND (arg0, 0);
3423 /* Convert - (a - b) to (b - a) for non-floating-point. */
3424 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3425 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3426 TREE_OPERAND (arg0, 0));
3433 if (TREE_CODE (arg0) == INTEGER_CST)
3435 if (! TREE_UNSIGNED (type)
3436 && TREE_INT_CST_HIGH (arg0) < 0)
3438 HOST_WIDE_INT low, high;
3439 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3440 TREE_INT_CST_HIGH (arg0),
3442 t = build_int_2 (low, high);
3443 TREE_TYPE (t) = type;
3444 TREE_CONSTANT_OVERFLOW (t)
3445 = (TREE_CONSTANT_OVERFLOW (arg0)
3446 | force_fit_type (t, overflow));
3449 else if (TREE_CODE (arg0) == REAL_CST)
3451 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3452 t = build_real (type,
3453 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3455 TREE_TYPE (t) = type;
3457 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3458 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3464 if (TREE_CODE (arg0) == INTEGER_CST)
3465 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3466 ~ TREE_INT_CST_HIGH (arg0));
3467 TREE_TYPE (t) = type;
3468 force_fit_type (t, 0);
3469 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3471 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3472 return TREE_OPERAND (arg0, 0);
3476 /* A + (-B) -> A - B */
3477 if (TREE_CODE (arg1) == NEGATE_EXPR)
3478 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3479 else if (! FLOAT_TYPE_P (type))
3481 if (integer_zerop (arg1))
3482 return non_lvalue (convert (type, arg0));
3484 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3485 with a constant, and the two constants have no bits in common,
3486 we should treat this as a BIT_IOR_EXPR since this may produce more
3488 if (TREE_CODE (arg0) == BIT_AND_EXPR
3489 && TREE_CODE (arg1) == BIT_AND_EXPR
3490 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3491 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3492 && integer_zerop (const_binop (BIT_AND_EXPR,
3493 TREE_OPERAND (arg0, 1),
3494 TREE_OPERAND (arg1, 1), 0)))
3496 code = BIT_IOR_EXPR;
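/* Example (added): "(x & 0xf0) + (y & 0x0f)" has disjoint constant
   masks, so no bit position can generate a carry and the addition is
   re-folded as the equivalent "(x & 0xf0) | (y & 0x0f)".  */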
3500 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3501 about the case where C is a constant, just try one of the
3502 four possibilities. */
3504 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3505 && operand_equal_p (TREE_OPERAND (arg0, 1),
3506 TREE_OPERAND (arg1, 1), 0))
3507 return fold (build (MULT_EXPR, type,
3508 fold (build (PLUS_EXPR, type,
3509 TREE_OPERAND (arg0, 0),
3510 TREE_OPERAND (arg1, 0))),
3511 TREE_OPERAND (arg0, 1)));
3513 /* In IEEE floating point, x+0 may not equal x. */
3514 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3515 && real_zerop (arg1))
3516 return non_lvalue (convert (type, arg0));
3518 /* In most languages, can't associate operations on floats
3519 through parentheses. Rather than remember where the parentheses
3520 were, we don't associate floats at all. It shouldn't matter much. */
3521 if (FLOAT_TYPE_P (type))
3523 /* The varsign == -1 cases happen only for addition and subtraction.
3524 It says that the arg that was split was really CON minus VAR.
3525 The rest of the code applies to all associative operations. */
3531 if (split_tree (arg0, code, &var, &con, &varsign))
3535 /* EXPR is (CON-VAR) +- ARG1. */
3536 /* If it is + and VAR==ARG1, return just CONST. */
3537 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3538 return convert (TREE_TYPE (t), con);
3540 /* If ARG0 is a constant, don't change things around;
3541 instead keep all the constant computations together. */
3543 if (TREE_CONSTANT (arg0))
3546 /* Otherwise return (CON +- ARG1) - VAR. */
3547 TREE_SET_CODE (t, MINUS_EXPR);
3548 TREE_OPERAND (t, 1) = var;
3550 = fold (build (code, TREE_TYPE (t), con, arg1));
3554 /* EXPR is (VAR+CON) +- ARG1. */
3555 /* If it is - and VAR==ARG1, return just CONST. */
3556 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3557 return convert (TREE_TYPE (t), con);
3559 /* If ARG0 is a constant, don't change things around;
3560 instead keep all the constant computations together. */
3562 if (TREE_CONSTANT (arg0))
3565 /* Otherwise return VAR +- (ARG1 +- CON). */
3566 TREE_OPERAND (t, 1) = tem
3567 = fold (build (code, TREE_TYPE (t), arg1, con));
3568 TREE_OPERAND (t, 0) = var;
3569 if (integer_zerop (tem)
3570 && (code == PLUS_EXPR || code == MINUS_EXPR))
3571 return convert (type, var);
3572 /* If we have x +/- (c - d) [c an explicit integer]
3573 change it to x -/+ (d - c) since if d is relocatable
3574 then the latter can be a single immediate insn
3575 and the former cannot. */
3576 if (TREE_CODE (tem) == MINUS_EXPR
3577 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3579 tree tem1 = TREE_OPERAND (tem, 1);
3580 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3581 TREE_OPERAND (tem, 0) = tem1;
3583 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3589 if (split_tree (arg1, code, &var, &con, &varsign))
3591 /* EXPR is ARG0 +- (CON +- VAR). */
3592 if (TREE_CODE (t) == MINUS_EXPR
3593 && operand_equal_p (var, arg0, 0))
3595 /* If VAR and ARG0 cancel, return just CON or -CON. */
3596 if (code == PLUS_EXPR)
3597 return convert (TREE_TYPE (t), con);
3598 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3599 convert (TREE_TYPE (t), con)));
3601 if (TREE_CONSTANT (arg1))
3605 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3607 = fold (build (code, TREE_TYPE (t), arg0, con));
3608 TREE_OPERAND (t, 1) = var;
3609 if (integer_zerop (TREE_OPERAND (t, 0))
3610 && TREE_CODE (t) == PLUS_EXPR)
3611 return convert (TREE_TYPE (t), var);
3616 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3617 if (TREE_CODE (arg1) == REAL_CST)
3619 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3621 t1 = const_binop (code, arg0, arg1, 0);
3622 if (t1 != NULL_TREE)
3624 /* The return value should always have
3625 the same type as the original expression. */
3626 TREE_TYPE (t1) = TREE_TYPE (t);
3632 if (! FLOAT_TYPE_P (type))
3634 if (! wins && integer_zerop (arg0))
3635 return build1 (NEGATE_EXPR, type, arg1);
3636 if (integer_zerop (arg1))
3637 return non_lvalue (convert (type, arg0));
3639 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3640 about the case where C is a constant, just try one of the
3641 four possibilities. */
3643 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3644 && operand_equal_p (TREE_OPERAND (arg0, 1),
3645 TREE_OPERAND (arg1, 1), 0))
3646 return fold (build (MULT_EXPR, type,
3647 fold (build (MINUS_EXPR, type,
3648 TREE_OPERAND (arg0, 0),
3649 TREE_OPERAND (arg1, 0))),
3650 TREE_OPERAND (arg0, 1)));
3652 /* Convert A - (-B) to A + B. */
3653 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3654 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3655 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT)
3657 /* Except with IEEE floating point, 0-x equals -x. */
3658 if (! wins && real_zerop (arg0))
3659 return build1 (NEGATE_EXPR, type, arg1);
3660 /* Except with IEEE floating point, x-0 equals x. */
3661 if (real_zerop (arg1))
3662 return non_lvalue (convert (type, arg0));
3664 /* Fold &x - &x. This can happen from &x.foo - &x.
3665 This is unsafe for certain floats even in non-IEEE formats.
3666 In IEEE, it is unsafe because it does wrong for NaNs.
3667 Also note that operand_equal_p is always false if an operand is volatile. */
3670 if (operand_equal_p (arg0, arg1, FLOAT_TYPE_P (type)))
3671 return convert (type, integer_zero_node);
3676 if (! FLOAT_TYPE_P (type))
3678 if (integer_zerop (arg1))
3679 return omit_one_operand (type, arg1, arg0);
3680 if (integer_onep (arg1))
3681 return non_lvalue (convert (type, arg0));
3683 /* (a * (1 << b)) is (a << b) */
3684 if (TREE_CODE (arg1) == LSHIFT_EXPR
3685 && integer_onep (TREE_OPERAND (arg1, 0)))
3686 return fold (build (LSHIFT_EXPR, type, arg0,
3687 TREE_OPERAND (arg1, 1)));
3688 if (TREE_CODE (arg0) == LSHIFT_EXPR
3689 && integer_onep (TREE_OPERAND (arg0, 0)))
3690 return fold (build (LSHIFT_EXPR, type, arg1,
3691 TREE_OPERAND (arg0, 1)));
3695 /* x*0 is 0, except for IEEE floating point. */
3696 if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3697 && real_zerop (arg1))
3698 return omit_one_operand (type, arg1, arg0);
3699 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3700 However, ANSI says we can drop signals,
3701 so we can do this anyway. */
3702 if (real_onep (arg1))
3703 return non_lvalue (convert (type, arg0));
3705 if (! wins && real_twop (arg1))
3707 tree arg = save_expr (arg0);
3708 return build (PLUS_EXPR, type, arg, arg);
3715 if (integer_all_onesp (arg1))
3716 return omit_one_operand (type, arg1, arg0);
3717 if (integer_zerop (arg1))
3718 return non_lvalue (convert (type, arg0));
3719 t1 = distribute_bit_expr (code, type, arg0, arg1);
3720 if (t1 != NULL_TREE)
3723 /* If A is unsigned and C1+C2 is the size of A, then
3724 (A << C1) | (A >> C2) is a rotate of A by C1 bits. */
3726 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3727 || TREE_CODE (arg0) == LSHIFT_EXPR)
3728 && (TREE_CODE (arg1) == RSHIFT_EXPR
3729 || TREE_CODE (arg1) == LSHIFT_EXPR)
3730 && TREE_CODE (arg0) != TREE_CODE (arg1)
3731 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3732 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3733 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3734 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3735 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3736 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3737 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3738 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3739 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3740 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3741 TREE_CODE (arg0) == LSHIFT_EXPR
3742 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
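/* Example (added): with a 32-bit unsigned a, "(a << 3) | (a >> 29)"
   satisfies every test above (same operand, unsigned, 3 + 29 == 32)
   and becomes a left rotate of a by 3 bits.  */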
3747 if (integer_zerop (arg1))
3748 return non_lvalue (convert (type, arg0));
3749 if (integer_all_onesp (arg1))
3750 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3755 if (integer_all_onesp (arg1))
3756 return non_lvalue (convert (type, arg0));
3757 if (integer_zerop (arg1))
3758 return omit_one_operand (type, arg1, arg0);
3759 t1 = distribute_bit_expr (code, type, arg0, arg1);
3760 if (t1 != NULL_TREE)
3762 /* Simplify ((int)c & 0x3ff) into (int)c, if c is unsigned char. */
3763 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3764 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3766 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3767 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3768 && (~TREE_INT_CST_LOW (arg0)
3769 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3770 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3772 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3773 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3775 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3776 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3777 && (~TREE_INT_CST_LOW (arg1)
3778 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3779 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
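/* Example (added): with unsigned char c, "(int) c & 0x1ff" keeps all
   eight bits that (int) c can have set, so the AND is dropped and the
   expression becomes plain "(int) c".  */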
3783 case BIT_ANDTC_EXPR:
3784 if (integer_all_onesp (arg0))
3785 return non_lvalue (convert (type, arg1));
3786 if (integer_zerop (arg0))
3787 return omit_one_operand (type, arg0, arg1);
3788 if (TREE_CODE (arg1) == INTEGER_CST)
3790 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3791 code = BIT_AND_EXPR;
3796 case TRUNC_DIV_EXPR:
3797 case ROUND_DIV_EXPR:
3798 case FLOOR_DIV_EXPR:
3800 case EXACT_DIV_EXPR:
3802 if (integer_onep (arg1))
3803 return non_lvalue (convert (type, arg0));
3804 if (integer_zerop (arg1))
3807 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
3808 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
3809 expressions, which often appear in the offsets or sizes of
3810 objects with a varying size. Only deal with positive divisors and multiplicands.
3813 Look for NOPs and SAVE_EXPRs inside. */
3815 if (TREE_CODE (arg1) == INTEGER_CST
3816 && tree_int_cst_lt (integer_zero_node, arg1))
3818 int have_save_expr = 0;
3819 tree c2 = integer_zero_node;
3822 if (TREE_CODE (xarg0) == SAVE_EXPR)
3823 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3827 if (TREE_CODE (xarg0) == PLUS_EXPR
3828 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3829 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
3830 else if (TREE_CODE (xarg0) == MINUS_EXPR
3831 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3833 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
3834 xarg0 = TREE_OPERAND (xarg0, 0);
3837 if (TREE_CODE (xarg0) == SAVE_EXPR)
3838 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
3842 if (TREE_CODE (xarg0) == MULT_EXPR
3843 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
3844 && tree_int_cst_lt (integer_zero_node, TREE_OPERAND (xarg0, 1))
3845 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
3846 TREE_OPERAND (xarg0, 1), arg1, 1))
3847 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
3848 TREE_OPERAND (xarg0, 1), 1))))
3850 tree outer_div = integer_one_node;
3851 tree c1 = TREE_OPERAND (xarg0, 1);
3854 /* If C3 > C1, set them equal and do a divide by
3855 C3/C1 at the end of the operation. */
3856 if (tree_int_cst_lt (c1, c3))
3857 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
3859 /* The result is A * (C1/C3) + (C2/C3). */
3860 t = fold (build (PLUS_EXPR, type,
3861 fold (build (MULT_EXPR, type,
3862 TREE_OPERAND (xarg0, 0),
3863 const_binop (code, c1, c3, 1))),
3864 const_binop (code, c2, c3, 1)));
3866 if (! integer_onep (outer_div))
3867 t = fold (build (code, type, t, outer_div));
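/* Worked example (added): "(a * 8 + 4) / 2" has C1 == 8, C2 == 4,
   C3 == 2 with C1 % C3 == 0, giving "a * (8/2) + 4/2" == "a * 4 + 2".
   When instead C3 > C1, as in "(a * 2) / 8", C3 is reduced to C1 and
   the result is divided by C3/C1 == 4 afterwards, giving "a / 4".  */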
3876 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3877 #ifndef REAL_INFINITY
3878 if (TREE_CODE (arg1) == REAL_CST
3879 && real_zerop (arg1))
3882 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3887 case FLOOR_MOD_EXPR:
3888 case ROUND_MOD_EXPR:
3889 case TRUNC_MOD_EXPR:
3890 if (integer_onep (arg1))
3891 return omit_one_operand (type, integer_zero_node, arg0);
3892 if (integer_zerop (arg1))
3895 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
3896 where C1 % C3 == 0. Handle similarly to the division case,
3897 but don't bother with SAVE_EXPRs. */
3899 if (TREE_CODE (arg1) == INTEGER_CST
3900 && ! integer_zerop (arg1))
3902 tree c2 = integer_zero_node;
3905 if (TREE_CODE (xarg0) == PLUS_EXPR
3906 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3907 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
3908 else if (TREE_CODE (xarg0) == MINUS_EXPR
3909 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
3911 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
3912 xarg0 = TREE_OPERAND (xarg0, 0);
3917 if (TREE_CODE (xarg0) == MULT_EXPR
3918 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
3919 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
3920 TREE_OPERAND (xarg0, 1),
3922 /* The result is (C2%C3). */
3923 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
3924 TREE_OPERAND (xarg0, 0));
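/* Example (added): "(a * 12 + 5) % 4" has C1 == 12 divisible by
   C3 == 4, so the multiple drops out and the expression folds to
   "5 % 4" == 1; omit_one_operand keeps a's evaluation only if it has
   side effects.  */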
3933 if (integer_zerop (arg1))
3934 return non_lvalue (convert (type, arg0));
3935 /* Since negative shift count is not well-defined,
3936 don't try to compute it in the compiler. */
3937 if (tree_int_cst_lt (arg1, integer_zero_node))
3942 if (operand_equal_p (arg0, arg1, 0))
3944 if (INTEGRAL_TYPE_P (type)
3945 && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
3946 return omit_one_operand (type, arg1, arg0);
3950 if (operand_equal_p (arg0, arg1, 0))
3952 if (INTEGRAL_TYPE_P (type)
3953 && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
3954 return omit_one_operand (type, arg1, arg0);
3957 case TRUTH_NOT_EXPR:
3958 /* Note that the operand of this must be an int
3959 and its values must be 0 or 1.
3960 ("true" is a fixed value perhaps depending on the language,
3961 but we don't handle values other than 1 correctly yet.) */
3962 return invert_truthvalue (arg0);
3964 case TRUTH_ANDIF_EXPR:
3965 /* Note that the operands of this must be ints
3966 and their values must be 0 or 1.
3967 ("true" is a fixed value perhaps depending on the language.) */
3968 /* If first arg is constant zero, return it. */
3969 if (integer_zerop (arg0))
3971 case TRUTH_AND_EXPR:
3972 /* If either arg is constant true, drop it. */
3973 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
3974 return non_lvalue (arg1);
3975 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
3976 return non_lvalue (arg0);
3977 /* If second arg is constant zero, result is zero, but first arg
3978 must be evaluated. */
3979 if (integer_zerop (arg1))
3980 return omit_one_operand (type, arg1, arg0);
3983 /* Check for the possibility of merging component references. If our
3984 lhs is another similar operation, try to merge its rhs with our
3985 rhs. Then try to merge our lhs and rhs. */
3988 if (TREE_CODE (arg0) == code)
3990 tem = fold_truthop (code, type,
3991 TREE_OPERAND (arg0, 1), arg1);
3993 return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));
3996 tem = fold_truthop (code, type, arg0, arg1);
4002 case TRUTH_ORIF_EXPR:
4003 /* Note that the operands of this must be ints
4004 and their values must be 0 or true.
4005 ("true" is a fixed value perhaps depending on the language.) */
4006 /* If first arg is constant true, return it. */
4007 if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
4010 /* If either arg is constant zero, drop it. */
4011 if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
4012 return non_lvalue (arg1);
4013 if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
4014 return non_lvalue (arg0);
4015 /* If second arg is constant true, result is true, but we must
4016 evaluate first arg. */
4017 if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
4018 return omit_one_operand (type, arg1, arg0);
4021 case TRUTH_XOR_EXPR:
4022 /* If either arg is constant zero, drop it. */
4023 if (integer_zerop (arg0))
4024 return non_lvalue (arg1);
4025 if (integer_zerop (arg1))
4026 return non_lvalue (arg0);
4027 /* If either arg is constant true, this is a logical inversion. */
4028 if (integer_onep (arg0))
4029 return non_lvalue (invert_truthvalue (arg1));
4030 if (integer_onep (arg1))
4031 return non_lvalue (invert_truthvalue (arg0));
4040 /* If one arg is a constant integer, put it last. */
4041 if (TREE_CODE (arg0) == INTEGER_CST
4042 && TREE_CODE (arg1) != INTEGER_CST)
4044 TREE_OPERAND (t, 0) = arg1;
4045 TREE_OPERAND (t, 1) = arg0;
4046 arg0 = TREE_OPERAND (t, 0);
4047 arg1 = TREE_OPERAND (t, 1);
4048 code = swap_tree_comparison (code);
4049 TREE_SET_CODE (t, code);
4052 /* Convert foo++ == CONST into ++foo == CONST + INCR.
4053 First, see if one arg is constant; find the constant arg
4054 and the other one. */
4056 tree constop = 0, varop;
4059 if (TREE_CONSTANT (arg1))
4060 constoploc = &TREE_OPERAND (t, 1), constop = arg1, varop = arg0;
4061 if (TREE_CONSTANT (arg0))
4062 constoploc = &TREE_OPERAND (t, 0), constop = arg0, varop = arg1;
4064 if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
4066 /* This optimization is invalid for ordered comparisons
4067 if CONST+INCR overflows or if foo+incr might overflow.
4068 This optimization is invalid for floating point due to rounding.
4069 For pointer types we assume overflow doesn't happen. */
4070 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4071 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4072 && (code == EQ_EXPR || code == NE_EXPR)))
4075 = fold (build (PLUS_EXPR, TREE_TYPE (varop),
4076 constop, TREE_OPERAND (varop, 1)));
4077 TREE_SET_CODE (varop, PREINCREMENT_EXPR);
4078 *constoploc = newconst;
4082 else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
4084 if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
4085 || (! FLOAT_TYPE_P (TREE_TYPE (varop))
4086 && (code == EQ_EXPR || code == NE_EXPR)))
4089 = fold (build (MINUS_EXPR, TREE_TYPE (varop),
4090 constop, TREE_OPERAND (varop, 1)));
4091 TREE_SET_CODE (varop, PREDECREMENT_EXPR);
4092 *constoploc = newconst;
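/* Example (added): "i++ == 5" is rewritten here as "++i == 6", and
   "i-- == 5" as "--i == 4": moving the side effect before the
   comparison avoids keeping the old value around, and equality is
   preserved because both sides move by the same increment.  */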
4098 /* Change X >= CST to X > (CST - 1) if CST is positive. */
4099 if (TREE_CODE (arg1) == INTEGER_CST
4100 && TREE_CODE (arg0) != INTEGER_CST
4101 && ! tree_int_cst_lt (arg1, integer_one_node))
4103 switch (TREE_CODE (t))
4107 TREE_SET_CODE (t, code);
4108 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4109 TREE_OPERAND (t, 1) = arg1;
4114 TREE_SET_CODE (t, code);
4115 arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
4116 TREE_OPERAND (t, 1) = arg1;
4120 /* If this is an EQ or NE comparison with zero and ARG0 is
4121 (1 << foo) & bar, convert it to (bar >> foo) & 1. Both require
4122 two operations, but the latter can be done in one less insn
4123 on machines that have only two-operand insns or on which a
4124 constant cannot be the first operand. */
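/* Example (added): "((1 << n) & flags) != 0" becomes
   "((flags >> n) & 1) != 0", which avoids materializing the shifted
   constant on machines where a constant cannot be the first operand.  */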
4125 if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
4126 && TREE_CODE (arg0) == BIT_AND_EXPR)
4128 if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
4129 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
4131 fold (build (code, type,
4132 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4134 TREE_TYPE (TREE_OPERAND (arg0, 0)),
4135 TREE_OPERAND (arg0, 1),
4136 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
4137 convert (TREE_TYPE (arg0),
4140 else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
4141 && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
4143 fold (build (code, type,
4144 build (BIT_AND_EXPR, TREE_TYPE (arg0),
4146 TREE_TYPE (TREE_OPERAND (arg0, 1)),
4147 TREE_OPERAND (arg0, 0),
4148 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
4149 convert (TREE_TYPE (arg0),
4154 /* If this is an NE comparison of zero with an AND of one, remove the
4155 comparison since the AND will give the correct value. */
4156 if (code == NE_EXPR && integer_zerop (arg1)
4157 && TREE_CODE (arg0) == BIT_AND_EXPR
4158 && integer_onep (TREE_OPERAND (arg0, 1)))
4159 return convert (type, arg0);
4161 /* If we have (A & C) == C where C is a power of 2, convert this into
4162 (A & C) != 0. Similarly for NE_EXPR. */
4163 if ((code == EQ_EXPR || code == NE_EXPR)
4164 && TREE_CODE (arg0) == BIT_AND_EXPR
4165 && integer_pow2p (TREE_OPERAND (arg0, 1))
4166 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
4167 return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
4168 arg0, integer_zero_node);
4170 /* Simplify comparison of something with itself. (For IEEE
4171 floating-point, we can only do some of these simplifications.) */
4172 if (operand_equal_p (arg0, arg1, 0))
4179 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4181 t = build_int_2 (1, 0);
4182 TREE_TYPE (t) = type;
4186 TREE_SET_CODE (t, code);
4190 /* For NE, we can only do this simplification if integer. */
4191 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
4193 /* ... fall through ... */
4196 t = build_int_2 (0, 0);
4197 TREE_TYPE (t) = type;
4202 /* An unsigned comparison against 0 can be simplified. */
4203 if (integer_zerop (arg1)
4204 && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
4205 || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
4206 && TREE_UNSIGNED (TREE_TYPE (arg1)))
4208 switch (TREE_CODE (t))
4212 TREE_SET_CODE (t, NE_EXPR);
4216 TREE_SET_CODE (t, EQ_EXPR);
4219 return omit_one_operand (type,
4220 convert (type, integer_one_node),
4223 return omit_one_operand (type,
4224 convert (type, integer_zero_node),
4229 /* If we are comparing an expression that just has comparisons
4230 of two integer values, arithmetic expressions of those comparisons,
4231 and constants, we can simplify it. There are only three cases
4232 to check: the two values can either be equal, the first can be
4233 greater, or the second can be greater. Fold the expression for
4234 those three values. Since each value must be 0 or 1, we have
4235 eight possibilities, each of which corresponds to the constant 0
4236 or 1 or one of the six possible comparisons.
4238 This handles common cases like (a > b) == 0 but also handles
4239 expressions like ((x > y) - (y > x)) > 0, which supposedly
4240 occur in macroized code. */
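/* Worked example (added): for "(a > b) == 0" the three substitutions
   give high_result == 0 (a greater), equal_result == 1 and
   low_result == 1, i.e. mask 011 == 3, which in the full switch below
   selects LE_EXPR: the whole expression folds to "a <= b".  */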
4242 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
4244 tree cval1 = 0, cval2 = 0;
4246 if (twoval_comparison_p (arg0, &cval1, &cval2)
4247 /* Don't handle degenerate cases here; they should already
4248 have been handled anyway. */
4249 && cval1 != 0 && cval2 != 0
4250 && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
4251 && TREE_TYPE (cval1) == TREE_TYPE (cval2)
4252 && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
4253 && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
4254 TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
4256 tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
4257 tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));
4259 /* We can't just pass T to eval_subst in case cval1 or cval2
4260 was the same as ARG1. */
4263 = fold (build (code, type,
4264 eval_subst (arg0, cval1, maxval, cval2, minval),
4267 = fold (build (code, type,
4268 eval_subst (arg0, cval1, maxval, cval2, maxval),
4271 = fold (build (code, type,
4272 eval_subst (arg0, cval1, minval, cval2, maxval),
4275 /* All three of these results should be 0 or 1. Confirm they
4276 are. Then use those values to select the proper code to use. */
4279 if ((integer_zerop (high_result)
4280 || integer_onep (high_result))
4281 && (integer_zerop (equal_result)
4282 || integer_onep (equal_result))
4283 && (integer_zerop (low_result)
4284 || integer_onep (low_result)))
4286 /* Make a 3-bit mask with the high-order bit being the
4287 value for `>', the next for '=', and the low for '<'. */
4288 switch ((integer_onep (high_result) * 4)
4289 + (integer_onep (equal_result) * 2)
4290 + integer_onep (low_result))
4294 return omit_one_operand (type, integer_zero_node, arg0);
4315 return omit_one_operand (type, integer_one_node, arg0);
4318 return fold (build (code, type, cval1, cval2));
4323 /* If this is a comparison of a field, we may be able to simplify it. */
4324 if ((TREE_CODE (arg0) == COMPONENT_REF
4325 || TREE_CODE (arg0) == BIT_FIELD_REF)
4326 && (code == EQ_EXPR || code == NE_EXPR)
4327 /* Handle the constant case even without -O
4328 to make sure the warnings are given. */
4329 && (optimize || TREE_CODE (arg1) == INTEGER_CST))
4331 t1 = optimize_bit_field_compare (code, type, arg0, arg1);
      /* From here on, the only cases we handle are when the result is
         known to be a constant.

         To compute GT, swap the arguments and do LT.
         To compute GE, do LT and invert the result.
         To compute LE, swap the arguments, do LT and invert the result.
         To compute NE, do EQ and invert the result.

         Therefore, the code below must handle only EQ and LT.  */

      if (code == LE_EXPR || code == GT_EXPR)
        {
          tem = arg0, arg0 = arg1, arg1 = tem;
          code = swap_tree_comparison (code);
        }

      /* Note that it is safe to invert for real values here because we
         will check below in the one case that it matters.  */

      invert = 0;
      if (code == NE_EXPR || code == GE_EXPR)
        {
          invert = 1;
          code = invert_tree_comparison (code);
        }
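      /* E.g. `a >= b' reaches this point as `a < b' with INVERT set,
         `a > b' as `b < a', and `a <= b' as `b < a' with INVERT set.  */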
      /* Compute a result for LT or EQ if args permit;
         otherwise return T.  */
      if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
        {
          if (code == EQ_EXPR)
            t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
                               == TREE_INT_CST_LOW (arg1))
                              && (TREE_INT_CST_HIGH (arg0)
                                  == TREE_INT_CST_HIGH (arg1)),
                              0);
          else
            t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
                               ? INT_CST_LT_UNSIGNED (arg0, arg1)
                               : INT_CST_LT (arg0, arg1)),
                              0);
        }
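      /* Integer constants are kept as a pair of HOST_WIDE_INT words
         (TREE_INT_CST_HIGH, TREE_INT_CST_LOW), so equality must compare
         both words, and the ordering test picks the signed or unsigned
         flavor according to the operands' type.  */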
      /* Assume a nonexplicit constant cannot equal an explicit one,
         since such code would be undefined anyway.
         Exception: on sysvr4, using #pragma weak,
         a label can come out as 0.  */
      else if (TREE_CODE (arg1) == INTEGER_CST
               && !integer_zerop (arg1)
               && TREE_CONSTANT (arg0)
               && TREE_CODE (arg0) == ADDR_EXPR
               && code == EQ_EXPR)
        t1 = build_int_2 (0, 0);
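      /* E.g. `&x == 5' folds to 0 here (a source-level `!=' then becomes
         1 via INVERT below), while `&x == 0' is deliberately left alone
         because a weak symbol's address may legitimately be 0.  */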
      /* Two real constants can be compared explicitly.  */
      else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
        {
          /* If either operand is a NaN, the result is false with two
             exceptions: First, an NE_EXPR is true on NaNs, but that case
             is already handled correctly since we will be inverting the
             result for NE_EXPR.  Second, if we had inverted a LE_EXPR
             or a GE_EXPR into a LT_EXPR, we must return true so that it
             will be inverted into false.  */

          if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
              || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
            t1 = build_int_2 (invert && code == LT_EXPR, 0);

          else if (code == EQ_EXPR)
            t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
                                                 TREE_REAL_CST (arg1)),
                              0);
          else
            t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
                                                TREE_REAL_CST (arg1)),
                              0);
        }
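      /* E.g. `1.0 < NaN' and `1.0 == NaN' both fold to 0 directly,
         while `1.0 >= NaN' (inverted above into LT) yields 1 here so
         that the final, inverted result is again 0, as IEEE requires.  */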
      if (t1 == NULL_TREE)
        return t;
      if (invert)
        TREE_INT_CST_LOW (t1) ^= 1;
      TREE_TYPE (t1) = type;
      return t1;
    case COND_EXPR:
      /* Fold a constant condition, and `cond ? x : x' when both arms
         are the same expression.  */
      if (TREE_CODE (arg0) == INTEGER_CST)
        return TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1));
      else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
        return omit_one_operand (type, arg1, arg0);
      /* If the second operand is zero, invert the comparison and swap
         the second and third operands.  Likewise if the second operand
         is constant and the third is not or if the third operand is
         equivalent to the first operand of the comparison.  */

      if (integer_zerop (arg1)
          || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
          || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
              && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
                                                 TREE_OPERAND (t, 2),
                                                 TREE_OPERAND (arg0, 1))))
        {
          /* See if this can be inverted.  If it can't, possibly because
             it was a floating-point inequality comparison, don't do
             anything.  */
          tem = invert_truthvalue (arg0);

          if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
            {
              arg0 = TREE_OPERAND (t, 0) = tem;
              TREE_OPERAND (t, 1) = TREE_OPERAND (t, 2);
              TREE_OPERAND (t, 2) = arg1;
              arg1 = TREE_OPERAND (t, 1);
            }
        }
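      /* E.g. `a == b ? 0 : q' is rewritten as `a != b ? q : 0' so that
         the patterns below, which key on the second operand, can see Q.  */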
      /* If we have A op B ? A : C, we may be able to convert this to a
         simpler expression, depending on the operation and the values
         of B and C.  IEEE floating point prevents this though,
         because A or B might be -0.0 or a NaN.  */

      if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
          && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
              || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0))))
          && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
                                             arg1, TREE_OPERAND (arg0, 1)))
        {
          tree arg2 = TREE_OPERAND (t, 2);
          enum tree_code comp_code = TREE_CODE (arg0);

          /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
             depending on the comparison operation.  */
          if (integer_zerop (TREE_OPERAND (arg0, 1))
              && TREE_CODE (arg2) == NEGATE_EXPR
              && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
            switch (comp_code)
              {
              case EQ_EXPR:
                return fold (build1 (NEGATE_EXPR, type, arg1));
              case NE_EXPR:
                return convert (type, arg1);
              case GE_EXPR:
              case GT_EXPR:
                return fold (build1 (ABS_EXPR, type, arg1));
              case LE_EXPR:
              case LT_EXPR:
                return fold (build1 (NEGATE_EXPR, type,
                                     fold (build1 (ABS_EXPR, type, arg1))));
              }
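          /* E.g. `x >= 0 ? x : -x' folds to abs (x) and
             `x < 0 ? x : -x' to -abs (x); for `==' the result is simply
             -x, since x and -x agree when x == 0.  */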
          /* If this is A != 0 ? A : 0, this is simply A.  For ==, it is
             always zero.  */

          if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
            {
              if (comp_code == NE_EXPR)
                return convert (type, arg1);
              else if (comp_code == EQ_EXPR)
                return convert (type, integer_zero_node);
            }
          /* If this is A op B ? A : B, this is either A, B, min (A, B),
             or max (A, B), depending on the operation.  */

          if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
                                              arg2, TREE_OPERAND (arg0, 0)))
            switch (comp_code)
              {
              case EQ_EXPR:
                return convert (type, arg2);
              case NE_EXPR:
                return convert (type, arg1);
              case LE_EXPR:
              case LT_EXPR:
                return fold (build (MIN_EXPR, type, arg1, arg2));
              case GE_EXPR:
              case GT_EXPR:
                return fold (build (MAX_EXPR, type, arg1, arg2));
              }
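          /* E.g. `a < b ? a : b' becomes MIN_EXPR <a, b> and
             `a > b ? a : b' becomes MAX_EXPR <a, b>; for `==' the result
             is B and for `!=' it is A, either way a single operand.  */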
          /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
             we might still be able to simplify this.  For example,
             if C1 is one less or one more than C2, this might have started
             out as a MIN or MAX and been transformed by this function.
             Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE.  */

          if (INTEGRAL_TYPE_P (type)
              && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
              && TREE_CODE (arg2) == INTEGER_CST)
            switch (comp_code)
              {
              case EQ_EXPR:
                /* We can replace A with C1 in this case.  */
                arg1 = TREE_OPERAND (t, 1)
                  = convert (type, TREE_OPERAND (arg0, 1));
                break;

              case LT_EXPR:
                /* If C1 is C2 + 1, this is min(A, C2).  */
                if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
                    && operand_equal_p (TREE_OPERAND (arg0, 1),
                                        const_binop (PLUS_EXPR, arg2,
                                                     integer_one_node, 0), 1))
                  return fold (build (MIN_EXPR, type, arg1, arg2));
                break;

              case LE_EXPR:
                /* If C1 is C2 - 1, this is min(A, C2).  */
                if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
                    && operand_equal_p (TREE_OPERAND (arg0, 1),
                                        const_binop (MINUS_EXPR, arg2,
                                                     integer_one_node, 0), 1))
                  return fold (build (MIN_EXPR, type, arg1, arg2));
                break;

              case GT_EXPR:
                /* If C1 is C2 - 1, this is max(A, C2).  */
                if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
                    && operand_equal_p (TREE_OPERAND (arg0, 1),
                                        const_binop (MINUS_EXPR, arg2,
                                                     integer_one_node, 0), 1))
                  return fold (build (MAX_EXPR, type, arg1, arg2));
                break;

              case GE_EXPR:
                /* If C1 is C2 + 1, this is max(A, C2).  */
                if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
                    && operand_equal_p (TREE_OPERAND (arg0, 1),
                                        const_binop (PLUS_EXPR, arg2,
                                                     integer_one_node, 0), 1))
                  return fold (build (MAX_EXPR, type, arg1, arg2));
                break;
              }
        }
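      /* E.g. `x < 3 ? x : 2' has C1 == C2 + 1 and so folds back to
         min (x, 2), and `x > 1 ? x : 2' has C1 == C2 - 1 and folds to
         max (x, 2).  */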
      /* Convert A ? 1 : 0 to simply A.  */
      if (integer_onep (TREE_OPERAND (t, 1))
          && integer_zerop (TREE_OPERAND (t, 2))
          /* If we try to convert TREE_OPERAND (t, 0) to our type, the
             call to fold will try to move the conversion inside
             a COND, which will recurse.  In that case, the COND_EXPR
             is probably the best choice, so leave it alone.  */
          && type == TREE_TYPE (arg0))
        return non_lvalue (arg0);
      /* Look for expressions of the form A & 2 ? 2 : 0.  The result of this
         operation is simply A & 2.  */

      if (integer_zerop (TREE_OPERAND (t, 2))
          && TREE_CODE (arg0) == NE_EXPR
          && integer_zerop (TREE_OPERAND (arg0, 1))
          && integer_pow2p (arg1)
          && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
          && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
                              arg1, 1))
        return convert (type, TREE_OPERAND (arg0, 0));

      return t;
    case COMPOUND_EXPR:
      /* When pedantic, a compound expression can be neither an lvalue
         nor an integer constant expression.  */
      if (TREE_SIDE_EFFECTS (arg0) || pedantic)
        return t;
      /* Don't let (0, 0) be null pointer constant.  */
      if (integer_zerop (arg1))
        return non_lvalue (arg1);
      return arg1;
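      /* E.g. `(f (), x)' is kept intact because of the side effects,
         while `(0, 0)' folds to a 0 wrapped by non_lvalue: `(0, 0)' is
         not an integer constant expression, so it must not pass for a
         null pointer constant in code like `char *p = (0, 0);'.  */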
    case COMPLEX_EXPR:
      if (wins)
        return build_complex (arg0, arg1);
      return t;
    case REALPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
        return t;
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
        return omit_one_operand (type, TREE_OPERAND (arg0, 0),
                                 TREE_OPERAND (arg0, 1));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
        return TREE_REALPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
        return fold (build (TREE_CODE (arg0), type,
                            fold (build1 (REALPART_EXPR, type,
                                          TREE_OPERAND (arg0, 0))),
                            fold (build1 (REALPART_EXPR,
                                          type, TREE_OPERAND (arg0, 1)))));
      return t;
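      /* Illustration: REALPART_EXPR <a + b> is distributed above into
         REALPART_EXPR <a> + REALPART_EXPR <b> and folded further; the
         IMAGPART_EXPR case below distributes in the same way.  */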
    case IMAGPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
        return convert (type, integer_zero_node);
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
        return omit_one_operand (type, TREE_OPERAND (arg0, 1),
                                 TREE_OPERAND (arg0, 0));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
        return TREE_IMAGPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
        return fold (build (TREE_CODE (arg0), type,
                            fold (build1 (IMAGPART_EXPR, type,
                                          TREE_OPERAND (arg0, 0))),
                            fold (build1 (IMAGPART_EXPR, type,
                                          TREE_OPERAND (arg0, 1)))));
      return t;

    default:
      return t;
    } /* switch (code) */