1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 88, 92, 93, 94, 1995 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
20 /*@@ This file should be rewritten to use an arbitrary precision
21 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
22 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
23 @@ The routines that translate from the ap rep should
24 @@ warn if precision et al. is lost.
25 @@ This would also make life easier when this technology is used
26 @@ for cross-compilers. */
29 /* The entry points in this file are fold, size_int and size_binop.
31 fold takes a tree as argument and returns a simplified tree.
33 size_binop takes a tree code for an arithmetic operation
34 and two operands that are trees, and produces a tree for the
35 result, assuming the type comes from `sizetype'.
37 size_int takes an integer value, and creates a tree constant
38 with type from `sizetype'. */
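
/* Illustrative sketch, not part of the original source: how a caller might
   use these entry points.  The helper name below is made up, and the
   fragment is kept inside `#if 0' so it is never compiled.  */
#if 0
static void
size_folding_example ()
{
  tree elt_size, index, offset, sum;

  elt_size = size_int (8);
  index = size_int (3);
  /* Both operands are constants, so the result is an INTEGER_CST of
     type `sizetype' with value 24; no MULT_EXPR node survives.  */
  offset = size_binop (MULT_EXPR, index, elt_size);
  /* Arbitrary trees go through fold, which here removes the `+ 0'.  */
  sum = fold (build (PLUS_EXPR, sizetype, offset, size_int (0)));
}
#endif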
46 /* Handle floating overflow for `const_binop'. */
47 static jmp_buf float_error;
49 static void encode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT, HOST_WIDE_INT));
50 static void decode PROTO((HOST_WIDE_INT *, HOST_WIDE_INT *, HOST_WIDE_INT *));
51 int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
52 HOST_WIDE_INT, HOST_WIDE_INT,
53 HOST_WIDE_INT, HOST_WIDE_INT *,
54 HOST_WIDE_INT *, HOST_WIDE_INT *,
56 static int split_tree PROTO((tree, enum tree_code, tree *, tree *, int *));
57 static tree const_binop PROTO((enum tree_code, tree, tree, int));
58 static tree fold_convert PROTO((tree, tree));
59 static enum tree_code invert_tree_comparison PROTO((enum tree_code));
60 static enum tree_code swap_tree_comparison PROTO((enum tree_code));
61 static int truth_value_p PROTO((enum tree_code));
62 static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
63 static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
64 static tree eval_subst PROTO((tree, tree, tree, tree, tree));
65 static tree omit_one_operand PROTO((tree, tree, tree));
66 static tree pedantic_omit_one_operand PROTO((tree, tree, tree));
67 static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
68 static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
69 static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
71 static tree decode_field_reference PROTO((tree, int *, int *,
72 enum machine_mode *, int *,
74 static int all_ones_mask_p PROTO((tree, int));
75 static int simple_operand_p PROTO((tree));
76 static tree range_test PROTO((enum tree_code, tree, enum tree_code,
77 enum tree_code, tree, tree, tree));
78 static tree unextend PROTO((tree, int, int));
79 static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
80 static tree strip_compound_expr PROTO((tree, tree));
86 /* Yield nonzero if a signed left shift of A by B bits overflows. */
87 #define left_shift_overflows(a, b) ((a) != ((a) << (b)) >> (b))
89 /* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
90 Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
91 Then this yields nonzero if overflow occurred during the addition.
92 Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
93 Use `^' to test whether signs differ, and `< 0' to isolate the sign. */
94 #define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
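
/* Illustrative sketch, not part of the original source: a worked use of
   overflow_sum_sign, assuming a 32-bit HOST_WIDE_INT.  */
#if 0
static void
overflow_sum_sign_example ()
{
  HOST_WIDE_INT a = 0x7fffffff, b = 1, sum;
  int oflo;

  /* Ignoring overflow (as the comment above assumes), the sum wraps to
     0x80000000: A and B agree in sign but SUM differs, so the macro
     yields nonzero.  */
  sum = a + b;
  oflo = overflow_sum_sign (a, b, sum);                      /* nonzero */

  /* A positive plus a negative value can never overflow.  */
  oflo = overflow_sum_sign (a, (HOST_WIDE_INT) -1, a - 1);   /* zero */
}
#endif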
96 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
97 We do that by representing the two-word integer in 4 words, with only
98 HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number. */
101 ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
102 #define HIGHPART(x) \
103 ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
104 #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
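
/* Illustrative sketch, not part of the original source: with a 32-bit
   HOST_WIDE_INT, BASE is 0x10000 and LOWPART/HIGHPART split a word into
   16-bit halves.  */
#if 0
static void
lowpart_highpart_example ()
{
  HOST_WIDE_INT x = 0x12345678;
  unsigned HOST_WIDE_INT lo = LOWPART (x);   /* 0x5678 */
  unsigned HOST_WIDE_INT hi = HIGHPART (x);  /* 0x1234 */

  /* The original word is recovered as lo | hi * BASE, which is exactly
     what decode does for each half of a two-word integer.  */
}
#endif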
106 /* Unpack a two-word integer into 4 words.
107 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
108 WORDS points to the array of HOST_WIDE_INTs. */
111 encode (words, low, hi)
112 HOST_WIDE_INT *words;
113 HOST_WIDE_INT low, hi;
115 words[0] = LOWPART (low);
116 words[1] = HIGHPART (low);
117 words[2] = LOWPART (hi);
118 words[3] = HIGHPART (hi);
121 /* Pack an array of 4 words into a two-word integer.
122 WORDS points to the array of words.
123 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
126 decode (words, low, hi)
127 HOST_WIDE_INT *words;
128 HOST_WIDE_INT *low, *hi;
130 *low = words[0] | words[1] * BASE;
131 *hi = words[2] | words[3] * BASE;
134 /* Make the integer constant T valid for its type
135 by setting to 0 or 1 all the bits in the constant
136 that don't belong in the type.
137 Yield 1 if a signed overflow occurs, 0 otherwise.
138 If OVERFLOW is nonzero, a signed overflow has already occurred
139 in calculating T, so propagate it.
141 Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
145 force_fit_type (t, overflow)
149 HOST_WIDE_INT low, high;
152 if (TREE_CODE (t) == REAL_CST)
154 #ifdef CHECK_FLOAT_VALUE
155 CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
161 else if (TREE_CODE (t) != INTEGER_CST)
164 low = TREE_INT_CST_LOW (t);
165 high = TREE_INT_CST_HIGH (t);
167 if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
170 prec = TYPE_PRECISION (TREE_TYPE (t));
172 /* First clear all bits that are beyond the type's precision. */
174 if (prec == 2 * HOST_BITS_PER_WIDE_INT)
176 else if (prec > HOST_BITS_PER_WIDE_INT)
178 TREE_INT_CST_HIGH (t)
179 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
183 TREE_INT_CST_HIGH (t) = 0;
184 if (prec < HOST_BITS_PER_WIDE_INT)
185 TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
188 /* Unsigned types do not suffer sign extension or overflow. */
189 if (TREE_UNSIGNED (TREE_TYPE (t)))
192 /* If the value's sign bit is set, extend the sign. */
193 if (prec != 2 * HOST_BITS_PER_WIDE_INT
194 && (prec > HOST_BITS_PER_WIDE_INT
195 ? (TREE_INT_CST_HIGH (t)
196 & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
197 : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
199 /* Value is negative:
200 set to 1 all the bits that are outside this type's precision. */
201 if (prec > HOST_BITS_PER_WIDE_INT)
203 TREE_INT_CST_HIGH (t)
204 |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
208 TREE_INT_CST_HIGH (t) = -1;
209 if (prec < HOST_BITS_PER_WIDE_INT)
210 TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
214 /* Yield nonzero if signed overflow occurred. */
216 ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
220 /* Add two doubleword integers with doubleword result.
221 Each argument is given as two `HOST_WIDE_INT' pieces.
222 One argument is L1 and H1; the other, L2 and H2.
223 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
226 add_double (l1, h1, l2, h2, lv, hv)
227 HOST_WIDE_INT l1, h1, l2, h2;
228 HOST_WIDE_INT *lv, *hv;
233 h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);
237 return overflow_sum_sign (h1, h2, h);
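
/* Illustrative sketch, not part of the original source: adding two
   doubleword values, assuming a 32-bit HOST_WIDE_INT.  */
#if 0
static void
add_double_example ()
{
  HOST_WIDE_INT lo, hi;
  int oflo;

  /* (low = -1, high = 1) represents 0x1ffffffff.  Adding 1 carries into
     the high word: the result is (low = 0, high = 2), and the return
     value is 0 because no signed overflow occurred.  */
  oflo = add_double ((HOST_WIDE_INT) -1, (HOST_WIDE_INT) 1,
		     (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0, &lo, &hi);
}
#endif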
240 /* Negate a doubleword integer with doubleword result.
241 Return nonzero if the operation overflows, assuming it's signed.
242 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
243 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
246 neg_double (l1, h1, lv, hv)
247 HOST_WIDE_INT l1, h1;
248 HOST_WIDE_INT *lv, *hv;
254 return (*hv & h1) < 0;
264 /* Multiply two doubleword integers with doubleword result.
265 Return nonzero if the operation overflows, assuming it's signed.
266 Each argument is given as two `HOST_WIDE_INT' pieces.
267 One argument is L1 and H1; the other, L2 and H2.
268 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
271 mul_double (l1, h1, l2, h2, lv, hv)
272 HOST_WIDE_INT l1, h1, l2, h2;
273 HOST_WIDE_INT *lv, *hv;
275 HOST_WIDE_INT arg1[4];
276 HOST_WIDE_INT arg2[4];
277 HOST_WIDE_INT prod[4 * 2];
278 register unsigned HOST_WIDE_INT carry;
279 register int i, j, k;
280 HOST_WIDE_INT toplow, tophigh, neglow, neghigh;
282 encode (arg1, l1, h1);
283 encode (arg2, l2, h2);
285 bzero ((char *) prod, sizeof prod);
287 for (i = 0; i < 4; i++)
290 for (j = 0; j < 4; j++)
293 /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
294 carry += arg1[i] * arg2[j];
295 /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
297 prod[k] = LOWPART (carry);
298 carry = HIGHPART (carry);
303 decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */
305 /* Check for overflow by calculating the top half of the answer in full;
306 it should agree with the low half's sign bit. */
307 decode (prod+4, &toplow, &tophigh);
310 neg_double (l2, h2, &neglow, &neghigh);
311 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
315 neg_double (l1, h1, &neglow, &neghigh);
316 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
318 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
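
/* Illustrative sketch, not part of the original source: a product that
   overflows the low word but not the doubleword, assuming a 32-bit
   HOST_WIDE_INT.  */
#if 0
static void
mul_double_example ()
{
  HOST_WIDE_INT lo, hi;
  int oflo;

  /* 0x10000 * 0x10000 = 0x100000000: lo becomes 0, hi becomes 1, and
     the return value is 0 since the doubleword result still fits.  */
  oflo = mul_double ((HOST_WIDE_INT) 0x10000, (HOST_WIDE_INT) 0,
		     (HOST_WIDE_INT) 0x10000, (HOST_WIDE_INT) 0, &lo, &hi);
}
#endif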
321 /* Shift the doubleword integer in L1, H1 left by COUNT places
322 keeping only PREC bits of result.
323 Shift right if COUNT is negative.
324 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
325 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
328 lshift_double (l1, h1, count, prec, lv, hv, arith)
329 HOST_WIDE_INT l1, h1, count;
331 HOST_WIDE_INT *lv, *hv;
336 rshift_double (l1, h1, - count, prec, lv, hv, arith);
340 #ifdef SHIFT_COUNT_TRUNCATED
341 if (SHIFT_COUNT_TRUNCATED)
345 if (count >= HOST_BITS_PER_WIDE_INT)
347 *hv = (unsigned HOST_WIDE_INT) l1 << count - HOST_BITS_PER_WIDE_INT;
352 *hv = (((unsigned HOST_WIDE_INT) h1 << count)
353 | ((unsigned HOST_WIDE_INT) l1 >> HOST_BITS_PER_WIDE_INT - count - 1 >> 1));
354 *lv = (unsigned HOST_WIDE_INT) l1 << count;
358 /* Shift the doubleword integer in L1, H1 right by COUNT places
359 keeping only PREC bits of result. COUNT must be positive.
360 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
361 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
364 rshift_double (l1, h1, count, prec, lv, hv, arith)
365 HOST_WIDE_INT l1, h1, count;
367 HOST_WIDE_INT *lv, *hv;
370 unsigned HOST_WIDE_INT signmask;
372 ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
375 #ifdef SHIFT_COUNT_TRUNCATED
376 if (SHIFT_COUNT_TRUNCATED)
380 if (count >= HOST_BITS_PER_WIDE_INT)
383 *lv = ((signmask << 2 * HOST_BITS_PER_WIDE_INT - count - 1 << 1)
384 | ((unsigned HOST_WIDE_INT) h1 >> count - HOST_BITS_PER_WIDE_INT));
388 *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
389 | ((unsigned HOST_WIDE_INT) h1 << HOST_BITS_PER_WIDE_INT - count - 1 << 1));
390 *hv = ((signmask << HOST_BITS_PER_WIDE_INT - count)
391 | ((unsigned HOST_WIDE_INT) h1 >> count));
395 /* Rotate the doubleword integer in L1, H1 left by COUNT places
396 keeping only PREC bits of result.
397 Rotate right if COUNT is negative.
398 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
401 lrotate_double (l1, h1, count, prec, lv, hv)
402 HOST_WIDE_INT l1, h1, count;
404 HOST_WIDE_INT *lv, *hv;
406 HOST_WIDE_INT s1l, s1h, s2l, s2h;
412 lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
413 rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
418 /* Rotate the doubleword integer in L1, H1 right by COUNT places
419 keeping only PREC bits of result. COUNT must be positive.
420 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
423 rrotate_double (l1, h1, count, prec, lv, hv)
424 HOST_WIDE_INT l1, h1, count;
426 HOST_WIDE_INT *lv, *hv;
428 HOST_WIDE_INT s1l, s1h, s2l, s2h;
434 rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
435 lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
440 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
441 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
442 CODE is a tree code for a kind of division, one of
443 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
445 It controls how the quotient is rounded to an integer.
446 Return nonzero if the operation overflows.
447 UNS nonzero says do unsigned division. */
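
/* Illustrative sketch, not part of the original source: how the rounding
   codes differ for the signed division -7 / 2.
     TRUNC_DIV_EXPR  -> quotient -3, remainder -1 (round toward zero)
     FLOOR_DIV_EXPR  -> quotient -4, remainder  1 (round toward -infinity)
     CEIL_DIV_EXPR   -> quotient -3, remainder -1 (round toward +infinity)
     ROUND_DIV_EXPR  -> quotient -4, remainder  1 (nearest; ties go away
                        from zero)  */
#if 0
static void
div_and_round_double_example ()
{
  HOST_WIDE_INT lquo, hquo, lrem, hrem;

  /* -7 is (low = -7, high = -1) as a doubleword; 2 is (2, 0).  */
  div_and_round_double (FLOOR_DIV_EXPR, 0,
			(HOST_WIDE_INT) -7, (HOST_WIDE_INT) -1,
			(HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
			&lquo, &hquo, &lrem, &hrem);
  /* Now lquo == -4 with hquo == -1, and lrem == 1 with hrem == 0.  */
}
#endif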
450 div_and_round_double (code, uns,
451 lnum_orig, hnum_orig, lden_orig, hden_orig,
452 lquo, hquo, lrem, hrem)
455 HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
456 HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
457 HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
460 HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
461 HOST_WIDE_INT den[4], quo[4];
463 unsigned HOST_WIDE_INT work;
464 register int carry = 0;
465 HOST_WIDE_INT lnum = lnum_orig;
466 HOST_WIDE_INT hnum = hnum_orig;
467 HOST_WIDE_INT lden = lden_orig;
468 HOST_WIDE_INT hden = hden_orig;
471 if ((hden == 0) && (lden == 0))
474 /* calculate quotient sign and convert operands to unsigned. */
480 /* (minimum integer) / (-1) is the only overflow case. */
481 if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
487 neg_double (lden, hden, &lden, &hden);
491 if (hnum == 0 && hden == 0)
492 { /* single precision */
494 /* This unsigned division rounds toward zero. */
495 *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
500 { /* trivial case: dividend < divisor */
501 /* hden != 0 already checked. */
508 bzero ((char *) quo, sizeof quo);
510 bzero ((char *) num, sizeof num); /* to zero the extra scaling element */
511 bzero ((char *) den, sizeof den);
513 encode (num, lnum, hnum);
514 encode (den, lden, hden);
516 /* Special code for when the divisor < BASE. */
517 if (hden == 0 && lden < BASE)
519 /* hnum != 0 already checked. */
520 for (i = 4 - 1; i >= 0; i--)
522 work = num[i] + carry * BASE;
523 quo[i] = work / (unsigned HOST_WIDE_INT) lden;
524 carry = work % (unsigned HOST_WIDE_INT) lden;
529 /* Full double precision division,
530 with thanks to Don Knuth's "Seminumerical Algorithms". */
531 int quo_est, scale, num_hi_sig, den_hi_sig;
533 /* Find the highest non-zero divisor digit. */
534 for (i = 4 - 1; ; i--)
540 /* Ensure that the first digit of the divisor is at least BASE/2.
541 This is required by the quotient digit estimation algorithm. */
543 scale = BASE / (den[den_hi_sig] + 1);
544 if (scale > 1) { /* scale divisor and dividend */
546 for (i = 0; i <= 4 - 1; i++) {
547 work = (num[i] * scale) + carry;
548 num[i] = LOWPART (work);
549 carry = HIGHPART (work);
552 for (i = 0; i <= 4 - 1; i++) {
553 work = (den[i] * scale) + carry;
554 den[i] = LOWPART (work);
555 carry = HIGHPART (work);
556 if (den[i] != 0) den_hi_sig = i;
563 for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
564 /* guess the next quotient digit, quo_est, by dividing the first
565 two remaining dividend digits by the high order divisor digit.
566 quo_est is never low and is at most 2 high. */
567 unsigned HOST_WIDE_INT tmp;
569 num_hi_sig = i + den_hi_sig + 1;
570 work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
571 if (num[num_hi_sig] != den[den_hi_sig])
572 quo_est = work / den[den_hi_sig];
576 /* refine quo_est so it's usually correct, and at most one high. */
577 tmp = work - quo_est * den[den_hi_sig];
579 && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
582 /* Try QUO_EST as the quotient digit, by multiplying the
583 divisor by QUO_EST and subtracting from the remaining dividend.
584 Keep in mind that QUO_EST is the I - 1st digit. */
587 for (j = 0; j <= den_hi_sig; j++)
589 work = quo_est * den[j] + carry;
590 carry = HIGHPART (work);
591 work = num[i + j] - LOWPART (work);
592 num[i + j] = LOWPART (work);
593 carry += HIGHPART (work) != 0;
596 /* if quo_est was high by one, then num[i] went negative and
597 we need to correct things. */
599 if (num[num_hi_sig] < carry)
602 carry = 0; /* add divisor back in */
603 for (j = 0; j <= den_hi_sig; j++)
605 work = num[i + j] + den[j] + carry;
606 carry = HIGHPART (work);
607 num[i + j] = LOWPART (work);
609 num [num_hi_sig] += carry;
612 /* store the quotient digit. */
617 decode (quo, lquo, hquo);
620 /* if the quotient should be negative, make it so. */
622 neg_double (*lquo, *hquo, lquo, hquo);
624 /* compute trial remainder: rem = num - (quo * den) */
625 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
626 neg_double (*lrem, *hrem, lrem, hrem);
627 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
632 case TRUNC_MOD_EXPR: /* round toward zero */
633 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
637 case FLOOR_MOD_EXPR: /* round toward negative infinity */
638 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
641 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
644 else return overflow;
648 case CEIL_MOD_EXPR: /* round toward positive infinity */
649 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
651 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
654 else return overflow;
658 case ROUND_MOD_EXPR: /* round to closest integer */
660 HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
661 HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;
663 /* get absolute values */
664 if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
665 if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);
667 /* if (2 * abs (lrem) >= abs (lden)) */
668 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
669 labs_rem, habs_rem, <wice, &htwice);
670 if (((unsigned HOST_WIDE_INT) habs_den
671 < (unsigned HOST_WIDE_INT) htwice)
672 || (((unsigned HOST_WIDE_INT) habs_den
673 == (unsigned HOST_WIDE_INT) htwice)
674 && ((unsigned HOST_WIDE_INT) labs_den
675 < (unsigned HOST_WIDE_INT) ltwice)))
679 add_double (*lquo, *hquo,
680 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
683 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
686 else return overflow;
694 /* compute true remainder: rem = num - (quo * den) */
695 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
696 neg_double (*lrem, *hrem, lrem, hrem);
697 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
701 #ifndef REAL_ARITHMETIC
702 /* Effectively truncate a real value to represent the nearest possible value
703 in a narrower mode. The result is actually represented in the same data
704 type as the argument, but its value is usually different.
706 A trap may occur during the FP operations and it is the responsibility
707 of the calling function to have a handler established. */
710 real_value_truncate (mode, arg)
711 enum machine_mode mode;
714 return REAL_VALUE_TRUNCATE (mode, arg);
717 #if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
719 /* Check for infinity in an IEEE double precision number. */
725 /* The IEEE 64-bit double format. */
730 unsigned exponent : 11;
731 unsigned mantissa1 : 20;
736 unsigned mantissa1 : 20;
737 unsigned exponent : 11;
743 if (u.big_endian.sign == 1)
746 return (u.big_endian.exponent == 2047
747 && u.big_endian.mantissa1 == 0
748 && u.big_endian.mantissa2 == 0);
753 return (u.little_endian.exponent == 2047
754 && u.little_endian.mantissa1 == 0
755 && u.little_endian.mantissa2 == 0);
759 /* Check whether an IEEE double precision number is a NaN. */
765 /* The IEEE 64-bit double format. */
770 unsigned exponent : 11;
771 unsigned mantissa1 : 20;
776 unsigned mantissa1 : 20;
777 unsigned exponent : 11;
783 if (u.big_endian.sign == 1)
786 return (u.big_endian.exponent == 2047
787 && (u.big_endian.mantissa1 != 0
788 || u.big_endian.mantissa2 != 0));
793 return (u.little_endian.exponent == 2047
794 && (u.little_endian.mantissa1 != 0
795 || u.little_endian.mantissa2 != 0));
799 /* Check for a negative IEEE double precision number. */
805 /* The IEEE 64-bit double format. */
810 unsigned exponent : 11;
811 unsigned mantissa1 : 20;
816 unsigned mantissa1 : 20;
817 unsigned exponent : 11;
823 if (u.big_endian.sign == 1)
826 return u.big_endian.sign;
831 return u.little_endian.sign;
834 #else /* Target not IEEE */
836 /* Let's assume other float formats don't have infinity.
837 (This can be overridden by redefining REAL_VALUE_ISINF.) */
845 /* Let's assume other float formats don't have NaNs.
846 (This can be overridden by redefining REAL_VALUE_ISNAN.) */
854 /* Let's assume other float formats don't have minus zero.
855 (This can be overridden by redefining REAL_VALUE_NEGATIVE.) */
862 #endif /* Target not IEEE */
863 #endif /* no REAL_ARITHMETIC */
865 /* Split a tree IN into a constant and a variable part
866 that could be combined with CODE to make IN.
867 CODE must be a commutative arithmetic operation.
868 Store the constant part into *CONP and the variable in *VARP.
869 Return 1 if this was done; zero means the tree IN did not decompose
872 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
873 Therefore, we must tell the caller whether the variable part
874 was subtracted. We do this by storing 1 or -1 into *VARSIGNP.
875 The value stored is the coefficient for the variable term.
876 The constant term we return should always be added;
877 we negate it if necessary. */
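
/* Illustrative sketch, not part of the original source: splitting the
   tree for `i - 4' with CODE == PLUS_EXPR.  The names are made up.  */
#if 0
static void
split_tree_example (in)
     tree in;   /* assumed to be the MINUS_EXPR for `i - 4' */
{
  tree var, con;
  int varsign;

  if (split_tree (in, PLUS_EXPR, &var, &con, &varsign))
    {
      /* var is the tree for `i', varsign is 1, and con is the folded
	 INTEGER_CST -4: the constant has already been negated so that
	 it can simply be added back.  */
    }
}
#endif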
880 split_tree (in, code, varp, conp, varsignp)
886 register tree outtype = TREE_TYPE (in);
890 /* Strip any conversions that don't change the machine mode. */
891 while ((TREE_CODE (in) == NOP_EXPR
892 || TREE_CODE (in) == CONVERT_EXPR)
893 && (TYPE_MODE (TREE_TYPE (in))
894 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
895 in = TREE_OPERAND (in, 0);
897 if (TREE_CODE (in) == code
898 || (! FLOAT_TYPE_P (TREE_TYPE (in))
899 /* We can associate addition and subtraction together
900 (even though the C standard doesn't say so)
901 for integers because the value is not affected.
902 For reals, the value might be affected, so we can't. */
903 && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
904 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
906 enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
907 if (code == INTEGER_CST)
909 *conp = TREE_OPERAND (in, 0);
910 *varp = TREE_OPERAND (in, 1);
911 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
912 && TREE_TYPE (*varp) != outtype)
913 *varp = convert (outtype, *varp);
914 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
917 if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
919 *conp = TREE_OPERAND (in, 1);
920 *varp = TREE_OPERAND (in, 0);
922 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
923 && TREE_TYPE (*varp) != outtype)
924 *varp = convert (outtype, *varp);
925 if (TREE_CODE (in) == MINUS_EXPR)
927 /* If operation is subtraction and constant is second,
928 must negate it to get an additive constant.
929 And this cannot be done unless it is a manifest constant.
930 It could also be the address of a static variable.
931 We cannot negate that, so give up. */
932 if (TREE_CODE (*conp) == INTEGER_CST)
933 /* Subtracting from integer_zero_node loses for long long. */
934 *conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
940 if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
942 *conp = TREE_OPERAND (in, 0);
943 *varp = TREE_OPERAND (in, 1);
944 if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
945 && TREE_TYPE (*varp) != outtype)
946 *varp = convert (outtype, *varp);
947 *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
954 /* Combine two constants ARG1 and ARG2 under operation CODE
955 to produce a new constant.
956 We assume ARG1 and ARG2 have the same data type,
957 or at least are the same kind of constant and the same machine mode.
959 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
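
/* Illustrative sketch, not part of the original source: folding 6 * 7 at
   compile time.  integer_type_node is used here as the C front end
   defines it.  */
#if 0
static void
const_binop_example ()
{
  tree a, b, prod;

  a = build_int_2 (6, 0);
  TREE_TYPE (a) = integer_type_node;
  b = build_int_2 (7, 0);
  TREE_TYPE (b) = integer_type_node;
  /* prod is a new INTEGER_CST of integer_type_node with value 42;
     no overflow flags are set.  */
  prod = const_binop (MULT_EXPR, a, b, 0);
}
#endif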
962 const_binop (code, arg1, arg2, notrunc)
964 register tree arg1, arg2;
967 if (TREE_CODE (arg1) == INTEGER_CST)
969 register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
970 register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
971 HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
972 HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
973 HOST_WIDE_INT low, hi;
974 HOST_WIDE_INT garbagel, garbageh;
976 int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
982 t = build_int_2 (int1l | int2l, int1h | int2h);
986 t = build_int_2 (int1l ^ int2l, int1h ^ int2h);
990 t = build_int_2 (int1l & int2l, int1h & int2h);
994 t = build_int_2 (int1l & ~int2l, int1h & ~int2h);
1000 /* It's unclear from the C standard whether shifts can overflow.
1001 The following code ignores overflow; perhaps a C standard
1002 interpretation ruling is needed. */
1003 lshift_double (int1l, int1h, int2l,
1004 TYPE_PRECISION (TREE_TYPE (arg1)),
1007 t = build_int_2 (low, hi);
1008 TREE_TYPE (t) = TREE_TYPE (arg1);
1010 force_fit_type (t, 0);
1011 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
1012 TREE_CONSTANT_OVERFLOW (t)
1013 = TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2);
1019 lrotate_double (int1l, int1h, int2l,
1020 TYPE_PRECISION (TREE_TYPE (arg1)),
1022 t = build_int_2 (low, hi);
1029 if ((unsigned HOST_WIDE_INT) int2l < int1l)
1032 overflow = int2h < hi;
1034 t = build_int_2 (int2l, int2h);
1040 if ((unsigned HOST_WIDE_INT) int1l < int2l)
1043 overflow = int1h < hi;
1045 t = build_int_2 (int1l, int1h);
1048 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
1049 t = build_int_2 (low, hi);
1053 if (int2h == 0 && int2l == 0)
1055 t = build_int_2 (int1l, int1h);
1058 neg_double (int2l, int2h, &low, &hi);
1059 add_double (int1l, int1h, low, hi, &low, &hi);
1060 overflow = overflow_sum_sign (hi, int2h, int1h);
1061 t = build_int_2 (low, hi);
1065 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1066 t = build_int_2 (low, hi);
1069 case TRUNC_DIV_EXPR:
1070 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1071 case EXACT_DIV_EXPR:
1072 /* This is a shortcut for a common special case.
1073 It reduces the number of tree nodes generated
1075 if (int2h == 0 && int2l > 0
1076 && TREE_TYPE (arg1) == sizetype
1077 && int1h == 0 && int1l >= 0)
1079 if (code == CEIL_DIV_EXPR)
1081 return size_int (int1l / int2l);
1083 case ROUND_DIV_EXPR:
1084 if (int2h == 0 && int2l == 1)
1086 t = build_int_2 (int1l, int1h);
1089 if (int1l == int2l && int1h == int2h)
1091 if ((int1l | int1h) == 0)
1093 t = build_int_2 (1, 0);
1096 overflow = div_and_round_double (code, uns,
1097 int1l, int1h, int2l, int2h,
1098 &low, &hi, &garbagel, &garbageh);
1099 t = build_int_2 (low, hi);
1102 case TRUNC_MOD_EXPR: case ROUND_MOD_EXPR:
1103 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
1104 overflow = div_and_round_double (code, uns,
1105 int1l, int1h, int2l, int2h,
1106 &garbagel, &garbageh, &low, &hi);
1107 t = build_int_2 (low, hi);
1114 low = (((unsigned HOST_WIDE_INT) int1h
1115 < (unsigned HOST_WIDE_INT) int2h)
1116 || (((unsigned HOST_WIDE_INT) int1h
1117 == (unsigned HOST_WIDE_INT) int2h)
1118 && ((unsigned HOST_WIDE_INT) int1l
1119 < (unsigned HOST_WIDE_INT) int2l)));
1123 low = ((int1h < int2h)
1124 || ((int1h == int2h)
1125 && ((unsigned HOST_WIDE_INT) int1l
1126 < (unsigned HOST_WIDE_INT) int2l)));
1128 if (low == (code == MIN_EXPR))
1129 t = build_int_2 (int1l, int1h);
1131 t = build_int_2 (int2l, int2h);
1138 TREE_TYPE (t) = TREE_TYPE (arg1);
1140 = ((notrunc ? !uns && overflow : force_fit_type (t, overflow && !uns))
1141 | TREE_OVERFLOW (arg1)
1142 | TREE_OVERFLOW (arg2));
1143 TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
1144 | TREE_CONSTANT_OVERFLOW (arg1)
1145 | TREE_CONSTANT_OVERFLOW (arg2));
1148 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1149 if (TREE_CODE (arg1) == REAL_CST)
1154 REAL_VALUE_TYPE value;
1157 d1 = TREE_REAL_CST (arg1);
1158 d2 = TREE_REAL_CST (arg2);
1160 /* If either operand is a NaN, just return it. Otherwise, set up
1161 for a floating-point trap; we return an overflow. */
1162 if (REAL_VALUE_ISNAN (d1))
1164 else if (REAL_VALUE_ISNAN (d2))
1166 else if (setjmp (float_error))
1168 t = copy_node (arg1);
1173 set_float_handler (float_error);
1175 #ifdef REAL_ARITHMETIC
1176 REAL_ARITHMETIC (value, code, d1, d2);
1193 #ifndef REAL_INFINITY
1202 value = MIN (d1, d2);
1206 value = MAX (d1, d2);
1212 #endif /* no REAL_ARITHMETIC */
1213 t = build_real (TREE_TYPE (arg1),
1214 real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));
1216 set_float_handler (NULL_PTR);
1219 = (force_fit_type (t, overflow)
1220 | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
1221 TREE_CONSTANT_OVERFLOW (t)
1223 | TREE_CONSTANT_OVERFLOW (arg1)
1224 | TREE_CONSTANT_OVERFLOW (arg2);
1227 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1228 if (TREE_CODE (arg1) == COMPLEX_CST)
1230 register tree r1 = TREE_REALPART (arg1);
1231 register tree i1 = TREE_IMAGPART (arg1);
1232 register tree r2 = TREE_REALPART (arg2);
1233 register tree i2 = TREE_IMAGPART (arg2);
1239 t = build_complex (const_binop (PLUS_EXPR, r1, r2, notrunc),
1240 const_binop (PLUS_EXPR, i1, i2, notrunc));
1244 t = build_complex (const_binop (MINUS_EXPR, r1, r2, notrunc),
1245 const_binop (MINUS_EXPR, i1, i2, notrunc));
1249 t = build_complex (const_binop (MINUS_EXPR,
1250 const_binop (MULT_EXPR,
1252 const_binop (MULT_EXPR,
1255 const_binop (PLUS_EXPR,
1256 const_binop (MULT_EXPR,
1258 const_binop (MULT_EXPR,
1265 register tree magsquared
1266 = const_binop (PLUS_EXPR,
1267 const_binop (MULT_EXPR, r2, r2, notrunc),
1268 const_binop (MULT_EXPR, i2, i2, notrunc),
1272 (const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1273 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1274 const_binop (PLUS_EXPR,
1275 const_binop (MULT_EXPR, r1, r2,
1277 const_binop (MULT_EXPR, i1, i2,
1280 magsquared, notrunc),
1281 const_binop (INTEGRAL_TYPE_P (TREE_TYPE (r1))
1282 ? TRUNC_DIV_EXPR : RDIV_EXPR,
1283 const_binop (MINUS_EXPR,
1284 const_binop (MULT_EXPR, i1, r2,
1286 const_binop (MULT_EXPR, r1, i2,
1289 magsquared, notrunc));
1296 TREE_TYPE (t) = TREE_TYPE (arg1);
1302 /* Return an INTEGER_CST with value V and type from `sizetype'. */
1306 unsigned HOST_WIDE_INT number;
1309 /* Type-size nodes already made for small sizes. */
1310 static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];
1312 if (number < 2*HOST_BITS_PER_WIDE_INT + 1
1313 && size_table[number] != 0)
1314 return size_table[number];
1315 if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
1317 push_obstacks_nochange ();
1318 /* Make this a permanent node. */
1319 end_temporary_allocation ();
1320 t = build_int_2 (number, 0);
1321 TREE_TYPE (t) = sizetype;
1322 size_table[number] = t;
1327 t = build_int_2 (number, 0);
1328 TREE_TYPE (t) = sizetype;
1333 /* Combine operands ARG0 and ARG1 with arithmetic operation CODE.
1334 CODE is a tree code. Data type is taken from `sizetype'.
1335 If the operands are constant, so is the result. */
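
/* Illustrative sketch, not part of the original source: the fast paths
   below mean that adding zero or multiplying by one hands back the other
   operand unchanged, while two genuine constants are folded.  */
#if 0
static void
size_binop_example ()
{
  tree n = size_int (12);

  size_binop (PLUS_EXPR, size_int (0), n);   /* returns n itself */
  size_binop (MULT_EXPR, size_int (1), n);   /* returns n itself */
  size_binop (MULT_EXPR, size_int (2), n);   /* folded to the constant 24 */
}
#endif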
1338 size_binop (code, arg0, arg1)
1339 enum tree_code code;
1342 /* Handle the special case of two integer constants faster. */
1343 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1345 /* And some specific cases even faster than that. */
1346 if (code == PLUS_EXPR
1347 && TREE_INT_CST_LOW (arg0) == 0
1348 && TREE_INT_CST_HIGH (arg0) == 0)
1350 if (code == MINUS_EXPR
1351 && TREE_INT_CST_LOW (arg1) == 0
1352 && TREE_INT_CST_HIGH (arg1) == 0)
1354 if (code == MULT_EXPR
1355 && TREE_INT_CST_LOW (arg0) == 1
1356 && TREE_INT_CST_HIGH (arg0) == 0)
1358 /* Handle general case of two integer constants. */
1359 return const_binop (code, arg0, arg1, 1);
1362 if (arg0 == error_mark_node || arg1 == error_mark_node)
1363 return error_mark_node;
1365 return fold (build (code, sizetype, arg0, arg1));
1368 /* Given T, a tree representing type conversion of ARG1, a constant,
1369 return a constant tree representing the result of conversion. */
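
/* Illustrative sketch, not part of the original source: converting the
   constant 300 to a signed 8-bit type.  signed_char_type_node and
   integer_type_node are used here as the C front end defines them.  */
#if 0
static void
fold_convert_example ()
{
  tree c, conv, folded;

  c = build_int_2 (300, 0);
  TREE_TYPE (c) = integer_type_node;
  conv = build1 (NOP_EXPR, signed_char_type_node, c);
  /* The result is an INTEGER_CST of signed_char_type_node with value 44
     (300 truncated to 8 bits) and TREE_OVERFLOW set, since force_fit_type
     reports that the value did not fit.  */
  folded = fold_convert (conv, c);
}
#endif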
1372 fold_convert (t, arg1)
1376 register tree type = TREE_TYPE (t);
1379 if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
1381 if (TREE_CODE (arg1) == INTEGER_CST)
1383 /* If we would build a constant wider than GCC supports,
1384 leave the conversion unfolded. */
1385 if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
1388 /* Given an integer constant, make new constant with new type,
1389 appropriately sign-extended or truncated. */
1390 t = build_int_2 (TREE_INT_CST_LOW (arg1),
1391 TREE_INT_CST_HIGH (arg1));
1392 TREE_TYPE (t) = type;
1393 /* Indicate an overflow if (1) ARG1 already overflowed,
1394 or (2) force_fit_type indicates an overflow.
1395 Tell force_fit_type that an overflow has already occurred
1396 if ARG1 is a too-large unsigned value and T is signed. */
1398 = (TREE_OVERFLOW (arg1)
1399 | force_fit_type (t,
1400 (TREE_INT_CST_HIGH (arg1) < 0
1401 & (TREE_UNSIGNED (type)
1402 < TREE_UNSIGNED (TREE_TYPE (arg1))))));
1403 TREE_CONSTANT_OVERFLOW (t)
1404 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1406 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1407 else if (TREE_CODE (arg1) == REAL_CST)
1409 /* Don't initialize these, use assignments.
1410 Initialized local aggregates don't work on old compilers. */
1415 x = TREE_REAL_CST (arg1);
1416 l = real_value_from_int_cst (TYPE_MIN_VALUE (type));
1417 u = real_value_from_int_cst (TYPE_MAX_VALUE (type));
1418 /* See if X will be in range after truncation towards 0.
1419 To compensate for truncation, move the bounds away from 0,
1420 but reject if X exactly equals the adjusted bounds. */
1421 #ifdef REAL_ARITHMETIC
1422 REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
1423 REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
1428 /* If X is a NaN, use zero instead and show we have an overflow.
1429 Otherwise, range check. */
1430 if (REAL_VALUE_ISNAN (x))
1431 overflow = 1, x = dconst0;
1432 else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
1435 #ifndef REAL_ARITHMETIC
1437 HOST_WIDE_INT low, high;
1438 HOST_WIDE_INT half_word
1439 = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);
1444 high = (HOST_WIDE_INT) (x / half_word / half_word);
1445 x -= (REAL_VALUE_TYPE) high * half_word * half_word;
1446 if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
1448 low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
1449 low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
1452 low = (HOST_WIDE_INT) x;
1453 if (TREE_REAL_CST (arg1) < 0)
1454 neg_double (low, high, &low, &high);
1455 t = build_int_2 (low, high);
1459 HOST_WIDE_INT low, high;
1460 REAL_VALUE_TO_INT (&low, &high, x);
1461 t = build_int_2 (low, high);
1464 TREE_TYPE (t) = type;
1466 = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
1467 TREE_CONSTANT_OVERFLOW (t)
1468 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1470 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1471 TREE_TYPE (t) = type;
1473 else if (TREE_CODE (type) == REAL_TYPE)
1475 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
1476 if (TREE_CODE (arg1) == INTEGER_CST)
1477 return build_real_from_int_cst (type, arg1);
1478 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
1479 if (TREE_CODE (arg1) == REAL_CST)
1481 if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
1483 else if (setjmp (float_error))
1486 t = copy_node (arg1);
1489 set_float_handler (float_error);
1491 t = build_real (type, real_value_truncate (TYPE_MODE (type),
1492 TREE_REAL_CST (arg1)));
1493 set_float_handler (NULL_PTR);
1497 = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
1498 TREE_CONSTANT_OVERFLOW (t)
1499 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1503 TREE_CONSTANT (t) = 1;
1507 /* Return an expr equal to X but certainly not valid as an lvalue.
1508 Also make sure it is not valid as a null pointer constant. */
1516 /* These things are certainly not lvalues. */
1517 if (TREE_CODE (x) == NON_LVALUE_EXPR
1518 || TREE_CODE (x) == INTEGER_CST
1519 || TREE_CODE (x) == REAL_CST
1520 || TREE_CODE (x) == STRING_CST
1521 || TREE_CODE (x) == ADDR_EXPR)
1523 if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
1525 /* Use NOP_EXPR instead of NON_LVALUE_EXPR
1526 so convert_for_assignment won't strip it.
1527 This is so this 0 won't be treated as a null pointer constant. */
1528 result = build1 (NOP_EXPR, TREE_TYPE (x), x);
1529 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1535 result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
1536 TREE_CONSTANT (result) = TREE_CONSTANT (x);
1540 /* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
1541 Zero means allow extended lvalues. */
1543 int pedantic_lvalues;
1545 /* When pedantic, return an expr equal to X but certainly not valid as a
1546 pedantic lvalue. Otherwise, return X. */
1549 pedantic_non_lvalue (x)
1552 if (pedantic_lvalues)
1553 return non_lvalue (x);
1558 /* Given a tree comparison code, return the code that is the logical inverse
1559 of the given code. It is not safe to do this for floating-point
1560 comparisons, except for NE_EXPR and EQ_EXPR. */
1562 static enum tree_code
1563 invert_tree_comparison (code)
1564 enum tree_code code;
1585 /* Similar, but return the comparison that results if the operands are
1586 swapped. This is safe for floating-point. */
1588 static enum tree_code
1589 swap_tree_comparison (code)
1590 enum tree_code code;
1610 /* Return nonzero if CODE is a tree code that represents a truth value. */
1613 truth_value_p (code)
1614 enum tree_code code;
1616 return (TREE_CODE_CLASS (code) == '<'
1617 || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
1618 || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
1619 || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
1622 /* Return nonzero if two operands are necessarily equal.
1623 If ONLY_CONST is non-zero, only return non-zero for constants.
1624 This function tests whether the operands are indistinguishable;
1625 it does not test whether they are equal using C's == operation.
1626 The distinction is important for IEEE floating point, because
1627 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
1628 (2) two NaNs may be indistinguishable, but NaN!=NaN. */
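
/* Illustrative sketch, not part of the original source: what the
   "indistinguishable" test means in practice.  */
#if 0
static void
operand_equal_p_example ()
{
  tree a, b;
  int same;

  /* Two distinct INTEGER_CST nodes with the same type and the same
     stored words compare equal.  */
  a = build_int_2 (5, 0);
  TREE_TYPE (a) = integer_type_node;
  b = build_int_2 (5, 0);
  TREE_TYPE (b) = integer_type_node;
  same = operand_equal_p (a, b, 0);   /* nonzero */

  /* REAL_CSTs are compared with bcmp on their bit patterns, so +0.0 and
     -0.0 (which satisfy C's ==) are reported as different, while two NaNs
     with identical bits are reported as equal even though NaN != NaN.  */
}
#endif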
1631 operand_equal_p (arg0, arg1, only_const)
1635 /* If both types don't have the same signedness, then we can't consider
1636 them equal. We must check this before the STRIP_NOPS calls
1637 because they may change the signedness of the arguments. */
1638 if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
1644 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
1645 We don't care about side effects in that case because the SAVE_EXPR
1646 takes care of that for us. */
1647 if (TREE_CODE (arg0) == SAVE_EXPR && arg0 == arg1)
1648 return ! only_const;
1650 if (TREE_SIDE_EFFECTS (arg0) || TREE_SIDE_EFFECTS (arg1))
1653 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1654 && TREE_CODE (arg0) == ADDR_EXPR
1655 && TREE_OPERAND (arg0, 0) == TREE_OPERAND (arg1, 0))
1658 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1659 && TREE_CODE (arg0) == INTEGER_CST
1660 && TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
1661 && TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1))
1664 /* Detect when real constants are equal. */
1665 if (TREE_CODE (arg0) == TREE_CODE (arg1)
1666 && TREE_CODE (arg0) == REAL_CST)
1667 return !bcmp ((char *) &TREE_REAL_CST (arg0),
1668 (char *) &TREE_REAL_CST (arg1),
1669 sizeof (REAL_VALUE_TYPE));
1677 if (TREE_CODE (arg0) != TREE_CODE (arg1))
1679 /* This is needed for conversions and for COMPONENT_REF.
1680 Might as well play it safe and always test this. */
1681 if (TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
1684 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
1687 /* Two conversions are equal only if signedness and modes match. */
1688 if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
1689 && (TREE_UNSIGNED (TREE_TYPE (arg0))
1690 != TREE_UNSIGNED (TREE_TYPE (arg1))))
1693 return operand_equal_p (TREE_OPERAND (arg0, 0),
1694 TREE_OPERAND (arg1, 0), 0);
1698 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1699 TREE_OPERAND (arg1, 0), 0)
1700 && operand_equal_p (TREE_OPERAND (arg0, 1),
1701 TREE_OPERAND (arg1, 1), 0));
1704 switch (TREE_CODE (arg0))
1707 return operand_equal_p (TREE_OPERAND (arg0, 0),
1708 TREE_OPERAND (arg1, 0), 0);
1712 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1713 TREE_OPERAND (arg1, 0), 0)
1714 && operand_equal_p (TREE_OPERAND (arg0, 1),
1715 TREE_OPERAND (arg1, 1), 0));
1718 return (operand_equal_p (TREE_OPERAND (arg0, 0),
1719 TREE_OPERAND (arg1, 0), 0)
1720 && operand_equal_p (TREE_OPERAND (arg0, 1),
1721 TREE_OPERAND (arg1, 1), 0)
1722 && operand_equal_p (TREE_OPERAND (arg0, 2),
1723 TREE_OPERAND (arg1, 2), 0));
1731 /* Similar to operand_equal_p, but see if ARG0 might have been made by
1732 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
1734 When in doubt, return 0. */
1737 operand_equal_for_comparison_p (arg0, arg1, other)
1741 int unsignedp1, unsignedpo;
1742 tree primarg1, primother;
1743 unsigned correct_width;
1745 if (operand_equal_p (arg0, arg1, 0))
1748 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
1749 || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
1752 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
1753 actual comparison operand, ARG0.
1755 First throw away any conversions to wider types
1756 already present in the operands. */
1758 primarg1 = get_narrower (arg1, &unsignedp1);
1759 primother = get_narrower (other, &unsignedpo);
1761 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
1762 if (unsignedp1 == unsignedpo
1763 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
1764 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
1766 tree type = TREE_TYPE (arg0);
1768 /* Make sure shorter operand is extended the right way
1769 to match the longer operand. */
1770 primarg1 = convert (signed_or_unsigned_type (unsignedp1,
1771 TREE_TYPE (primarg1)),
1774 if (operand_equal_p (arg0, convert (type, primarg1), 0))
1781 /* See if ARG is an expression that is either a comparison or is performing
1782 arithmetic on comparisons. The comparisons must only be comparing
1783 two different values, which will be stored in *CVAL1 and *CVAL2; if
1784 they are non-zero it means that some operands have already been found.
1785 No variables may be used anywhere else in the expression except in the
1786 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
1787 the expression and save_expr needs to be called with CVAL1 and CVAL2.
1789 If this is true, return 1. Otherwise, return zero. */
1792 twoval_comparison_p (arg, cval1, cval2, save_p)
1794 tree *cval1, *cval2;
1797 enum tree_code code = TREE_CODE (arg);
1798 char class = TREE_CODE_CLASS (code);
1800 /* We can handle some of the 'e' cases here. */
1801 if (class == 'e' && code == TRUTH_NOT_EXPR)
1803 else if (class == 'e'
1804 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
1805 || code == COMPOUND_EXPR))
1808 /* ??? Disable this since the SAVE_EXPR might already be in use outside
1809 the expression. There may be no way to make this work, but it needs
1810 to be looked at again for 2.6. */
1812 else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
1814 /* If we've already found a CVAL1 or CVAL2, this expression is
1815 too complex to handle. */
1816 if (*cval1 || *cval2)
1827 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
1830 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
1831 && twoval_comparison_p (TREE_OPERAND (arg, 1),
1832 cval1, cval2, save_p));
1838 if (code == COND_EXPR)
1839 return (twoval_comparison_p (TREE_OPERAND (arg, 0),
1840 cval1, cval2, save_p)
1841 && twoval_comparison_p (TREE_OPERAND (arg, 1),
1842 cval1, cval2, save_p)
1843 && twoval_comparison_p (TREE_OPERAND (arg, 2),
1844 cval1, cval2, save_p));
1848 /* First see if we can handle the first operand, then the second. For
1849 the second operand, we know *CVAL1 can't be zero. It must be that
1850 one side of the comparison is each of the values; test for the
1851 case where this isn't true by failing if the two operands
1854 if (operand_equal_p (TREE_OPERAND (arg, 0),
1855 TREE_OPERAND (arg, 1), 0))
1859 *cval1 = TREE_OPERAND (arg, 0);
1860 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
1862 else if (*cval2 == 0)
1863 *cval2 = TREE_OPERAND (arg, 0);
1864 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
1869 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
1871 else if (*cval2 == 0)
1872 *cval2 = TREE_OPERAND (arg, 1);
1873 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
1884 /* ARG is a tree that is known to contain just arithmetic operations and
1885 comparisons. Evaluate the operations in the tree substituting NEW0 for
1886 any occurrence of OLD0 as an operand of a comparison and likewise for
1890 eval_subst (arg, old0, new0, old1, new1)
1892 tree old0, new0, old1, new1;
1894 tree type = TREE_TYPE (arg);
1895 enum tree_code code = TREE_CODE (arg);
1896 char class = TREE_CODE_CLASS (code);
1898 /* We can handle some of the 'e' cases here. */
1899 if (class == 'e' && code == TRUTH_NOT_EXPR)
1901 else if (class == 'e'
1902 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
1908 return fold (build1 (code, type,
1909 eval_subst (TREE_OPERAND (arg, 0),
1910 old0, new0, old1, new1)));
1913 return fold (build (code, type,
1914 eval_subst (TREE_OPERAND (arg, 0),
1915 old0, new0, old1, new1),
1916 eval_subst (TREE_OPERAND (arg, 1),
1917 old0, new0, old1, new1)));
1923 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
1926 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
1929 return fold (build (code, type,
1930 eval_subst (TREE_OPERAND (arg, 0),
1931 old0, new0, old1, new1),
1932 eval_subst (TREE_OPERAND (arg, 1),
1933 old0, new0, old1, new1),
1934 eval_subst (TREE_OPERAND (arg, 2),
1935 old0, new0, old1, new1)));
1940 tree arg0 = TREE_OPERAND (arg, 0);
1941 tree arg1 = TREE_OPERAND (arg, 1);
1943 /* We need to check both for exact equality and tree equality. The
1944 former will be true if the operand has a side-effect. In that
1945 case, we know the operand occurred exactly once. */
1947 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
1949 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
1952 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
1954 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
1957 return fold (build (code, type, arg0, arg1));
1964 /* Return a tree for the case when the result of an expression is RESULT
1965 converted to TYPE and OMITTED was previously an operand of the expression
1966 but is now not needed (e.g., we folded OMITTED * 0).
1968 If OMITTED has side effects, we must evaluate it. Otherwise, just do
1969 the conversion of RESULT to TYPE. */
1972 omit_one_operand (type, result, omitted)
1973 tree type, result, omitted;
1975 tree t = convert (type, result);
1977 if (TREE_SIDE_EFFECTS (omitted))
1978 return build (COMPOUND_EXPR, type, omitted, t);
1980 return non_lvalue (t);
1983 /* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
1986 pedantic_omit_one_operand (type, result, omitted)
1987 tree type, result, omitted;
1989 tree t = convert (type, result);
1991 if (TREE_SIDE_EFFECTS (omitted))
1992 return build (COMPOUND_EXPR, type, omitted, t);
1994 return pedantic_non_lvalue (t);
1999 /* Return a simplified tree node for the truth-negation of ARG. This
2000 never alters ARG itself. We assume that ARG is an operation that
2001 returns a truth value (0 or 1). */
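
/* Illustrative sketch, not part of the original source: how a few common
   cases are inverted.  The parameter below is hypothetical.  */
#if 0
static void
invert_truthvalue_example (int_cmp)
     tree int_cmp;   /* assumed to be the LT_EXPR for `i < j' on ints */
{
  tree inv = invert_truthvalue (int_cmp);

  /* inv is the tree for `i >= j'.  For a floating-point `x < y' the
     result is instead TRUTH_NOT_EXPR of the original comparison, since
     !(x < y) is not the same as x >= y when a NaN is involved.  A
     TRUTH_ANDIF_EXPR is inverted by De Morgan: !(a && b) => !a || !b.  */
}
#endif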
2004 invert_truthvalue (arg)
2007 tree type = TREE_TYPE (arg);
2008 enum tree_code code = TREE_CODE (arg);
2010 if (code == ERROR_MARK)
2013 /* If this is a comparison, we can simply invert it, except for
2014 floating-point non-equality comparisons, in which case we just
2015 enclose a TRUTH_NOT_EXPR around what we have. */
2017 if (TREE_CODE_CLASS (code) == '<')
2019 if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
2020 && code != NE_EXPR && code != EQ_EXPR)
2021 return build1 (TRUTH_NOT_EXPR, type, arg);
2023 return build (invert_tree_comparison (code), type,
2024 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
2030 return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
2031 && TREE_INT_CST_HIGH (arg) == 0, 0));
2033 case TRUTH_AND_EXPR:
2034 return build (TRUTH_OR_EXPR, type,
2035 invert_truthvalue (TREE_OPERAND (arg, 0)),
2036 invert_truthvalue (TREE_OPERAND (arg, 1)));
2039 return build (TRUTH_AND_EXPR, type,
2040 invert_truthvalue (TREE_OPERAND (arg, 0)),
2041 invert_truthvalue (TREE_OPERAND (arg, 1)));
2043 case TRUTH_XOR_EXPR:
2044 /* Here we can invert either operand. We invert the first operand
2045 unless the second operand is a TRUTH_NOT_EXPR, in which case our
2046 result is the XOR of the first operand with the inside of the
2047 negation of the second operand. */
2049 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
2050 return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
2051 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
2053 return build (TRUTH_XOR_EXPR, type,
2054 invert_truthvalue (TREE_OPERAND (arg, 0)),
2055 TREE_OPERAND (arg, 1));
2057 case TRUTH_ANDIF_EXPR:
2058 return build (TRUTH_ORIF_EXPR, type,
2059 invert_truthvalue (TREE_OPERAND (arg, 0)),
2060 invert_truthvalue (TREE_OPERAND (arg, 1)));
2062 case TRUTH_ORIF_EXPR:
2063 return build (TRUTH_ANDIF_EXPR, type,
2064 invert_truthvalue (TREE_OPERAND (arg, 0)),
2065 invert_truthvalue (TREE_OPERAND (arg, 1)));
2067 case TRUTH_NOT_EXPR:
2068 return TREE_OPERAND (arg, 0);
2071 return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
2072 invert_truthvalue (TREE_OPERAND (arg, 1)),
2073 invert_truthvalue (TREE_OPERAND (arg, 2)));
2076 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
2077 invert_truthvalue (TREE_OPERAND (arg, 1)));
2079 case NON_LVALUE_EXPR:
2080 return invert_truthvalue (TREE_OPERAND (arg, 0));
2085 return build1 (TREE_CODE (arg), type,
2086 invert_truthvalue (TREE_OPERAND (arg, 0)));
2089 if (!integer_onep (TREE_OPERAND (arg, 1)))
2091 return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));
2094 return build1 (TRUTH_NOT_EXPR, type, arg);
2096 case CLEANUP_POINT_EXPR:
2097 return build1 (CLEANUP_POINT_EXPR, type,
2098 invert_truthvalue (TREE_OPERAND (arg, 0)));
2100 if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
2102 return build1 (TRUTH_NOT_EXPR, type, arg);
2105 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2106 operands are another bit-wise operation with a common input. If so,
2107 distribute the bit operations to save an operation and possibly two if
2108 constants are involved. For example, convert
2109 (A | B) & (A | C) into A | (B & C)
2110 Further simplification will occur if B and C are constants.
2112 If this optimization cannot be done, 0 will be returned. */
2115 distribute_bit_expr (code, type, arg0, arg1)
2116 enum tree_code code;
2123 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2124 || TREE_CODE (arg0) == code
2125 || (TREE_CODE (arg0) != BIT_AND_EXPR
2126 && TREE_CODE (arg0) != BIT_IOR_EXPR))
2129 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
2131 common = TREE_OPERAND (arg0, 0);
2132 left = TREE_OPERAND (arg0, 1);
2133 right = TREE_OPERAND (arg1, 1);
2135 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
2137 common = TREE_OPERAND (arg0, 0);
2138 left = TREE_OPERAND (arg0, 1);
2139 right = TREE_OPERAND (arg1, 0);
2141 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
2143 common = TREE_OPERAND (arg0, 1);
2144 left = TREE_OPERAND (arg0, 0);
2145 right = TREE_OPERAND (arg1, 1);
2147 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
2149 common = TREE_OPERAND (arg0, 1);
2150 left = TREE_OPERAND (arg0, 0);
2151 right = TREE_OPERAND (arg1, 0);
2156 return fold (build (TREE_CODE (arg0), type, common,
2157 fold (build (code, type, left, right))));
2160 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
2161 starting at BITPOS. The field is unsigned if UNSIGNEDP is non-zero. */
2164 make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
2167 int bitsize, bitpos;
2170 tree result = build (BIT_FIELD_REF, type, inner,
2171 size_int (bitsize), size_int (bitpos));
2173 TREE_UNSIGNED (result) = unsignedp;
2178 /* Optimize a bit-field compare.
2180 There are two cases: First is a compare against a constant and the
2181 second is a comparison of two items where the fields are at the same
2182 bit position relative to the start of a chunk (byte, halfword, word)
2183 large enough to contain it. In these cases we can avoid the shift
2184 implicit in bitfield extractions.
2186 For constants, we emit a compare of the shifted constant with the
2187 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2188 compared. For two fields at the same position, we do the ANDs with the
2189 similar mask and compare the result of the ANDs.
2191 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2192 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2193 are the left and right operands of the comparison, respectively.
2195 If the optimization described above can be done, we return the resulting
2196 tree. Otherwise we return zero. */
2199 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2200 enum tree_code code;
2204 int lbitpos, lbitsize, rbitpos, rbitsize;
2205 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
2206 tree type = TREE_TYPE (lhs);
2207 tree signed_type, unsigned_type;
2208 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2209 enum machine_mode lmode, rmode, lnmode, rnmode;
2210 int lunsignedp, runsignedp;
2211 int lvolatilep = 0, rvolatilep = 0;
2212 tree linner, rinner;
2216 /* Get all the information about the extractions being done. If the bit size
2217 is the same as the size of the underlying object, we aren't doing an
2218 extraction at all and so can do nothing. */
2219 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2220 &lunsignedp, &lvolatilep);
2221 if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2227 /* If this is not a constant, we can only do something if bit positions,
2228 sizes, and signedness are the same. */
2229 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset,
2230 &rmode, &runsignedp, &rvolatilep);
2232 if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
2233 || lunsignedp != runsignedp || offset != 0)
2237 /* See if we can find a mode to refer to this field. We should be able to,
2238 but fail if we can't. */
2239 lnmode = get_best_mode (lbitsize, lbitpos,
2240 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
2242 if (lnmode == VOIDmode)
2245 /* Set signed and unsigned types of the precision of this mode for the
2247 signed_type = type_for_mode (lnmode, 0);
2248 unsigned_type = type_for_mode (lnmode, 1);
2252 rnmode = get_best_mode (rbitsize, rbitpos,
2253 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
2255 if (rnmode == VOIDmode)
2259 /* Compute the bit position and size for the new reference and our offset
2260 within it. If the new reference is the same size as the original, we
2261 won't optimize anything, so return zero. */
2262 lnbitsize = GET_MODE_BITSIZE (lnmode);
2263 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2264 lbitpos -= lnbitpos;
2265 if (lnbitsize == lbitsize)
2270 rnbitsize = GET_MODE_BITSIZE (rnmode);
2271 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2272 rbitpos -= rnbitpos;
2273 if (rnbitsize == rbitsize)
2277 if (BYTES_BIG_ENDIAN)
2278 lbitpos = lnbitsize - lbitsize - lbitpos;
2280 /* Make the mask to be used against the extracted field. */
2281 mask = build_int_2 (~0, ~0);
2282 TREE_TYPE (mask) = unsigned_type;
2283 force_fit_type (mask, 0);
2284 mask = convert (unsigned_type, mask);
2285 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2286 mask = const_binop (RSHIFT_EXPR, mask,
2287 size_int (lnbitsize - lbitsize - lbitpos), 0);
2290 /* If not comparing with constant, just rework the comparison
2292 return build (code, compare_type,
2293 build (BIT_AND_EXPR, unsigned_type,
2294 make_bit_field_ref (linner, unsigned_type,
2295 lnbitsize, lnbitpos, 1),
2297 build (BIT_AND_EXPR, unsigned_type,
2298 make_bit_field_ref (rinner, unsigned_type,
2299 rnbitsize, rnbitpos, 1),
2302 /* Otherwise, we are handling the constant case. See if the constant is too
2303 big for the field. Warn and return a tree for 0 (false) if so. We do
2304 this not only for its own sake, but to avoid having to test for this
2305 error case below. If we didn't, we might generate wrong code.
2307 For unsigned fields, the constant shifted right by the field length should
2308 be all zero. For signed fields, the high-order bits should agree with
2313 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2314 convert (unsigned_type, rhs),
2315 size_int (lbitsize), 0)))
2317 warning ("comparison is always %s due to width of bitfield",
2318 code == NE_EXPR ? "one" : "zero");
2319 return convert (compare_type,
2321 ? integer_one_node : integer_zero_node));
2326 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2327 size_int (lbitsize - 1), 0);
2328 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2330 warning ("comparison is always %s due to width of bitfield",
2331 code == NE_EXPR ? "one" : "zero");
2332 return convert (compare_type,
2334 ? integer_one_node : integer_zero_node));
2338 /* Single-bit compares should always be against zero. */
2339 if (lbitsize == 1 && ! integer_zerop (rhs))
2341 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2342 rhs = convert (type, integer_zero_node);
2345 /* Make a new bitfield reference, shift the constant over the
2346 appropriate number of bits and mask it with the computed mask
2347 (in case this was a signed field). If we changed it, make a new one. */
2348 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2351 TREE_SIDE_EFFECTS (lhs) = 1;
2352 TREE_THIS_VOLATILE (lhs) = 1;
2355 rhs = fold (const_binop (BIT_AND_EXPR,
2356 const_binop (LSHIFT_EXPR,
2357 convert (unsigned_type, rhs),
2358 size_int (lbitpos), 0),
2361 return build (code, compare_type,
2362 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2366 /* Subroutine for fold_truthop: decode a field reference.
2368 If EXP is a comparison reference, we return the innermost reference.
2370 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2371 set to the starting bit number.
2373 If the innermost field can be completely contained in a mode-sized
2374 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2376 *PVOLATILEP is set to 1 if any expression encountered is volatile;
2377 otherwise it is not changed.
2379 *PUNSIGNEDP is set to the signedness of the field.
2381 *PMASK is set to the mask used. This is either contained in a
2382 BIT_AND_EXPR or derived from the width of the field.
2384 Return 0 if this is not a component reference or is one that we can't
2385 do anything with. */
2388 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2391 int *pbitsize, *pbitpos;
2392 enum machine_mode *pmode;
2393 int *punsignedp, *pvolatilep;
2397 tree mask, inner, offset;
2401 /* All the optimizations using this function assume integer fields.
2402 There are problems with FP fields since the type_for_size call
2403 below can fail for, e.g., XFmode. */
2404 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
2409 if (TREE_CODE (exp) == BIT_AND_EXPR)
2411 and_mask = TREE_OPERAND (exp, 1);
2412 exp = TREE_OPERAND (exp, 0);
2413 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
2414 if (TREE_CODE (and_mask) != INTEGER_CST)
2419 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2420 punsignedp, pvolatilep);
2421 if ((inner == exp && and_mask == 0)
2422 || *pbitsize < 0 || offset != 0)
2425 /* Compute the mask to access the bitfield. */
2426 unsigned_type = type_for_size (*pbitsize, 1);
2427 precision = TYPE_PRECISION (unsigned_type);
2429 mask = build_int_2 (~0, ~0);
2430 TREE_TYPE (mask) = unsigned_type;
2431 force_fit_type (mask, 0);
2432 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2433 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
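/* For instance (illustrative values only): with precision == 32 and
   *pbitsize == 3, the two shifts leave the low-order mask 0x7.  */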
2435 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
2437 mask = fold (build (BIT_AND_EXPR, unsigned_type,
2438 convert (unsigned_type, and_mask), mask));
2444 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order bits of its type. */
2448 all_ones_mask_p (mask, size)
2452 tree type = TREE_TYPE (mask);
2453 int precision = TYPE_PRECISION (type);
2456 tmask = build_int_2 (~0, ~0);
2457 TREE_TYPE (tmask) = signed_type (type);
2458 force_fit_type (tmask, 0);
2460 tree_int_cst_equal (mask,
2461 const_binop (RSHIFT_EXPR,
2462 const_binop (LSHIFT_EXPR, tmask,
2463 size_int (precision - size),
2465 size_int (precision - size), 0));
2468 /* Subroutine for fold_truthop: determine if an operand is simple enough
2469 to be evaluated unconditionally. */
2472 simple_operand_p (exp)
2475 /* Strip any conversions that don't change the machine mode. */
2476 while ((TREE_CODE (exp) == NOP_EXPR
2477 || TREE_CODE (exp) == CONVERT_EXPR)
2478 && (TYPE_MODE (TREE_TYPE (exp))
2479 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2480 exp = TREE_OPERAND (exp, 0);
2482 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2483 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2484 && ! TREE_ADDRESSABLE (exp)
2485 && ! TREE_THIS_VOLATILE (exp)
2486 && ! DECL_NONLOCAL (exp)
2487 /* Don't regard global variables as simple. They may be
2488 allocated in ways unknown to the compiler (shared memory,
2489 #pragma weak, etc). */
2490 && ! TREE_PUBLIC (exp)
2491 && ! DECL_EXTERNAL (exp)
2492 /* Loading a static variable is unduly expensive, but global
2493 registers aren't expensive. */
2494 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2497 /* Subroutine for fold_truthop: try to optimize a range test.
2499 For example, "i >= 2 && i <= 9" can be done as "(unsigned) (i - 2) <= 7".
2501 JCODE is the logical combination of the two terms. It is TRUTH_AND_EXPR
2502 (representing TRUTH_ANDIF_EXPR and TRUTH_AND_EXPR) or TRUTH_OR_EXPR
2503 (representing TRUTH_ORIF_EXPR and TRUTH_OR_EXPR). TYPE is the type of the result.
2506 VAR is the value being tested. LO_CODE and HI_CODE are the comparison
2507 operators comparing VAR to LO_CST and HI_CST. LO_CST is known to be no
2508 larger than HI_CST (they may be equal).
2510 We return the simplified tree or 0 if no optimization is possible. */
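/* A minimal standalone sketch, not part of GNU CC, checking the
   transformation described above over every value of a small range;
   the bounds 2 and 9 are chosen only for illustration.  Guarded with
   #if 0 so it is never compiled as part of this file.  */
#if 0
#include <assert.h>

int
main ()
{
  int i;

  for (i = -1000; i <= 1000; i++)
    /* "i >= 2 && i <= 9" rewritten as one unsigned comparison.  */
    assert ((i >= 2 && i <= 9) == ((unsigned) (i - 2) <= 7));
  return 0;
}
#endif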
2513 range_test (jcode, type, lo_code, hi_code, var, lo_cst, hi_cst)
2514 enum tree_code jcode, lo_code, hi_code;
2515 tree type, var, lo_cst, hi_cst;
2518 enum tree_code rcode;
2520 /* See if this is a range test and normalize the constant terms. */
2522 if (jcode == TRUTH_AND_EXPR)
2527 /* See if we have VAR != CST && VAR != CST+1. */
2528 if (! (hi_code == NE_EXPR
2529 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2530 && tree_int_cst_equal (integer_one_node,
2531 const_binop (MINUS_EXPR,
2532 hi_cst, lo_cst, 0))))
2540 if (hi_code == LT_EXPR)
2541 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2542 else if (hi_code != LE_EXPR)
2545 if (lo_code == GT_EXPR)
2546 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2548 /* We now have VAR >= LO_CST && VAR <= HI_CST. */
2561 /* See if we have VAR == CST || VAR == CST+1. */
2562 if (! (hi_code == EQ_EXPR
2563 && TREE_INT_CST_LOW (hi_cst) - TREE_INT_CST_LOW (lo_cst) == 1
2564 && tree_int_cst_equal (integer_one_node,
2565 const_binop (MINUS_EXPR,
2566 hi_cst, lo_cst, 0))))
2574 if (hi_code == GE_EXPR)
2575 hi_cst = const_binop (MINUS_EXPR, hi_cst, integer_one_node, 0);
2576 else if (hi_code != GT_EXPR)
2579 if (lo_code == LE_EXPR)
2580 lo_cst = const_binop (PLUS_EXPR, lo_cst, integer_one_node, 0);
2582 /* We now have VAR < LO_CST || VAR > HI_CST. */
2591 /* When normalizing, it is possible to both increment the smaller constant
2592 and decrement the larger constant. See if they are still ordered. */
2593 if (tree_int_cst_lt (hi_cst, lo_cst))
2596 /* Fail if VAR isn't an integer. */
2597 utype = TREE_TYPE (var);
2598 if (! INTEGRAL_TYPE_P (utype))
2601 /* The range test is invalid if subtracting the two constants results
2602 in overflow. This can happen in traditional mode. */
2603 if (! int_fits_type_p (hi_cst, TREE_TYPE (var))
2604 || ! int_fits_type_p (lo_cst, TREE_TYPE (var)))
2607 if (! TREE_UNSIGNED (utype))
2609 utype = unsigned_type (utype);
2610 var = convert (utype, var);
2611 lo_cst = convert (utype, lo_cst);
2612 hi_cst = convert (utype, hi_cst);
2615 return fold (convert (type,
2616 build (rcode, utype,
2617 build (MINUS_EXPR, utype, var, lo_cst),
2618 const_binop (MINUS_EXPR, hi_cst, lo_cst, 0))));
2621 /* Subroutine for fold_truthop: C is an INTEGER_CST interpreted as a P
2622 bit value. Arrange things so the extra bits will be set to zero if and
2623 only if C is sign-extended to its full width. */
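/* A minimal standalone sketch, not part of GNU CC, of the sign-bit
   trick used below, done on a plain 32-bit int holding an 8-bit value
   (P == 8).  It assumes two's-complement arithmetic and an arithmetic
   right shift of signed ints, as the tree arithmetic below effectively
   does.  The extra bits of the result are zero exactly when C was
   already sign-extended to the full width.  Guarded with #if 0.  */
#if 0
#include <assert.h>

static int
unextend_sketch (c, p)
     int c, p;
{
  int modesize = 32;
  int temp;

  temp = (c >> (p - 1)) & 1;		/* the P-bit sign bit */
  temp = temp << (modesize - 1);	/* move it to the high-order bit */
  temp = temp >> (modesize - p - 1);	/* sign-extend: ones in bits P..31 */
  return c ^ temp;
}

int
main ()
{
  assert (unextend_sketch (-128, 8) == 0x80);	/* was sign-extended */
  assert (unextend_sketch (0x80, 8) != 0x80);	/* was not */
  assert (unextend_sketch (5, 8) == 5);		/* non-negative: unchanged */
  return 0;
}
#endif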
2626 unextend (c, p, unsignedp)
2631 tree type = TREE_TYPE (c);
2632 int modesize = GET_MODE_BITSIZE (TYPE_MODE (type));
2635 if (p == modesize || unsignedp)
2638 if (TREE_UNSIGNED (type))
2639 c = convert (signed_type (type), c);
2641 /* We work by getting just the sign bit into the low-order bit, then
2642 into the high-order bit, then sign-extend. We then XOR that value with C. */
2644 temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1), 0);
2645 temp = const_binop (BIT_AND_EXPR, temp, size_int (1), 0);
2646 temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1), 0);
2647 temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1), 0);
2648 return convert (type, const_binop (BIT_XOR_EXPR, c, temp, 0));
2651 /* Find ways of folding logical expressions of LHS and RHS:
2652 Try to merge two comparisons to the same innermost item.
2653 Look for range tests like "ch >= '0' && ch <= '9'".
2654 Look for combinations of simple terms on machines with expensive branches
2655 and evaluate the RHS unconditionally.
2657 For example, if we have p->a == 2 && p->b == 4 and we can make an
2658 object large enough to span both A and B, we can do this with a comparison
2659 against the object ANDed with a mask.
2661 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
2662 operations to do this with one comparison.
2664 We check for both normal comparisons and the BIT_AND_EXPRs made by this
2665 function and the one above.
2667 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
2668 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
2670 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its two operands.
2673 We return the simplified tree or 0 if no optimization is possible. */
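/* A minimal standalone sketch, not part of GNU CC, of the first example
   above: two adjacent byte-sized fields tested against constants become
   a single comparison of the halfword that spans them.  The struct and
   types here are assumptions made only for the example; the fields
   happen to fill the halfword, so no mask is needed (otherwise the code
   below ANDs one in first).  Guarded with #if 0 so it is never
   compiled.  */
#if 0
#include <assert.h>
#include <string.h>

struct s { unsigned char a, b; };

int
main ()
{
  struct s x, c;
  unsigned short wx, wc;

  c.a = 2; c.b = 4;			/* the constants being tested */
  memcpy (&wc, &c, sizeof wc);

  x.a = 2; x.b = 4;
  memcpy (&wx, &x, sizeof wx);
  assert ((x.a == 2 && x.b == 4) == (wx == wc));	/* both true */

  x.b = 5;
  memcpy (&wx, &x, sizeof wx);
  assert ((x.a == 2 && x.b == 4) == (wx == wc));	/* both false */
  return 0;
}
#endif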
2676 fold_truthop (code, truth_type, lhs, rhs)
2677 enum tree_code code;
2678 tree truth_type, lhs, rhs;
2680 /* If this is the "or" of two comparisons, we can do something if
2681 the comparisons are NE_EXPR. If this is the "and", we can do something
2682 if the comparisons are EQ_EXPR. I.e.,
2683 (a->b == 2 && a->c == 4) can become (a->new == NEW).
2685 WANTED_CODE is this operation code. For single bit fields, we can
2686 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
2687 comparison for one-bit fields. */
2689 enum tree_code wanted_code;
2690 enum tree_code lcode, rcode;
2691 tree ll_arg, lr_arg, rl_arg, rr_arg;
2692 tree ll_inner, lr_inner, rl_inner, rr_inner;
2693 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
2694 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
2695 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
2696 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
2697 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
2698 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
2699 enum machine_mode lnmode, rnmode;
2700 tree ll_mask, lr_mask, rl_mask, rr_mask;
2701 tree l_const, r_const;
2703 int first_bit, end_bit;
2706 /* Start by getting the comparison codes and seeing if this looks like
2707 a range test. Fail if anything is volatile. If one operand is a
2708 BIT_AND_EXPR with the constant one, treat it as if it were surrounded by a comparison against zero. */
2711 if (TREE_SIDE_EFFECTS (lhs)
2712 || TREE_SIDE_EFFECTS (rhs))
2715 lcode = TREE_CODE (lhs);
2716 rcode = TREE_CODE (rhs);
2718 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
2719 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
2721 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
2722 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
2724 if (TREE_CODE_CLASS (lcode) != '<'
2725 || TREE_CODE_CLASS (rcode) != '<')
2728 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
2729 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
2731 ll_arg = TREE_OPERAND (lhs, 0);
2732 lr_arg = TREE_OPERAND (lhs, 1);
2733 rl_arg = TREE_OPERAND (rhs, 0);
2734 rr_arg = TREE_OPERAND (rhs, 1);
2736 if (TREE_CODE (lr_arg) == INTEGER_CST
2737 && TREE_CODE (rr_arg) == INTEGER_CST
2738 && operand_equal_p (ll_arg, rl_arg, 0))
2740 if (tree_int_cst_lt (lr_arg, rr_arg))
2741 result = range_test (code, truth_type, lcode, rcode,
2742 ll_arg, lr_arg, rr_arg);
2744 result = range_test (code, truth_type, rcode, lcode,
2745 ll_arg, rr_arg, lr_arg);
2747 /* If this isn't a range test, it also isn't a comparison that
2748 can be merged. However, it wins to evaluate the RHS unconditionally
2749 on machines with expensive branches. */
2751 if (result == 0 && BRANCH_COST >= 2)
2753 if (TREE_CODE (ll_arg) != VAR_DECL
2754 && TREE_CODE (ll_arg) != PARM_DECL)
2756 /* Avoid evaluating the variable part twice. */
2757 ll_arg = save_expr (ll_arg);
2758 lhs = build (lcode, TREE_TYPE (lhs), ll_arg, lr_arg);
2759 rhs = build (rcode, TREE_TYPE (rhs), ll_arg, rr_arg);
2761 return build (code, truth_type, lhs, rhs);
2766 /* If the RHS can be evaluated unconditionally and its operands are
2767 simple, it wins to evaluate the RHS unconditionally on machines
2768 with expensive branches. In this case, this isn't a comparison
2769 that can be merged. */
2771 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
2772 are with zero (tmw). */
2774 if (BRANCH_COST >= 2
2775 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
2776 && simple_operand_p (rl_arg)
2777 && simple_operand_p (rr_arg))
2778 return build (code, truth_type, lhs, rhs);
2780 /* See if the comparisons can be merged. Then get all the parameters for each side. */
2783 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
2784 || (rcode != EQ_EXPR && rcode != NE_EXPR))
2788 ll_inner = decode_field_reference (ll_arg,
2789 &ll_bitsize, &ll_bitpos, &ll_mode,
2790 &ll_unsignedp, &volatilep, &ll_mask);
2791 lr_inner = decode_field_reference (lr_arg,
2792 &lr_bitsize, &lr_bitpos, &lr_mode,
2793 &lr_unsignedp, &volatilep, &lr_mask);
2794 rl_inner = decode_field_reference (rl_arg,
2795 &rl_bitsize, &rl_bitpos, &rl_mode,
2796 &rl_unsignedp, &volatilep, &rl_mask);
2797 rr_inner = decode_field_reference (rr_arg,
2798 &rr_bitsize, &rr_bitpos, &rr_mode,
2799 &rr_unsignedp, &volatilep, &rr_mask);
2801 /* The inner operation on the lhs of each comparison must be the same
2802 if we are to be able to do anything. Then see if we have constants.
2803 If not, the same must be true for the rhs operands. */
2805 if (volatilep || ll_inner == 0 || rl_inner == 0
2806 || ! operand_equal_p (ll_inner, rl_inner, 0))
2809 if (TREE_CODE (lr_arg) == INTEGER_CST
2810 && TREE_CODE (rr_arg) == INTEGER_CST)
2811 l_const = lr_arg, r_const = rr_arg;
2812 else if (lr_inner == 0 || rr_inner == 0
2813 || ! operand_equal_p (lr_inner, rr_inner, 0))
2816 l_const = r_const = 0;
2818 /* If either comparison code is not correct for our logical operation,
2819 fail. However, we can convert a one-bit comparison against zero into
2820 the opposite comparison against that bit being set in the field. */
2822 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
2823 if (lcode != wanted_code)
2825 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
2831 if (rcode != wanted_code)
2833 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
2839 /* See if we can find a mode that contains both fields being compared on
2840 the left. If we can't, fail. Otherwise, update all constants and masks
2841 to be relative to a field of that size. */
2842 first_bit = MIN (ll_bitpos, rl_bitpos);
2843 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
2844 lnmode = get_best_mode (end_bit - first_bit, first_bit,
2845 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
2847 if (lnmode == VOIDmode)
2850 lnbitsize = GET_MODE_BITSIZE (lnmode);
2851 lnbitpos = first_bit & ~ (lnbitsize - 1);
2852 type = type_for_size (lnbitsize, 1);
2853 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
2855 if (BYTES_BIG_ENDIAN)
2857 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
2858 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
2861 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
2862 size_int (xll_bitpos), 0);
2863 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
2864 size_int (xrl_bitpos), 0);
2868 l_const = convert (type, unextend (l_const, ll_bitsize, ll_unsignedp));
2869 l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
2870 if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
2871 fold (build1 (BIT_NOT_EXPR,
2875 warning ("comparison is always %s",
2876 wanted_code == NE_EXPR ? "one" : "zero");
2878 return convert (truth_type,
2879 wanted_code == NE_EXPR
2880 ? integer_one_node : integer_zero_node);
2885 r_const = convert (type, unextend (r_const, rl_bitsize, rl_unsignedp));
2886 r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos), 0);
2887 if (! integer_zerop (const_binop (BIT_AND_EXPR, r_const,
2888 fold (build1 (BIT_NOT_EXPR,
2892 warning ("comparison is always %s",
2893 wanted_code == NE_EXPR ? "one" : "zero");
2895 return convert (truth_type,
2896 wanted_code == NE_EXPR
2897 ? integer_one_node : integer_zero_node);
2901 /* If the right sides are not constant, do the same for them. Also,
2902 disallow this optimization if a size or signedness mismatch occurs
2903 between the left and right sides. */
2906 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
2907 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
2908 /* Make sure the two fields on the right
2909 correspond to the left without being swapped. */
2910 || ll_bitpos - rl_bitpos != lr_bitpos - rr_bitpos)
2913 first_bit = MIN (lr_bitpos, rr_bitpos);
2914 end_bit = MAX (lr_bitpos + lr_bitsize, rr_bitpos + rr_bitsize);
2915 rnmode = get_best_mode (end_bit - first_bit, first_bit,
2916 TYPE_ALIGN (TREE_TYPE (lr_inner)), word_mode,
2918 if (rnmode == VOIDmode)
2921 rnbitsize = GET_MODE_BITSIZE (rnmode);
2922 rnbitpos = first_bit & ~ (rnbitsize - 1);
2923 xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
2925 if (BYTES_BIG_ENDIAN)
2927 xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
2928 xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
2931 lr_mask = const_binop (LSHIFT_EXPR, convert (type, lr_mask),
2932 size_int (xlr_bitpos), 0);
2933 rr_mask = const_binop (LSHIFT_EXPR, convert (type, rr_mask),
2934 size_int (xrr_bitpos), 0);
2936 /* Make a mask that corresponds to both fields being compared.
2937 Do this for both items being compared. If the masks agree,
2938 we can do this by masking both and comparing the masked values. */
2940 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
2941 lr_mask = const_binop (BIT_IOR_EXPR, lr_mask, rr_mask, 0);
2942 if (operand_equal_p (ll_mask, lr_mask, 0) && lnbitsize == rnbitsize)
2944 lhs = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
2945 ll_unsignedp || rl_unsignedp);
2946 rhs = make_bit_field_ref (lr_inner, type, rnbitsize, rnbitpos,
2947 lr_unsignedp || rr_unsignedp);
2948 if (! all_ones_mask_p (ll_mask, lnbitsize))
2950 lhs = build (BIT_AND_EXPR, type, lhs, ll_mask);
2951 rhs = build (BIT_AND_EXPR, type, rhs, ll_mask);
2953 return build (wanted_code, truth_type, lhs, rhs);
2956 /* There is still another way we can do something: If both pairs of
2957 fields being compared are adjacent, we may be able to make a wider
2958 field containing them both. */
2959 if ((ll_bitsize + ll_bitpos == rl_bitpos
2960 && lr_bitsize + lr_bitpos == rr_bitpos)
2961 || (ll_bitpos == rl_bitpos + rl_bitsize
2962 && lr_bitpos == rr_bitpos + rr_bitsize))
2963 return build (wanted_code, truth_type,
2964 make_bit_field_ref (ll_inner, type,
2965 ll_bitsize + rl_bitsize,
2966 MIN (ll_bitpos, rl_bitpos),
2968 make_bit_field_ref (lr_inner, type,
2969 lr_bitsize + rr_bitsize,
2970 MIN (lr_bitpos, rr_bitpos),
2976 /* Handle the case of comparisons with constants. If there is something in
2977 common between the masks, those bits of the constants must be the same.
2978 If not, the condition is always false. Test for this to avoid generating
2979 incorrect code below. */
2980 result = const_binop (BIT_AND_EXPR, ll_mask, rl_mask, 0);
2981 if (! integer_zerop (result)
2982 && simple_cst_equal (const_binop (BIT_AND_EXPR, result, l_const, 0),
2983 const_binop (BIT_AND_EXPR, result, r_const, 0)) != 1)
2985 if (wanted_code == NE_EXPR)
2987 warning ("`or' of unmatched not-equal tests is always 1");
2988 return convert (truth_type, integer_one_node);
2992 warning ("`and' of mutually exclusive equal-tests is always zero");
2993 return convert (truth_type, integer_zero_node);
2997 /* Construct the expression we will return. First get the component
2998 reference we will make. Unless the mask is all ones the width of
2999 that field, perform the mask operation. Then compare with the merged constant. */
3001 result = make_bit_field_ref (ll_inner, type, lnbitsize, lnbitpos,
3002 ll_unsignedp || rl_unsignedp);
3004 ll_mask = const_binop (BIT_IOR_EXPR, ll_mask, rl_mask, 0);
3005 if (! all_ones_mask_p (ll_mask, lnbitsize))
3006 result = build (BIT_AND_EXPR, type, result, ll_mask);
3008 return build (wanted_code, truth_type, result,
3009 const_binop (BIT_IOR_EXPR, l_const, r_const, 0));
3012 /* If T contains a COMPOUND_EXPR which was inserted merely to evaluate
3013 S, a SAVE_EXPR, return the expression actually being evaluated. Note
3014 that we may sometimes modify the tree. */
3017 strip_compound_expr (t, s)
3021 tree type = TREE_TYPE (t);
3022 enum tree_code code = TREE_CODE (t);
3024 /* See if this is the COMPOUND_EXPR we want to eliminate. */
3025 if (code == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR
3026 && TREE_OPERAND (TREE_OPERAND (t, 0), 0) == s)
3027 return TREE_OPERAND (t, 1);
3029 /* See if this is a COND_EXPR or a simple arithmetic operator. We
3030 don't bother handling any other types. */
3031 else if (code == COND_EXPR)
3033 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3034 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3035 TREE_OPERAND (t, 2) = strip_compound_expr (TREE_OPERAND (t, 2), s);
3037 else if (TREE_CODE_CLASS (code) == '1')
3038 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3039 else if (TREE_CODE_CLASS (code) == '<'
3040 || TREE_CODE_CLASS (code) == '2')
3042 TREE_OPERAND (t, 0) = strip_compound_expr (TREE_OPERAND (t, 0), s);
3043 TREE_OPERAND (t, 1) = strip_compound_expr (TREE_OPERAND (t, 1), s);
3049 /* Perform constant folding and related simplification of EXPR.
3050 The related simplifications include x*1 => x, x*0 => 0, etc.,
3051 and application of the associative law.
3052 NOP_EXPR conversions may be removed freely (as long as we
3053 are careful not to change the C type of the overall expression).
3054 We cannot simplify through a CONVERT_EXPR, FIX_EXPR or FLOAT_EXPR,
3055 but we can constant-fold them if they have constant operands. */
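/* A minimal usage sketch, not part of GNU CC proper: how a caller hands
   a freshly built tree to fold and gets the simplified form back.  It
   assumes the usual tree-building environment (tree.h and friends) and
   is guarded with #if 0 so it is never compiled here.  */
#if 0
static tree
fold_sketch (x)
     tree x;				/* some integer-typed expression */
{
  /* x + 0 and x * 1 both come back as x (possibly wrapped in a
     NON_LVALUE_EXPR); constant operands would be folded outright.  */
  tree t = fold (build (PLUS_EXPR, integer_type_node, x, integer_zero_node));
  return fold (build (MULT_EXPR, integer_type_node, t, integer_one_node));
}
#endif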
3061 register tree t = expr;
3062 tree t1 = NULL_TREE;
3064 tree type = TREE_TYPE (expr);
3065 register tree arg0, arg1;
3066 register enum tree_code code = TREE_CODE (t);
3070 /* WINS will be nonzero when the switch is done
3071 if all operands are constant. */
3075 /* Don't try to process an RTL_EXPR since its operands aren't trees. */
3076 if (code == RTL_EXPR)
3079 /* Return right away if already constant. */
3080 if (TREE_CONSTANT (t))
3082 if (code == CONST_DECL)
3083 return DECL_INITIAL (t);
3087 kind = TREE_CODE_CLASS (code);
3088 if (code == NOP_EXPR || code == FLOAT_EXPR || code == CONVERT_EXPR)
3092 /* Special case for conversion ops that can have fixed point args. */
3093 arg0 = TREE_OPERAND (t, 0);
3095 /* Don't use STRIP_NOPS, because signedness of argument type matters. */
3097 STRIP_TYPE_NOPS (arg0);
3099 if (arg0 != 0 && TREE_CODE (arg0) == COMPLEX_CST)
3100 subop = TREE_REALPART (arg0);
3104 if (subop != 0 && TREE_CODE (subop) != INTEGER_CST
3105 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3106 && TREE_CODE (subop) != REAL_CST
3107 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3109 /* Note that TREE_CONSTANT isn't enough:
3110 static var addresses are constant but we can't
3111 do arithmetic on them. */
3114 else if (kind == 'e' || kind == '<'
3115 || kind == '1' || kind == '2' || kind == 'r')
3117 register int len = tree_code_length[(int) code];
3119 for (i = 0; i < len; i++)
3121 tree op = TREE_OPERAND (t, i);
3125 continue; /* Valid for CALL_EXPR, at least. */
3127 if (kind == '<' || code == RSHIFT_EXPR)
3129 /* Signedness matters here. Perhaps we can refine this later. */
3131 STRIP_TYPE_NOPS (op);
3135 /* Strip any conversions that don't change the mode. */
3139 if (TREE_CODE (op) == COMPLEX_CST)
3140 subop = TREE_REALPART (op);
3144 if (TREE_CODE (subop) != INTEGER_CST
3145 #if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3146 && TREE_CODE (subop) != REAL_CST
3147 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3149 /* Note that TREE_CONSTANT isn't enough:
3150 static var addresses are constant but we can't
3151 do arithmetic on them. */
3161 /* If this is a commutative operation, and ARG0 is a constant, move it
3162 to ARG1 to reduce the number of tests below. */
3163 if ((code == PLUS_EXPR || code == MULT_EXPR || code == MIN_EXPR
3164 || code == MAX_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR
3165 || code == BIT_AND_EXPR)
3166 && (TREE_CODE (arg0) == INTEGER_CST || TREE_CODE (arg0) == REAL_CST))
3168 tem = arg0; arg0 = arg1; arg1 = tem;
3170 tem = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = TREE_OPERAND (t, 1);
3171 TREE_OPERAND (t, 1) = tem;
3174 /* Now WINS is set as described above,
3175 ARG0 is the first operand of EXPR,
3176 and ARG1 is the second operand (if it has more than one operand).
3178 First check for cases where an arithmetic operation is applied to a
3179 compound, conditional, or comparison operation. Push the arithmetic
3180 operation inside the compound or conditional to see if any folding
3181 can then be done. Convert comparison to conditional for this purpose.
3182 This also optimizes non-constant cases that used to be done in expand_expr.
3185 Before we do that, see if this is a BIT_AND_EXPR or a BIT_IOR_EXPR,
3186 one of the operands is a comparison and the other is a comparison, a
3187 BIT_AND_EXPR with the constant 1, or a truth value. In that case, the
3188 code below would make the expression more complex. Change it to a
3189 TRUTH_{AND,OR}_EXPR. Likewise, convert a similar NE_EXPR to
3190 TRUTH_XOR_EXPR and an EQ_EXPR to the inversion of a TRUTH_XOR_EXPR. */
3192 if ((code == BIT_AND_EXPR || code == BIT_IOR_EXPR
3193 || code == EQ_EXPR || code == NE_EXPR)
3194 && ((truth_value_p (TREE_CODE (arg0))
3195 && (truth_value_p (TREE_CODE (arg1))
3196 || (TREE_CODE (arg1) == BIT_AND_EXPR
3197 && integer_onep (TREE_OPERAND (arg1, 1)))))
3198 || (truth_value_p (TREE_CODE (arg1))
3199 && (truth_value_p (TREE_CODE (arg0))
3200 || (TREE_CODE (arg0) == BIT_AND_EXPR
3201 && integer_onep (TREE_OPERAND (arg0, 1)))))))
3203 t = fold (build (code == BIT_AND_EXPR ? TRUTH_AND_EXPR
3204 : code == BIT_IOR_EXPR ? TRUTH_OR_EXPR
3208 if (code == EQ_EXPR)
3209 t = invert_truthvalue (t);
3214 if (TREE_CODE_CLASS (code) == '1')
3216 if (TREE_CODE (arg0) == COMPOUND_EXPR)
3217 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3218 fold (build1 (code, type, TREE_OPERAND (arg0, 1))));
3219 else if (TREE_CODE (arg0) == COND_EXPR)
3221 t = fold (build (COND_EXPR, type, TREE_OPERAND (arg0, 0),
3222 fold (build1 (code, type, TREE_OPERAND (arg0, 1))),
3223 fold (build1 (code, type, TREE_OPERAND (arg0, 2)))));
3225 /* If this was a conversion, and all we did was to move it
3226 inside the COND_EXPR, bring it back out. But leave it if
3227 it is a conversion from integer to integer and the
3228 result precision is no wider than a word since such a
3229 conversion is cheap and may be optimized away by combine,
3230 while it couldn't if it were outside the COND_EXPR. Then return
3231 so we don't get into an infinite recursion loop taking the
3232 conversion out and then back in. */
3234 if ((code == NOP_EXPR || code == CONVERT_EXPR
3235 || code == NON_LVALUE_EXPR)
3236 && TREE_CODE (t) == COND_EXPR
3237 && TREE_CODE (TREE_OPERAND (t, 1)) == code
3238 && TREE_CODE (TREE_OPERAND (t, 2)) == code
3239 && (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0))
3240 == TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 2), 0)))
3241 && ! (INTEGRAL_TYPE_P (TREE_TYPE (t))
3242 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)))
3243 && TYPE_PRECISION (TREE_TYPE (t)) <= BITS_PER_WORD))
3244 t = build1 (code, type,
3246 TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 1), 0)),
3247 TREE_OPERAND (t, 0),
3248 TREE_OPERAND (TREE_OPERAND (t, 1), 0),
3249 TREE_OPERAND (TREE_OPERAND (t, 2), 0)));
3252 else if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3253 return fold (build (COND_EXPR, type, arg0,
3254 fold (build1 (code, type, integer_one_node)),
3255 fold (build1 (code, type, integer_zero_node))));
3257 else if (TREE_CODE_CLASS (code) == '2'
3258 || TREE_CODE_CLASS (code) == '<')
3260 if (TREE_CODE (arg1) == COMPOUND_EXPR)
3261 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3262 fold (build (code, type,
3263 arg0, TREE_OPERAND (arg1, 1))));
3264 else if (TREE_CODE (arg1) == COND_EXPR
3265 || TREE_CODE_CLASS (TREE_CODE (arg1)) == '<')
3267 tree test, true_value, false_value;
3269 if (TREE_CODE (arg1) == COND_EXPR)
3271 test = TREE_OPERAND (arg1, 0);
3272 true_value = TREE_OPERAND (arg1, 1);
3273 false_value = TREE_OPERAND (arg1, 2);
3277 tree testtype = TREE_TYPE (arg1);
3279 true_value = convert (testtype, integer_one_node);
3280 false_value = convert (testtype, integer_zero_node);
3283 /* If ARG0 is complex we want to make sure we only evaluate
3284 it once. Though this is only required if it is volatile, it
3285 might be more efficient even if it is not. However, if we
3286 succeed in folding one part to a constant, we do not need
3287 to make this SAVE_EXPR. Since we do this optimization
3288 primarily to see if we do end up with a constant and this
3289 SAVE_EXPR interferes with later optimizations, suppressing
3290 it when we can is important. */
3292 if (TREE_CODE (arg0) != SAVE_EXPR
3293 && ((TREE_CODE (arg0) != VAR_DECL
3294 && TREE_CODE (arg0) != PARM_DECL)
3295 || TREE_SIDE_EFFECTS (arg0)))
3297 tree lhs = fold (build (code, type, arg0, true_value));
3298 tree rhs = fold (build (code, type, arg0, false_value));
3300 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs))
3301 return fold (build (COND_EXPR, type, test, lhs, rhs));
3303 arg0 = save_expr (arg0);
3306 test = fold (build (COND_EXPR, type, test,
3307 fold (build (code, type, arg0, true_value)),
3308 fold (build (code, type, arg0, false_value))));
3309 if (TREE_CODE (arg0) == SAVE_EXPR)
3310 return build (COMPOUND_EXPR, type,
3311 convert (void_type_node, arg0),
3312 strip_compound_expr (test, arg0));
3314 return convert (type, test);
3317 else if (TREE_CODE (arg0) == COMPOUND_EXPR)
3318 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3319 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3320 else if (TREE_CODE (arg0) == COND_EXPR
3321 || TREE_CODE_CLASS (TREE_CODE (arg0)) == '<')
3323 tree test, true_value, false_value;
3325 if (TREE_CODE (arg0) == COND_EXPR)
3327 test = TREE_OPERAND (arg0, 0);
3328 true_value = TREE_OPERAND (arg0, 1);
3329 false_value = TREE_OPERAND (arg0, 2);
3333 tree testtype = TREE_TYPE (arg0);
3335 true_value = convert (testtype, integer_one_node);
3336 false_value = convert (testtype, integer_zero_node);
3339 if (TREE_CODE (arg1) != SAVE_EXPR
3340 && ((TREE_CODE (arg1) != VAR_DECL
3341 && TREE_CODE (arg1) != PARM_DECL)
3342 || TREE_SIDE_EFFECTS (arg1)))
3344 tree lhs = fold (build (code, type, true_value, arg1));
3345 tree rhs = fold (build (code, type, false_value, arg1));
3347 if (TREE_CONSTANT (lhs) || TREE_CONSTANT (rhs)
3348 || TREE_CONSTANT (arg1))
3349 return fold (build (COND_EXPR, type, test, lhs, rhs));
3351 arg1 = save_expr (arg1);
3354 test = fold (build (COND_EXPR, type, test,
3355 fold (build (code, type, true_value, arg1)),
3356 fold (build (code, type, false_value, arg1))));
3357 if (TREE_CODE (arg1) == SAVE_EXPR)
3358 return build (COMPOUND_EXPR, type,
3359 convert (void_type_node, arg1),
3360 strip_compound_expr (test, arg1));
3362 return convert (type, test);
3365 else if (TREE_CODE_CLASS (code) == '<'
3366 && TREE_CODE (arg0) == COMPOUND_EXPR)
3367 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg0, 0),
3368 fold (build (code, type, TREE_OPERAND (arg0, 1), arg1)));
3369 else if (TREE_CODE_CLASS (code) == '<'
3370 && TREE_CODE (arg1) == COMPOUND_EXPR)
3371 return build (COMPOUND_EXPR, type, TREE_OPERAND (arg1, 0),
3372 fold (build (code, type, arg0, TREE_OPERAND (arg1, 1))));
3384 return fold (DECL_INITIAL (t));
3389 case FIX_TRUNC_EXPR:
3390 /* Other kinds of FIX are not handled properly by fold_convert. */
3392 if (TREE_TYPE (TREE_OPERAND (t, 0)) == TREE_TYPE (t))
3393 return TREE_OPERAND (t, 0);
3395 /* Handle cases of two conversions in a row. */
3396 if (TREE_CODE (TREE_OPERAND (t, 0)) == NOP_EXPR
3397 || TREE_CODE (TREE_OPERAND (t, 0)) == CONVERT_EXPR)
3399 tree inside_type = TREE_TYPE (TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3400 tree inter_type = TREE_TYPE (TREE_OPERAND (t, 0));
3401 tree final_type = TREE_TYPE (t);
3402 int inside_int = INTEGRAL_TYPE_P (inside_type);
3403 int inside_ptr = POINTER_TYPE_P (inside_type);
3404 int inside_float = FLOAT_TYPE_P (inside_type);
3405 int inside_prec = TYPE_PRECISION (inside_type);
3406 int inside_unsignedp = TREE_UNSIGNED (inside_type);
3407 int inter_int = INTEGRAL_TYPE_P (inter_type);
3408 int inter_ptr = POINTER_TYPE_P (inter_type);
3409 int inter_float = FLOAT_TYPE_P (inter_type);
3410 int inter_prec = TYPE_PRECISION (inter_type);
3411 int inter_unsignedp = TREE_UNSIGNED (inter_type);
3412 int final_int = INTEGRAL_TYPE_P (final_type);
3413 int final_ptr = POINTER_TYPE_P (final_type);
3414 int final_float = FLOAT_TYPE_P (final_type);
3415 int final_prec = TYPE_PRECISION (final_type);
3416 int final_unsignedp = TREE_UNSIGNED (final_type);
3418 /* In addition to the cases of two conversions in a row
3419 handled below, if we are converting something to its own
3420 type via an object of identical or wider precision, neither
3421 conversion is needed. */
3422 if (inside_type == final_type
3423 && ((inter_int && final_int) || (inter_float && final_float))
3424 && inter_prec >= final_prec)
3425 return TREE_OPERAND (TREE_OPERAND (t, 0), 0);
3427 /* Likewise, if the intermediate and final types are either both
3428 float or both integer, we don't need the middle conversion if
3429 it is wider than the final type and doesn't change the signedness
3430 (for integers). Avoid this if the final type is a pointer
3431 since then we sometimes need the inner conversion. */
3432 if ((((inter_int || inter_ptr) && (inside_int || inside_ptr))
3433 || (inter_float && inside_float))
3434 && inter_prec >= inside_prec
3435 && (inter_float || inter_unsignedp == inside_unsignedp)
3437 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3439 /* Two conversions in a row are not needed unless:
3440 - some conversion is floating-point (overstrict for now), or
3441 - the intermediate type is narrower than both the initial and final types, or
3443 - the intermediate type and innermost type differ in signedness,
3444 and the outermost type is wider than the intermediate, or
3445 - the initial type is a pointer type and the precisions of the
3446 intermediate and final types differ, or
3447 - the final type is a pointer type and the precisions of the
3448 initial and intermediate types differ. */
3449 if (! inside_float && ! inter_float && ! final_float
3450 && (inter_prec > inside_prec || inter_prec > final_prec)
3451 && ! (inside_int && inter_int
3452 && inter_unsignedp != inside_unsignedp
3453 && inter_prec < final_prec)
3454 && ((inter_unsignedp && inter_prec > inside_prec)
3455 == (final_unsignedp && final_prec > inter_prec))
3456 && ! (inside_ptr && inter_prec != final_prec)
3457 && ! (final_ptr && inside_prec != inter_prec))
3458 return convert (final_type, TREE_OPERAND (TREE_OPERAND (t, 0), 0));
3461 if (TREE_CODE (TREE_OPERAND (t, 0)) == MODIFY_EXPR
3462 && TREE_CONSTANT (TREE_OPERAND (TREE_OPERAND (t, 0), 1))
3463 /* Detect assigning a bitfield. */
3464 && !(TREE_CODE (TREE_OPERAND (TREE_OPERAND (t, 0), 0)) == COMPONENT_REF
3465 && DECL_BIT_FIELD (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (t, 0), 0), 1))))
3467 /* Don't leave an assignment inside a conversion
3468 unless assigning a bitfield. */
3469 tree prev = TREE_OPERAND (t, 0);
3470 TREE_OPERAND (t, 0) = TREE_OPERAND (prev, 1);
3471 /* First do the assignment, then return converted constant. */
3472 t = build (COMPOUND_EXPR, TREE_TYPE (t), prev, fold (t));
3478 TREE_CONSTANT (t) = TREE_CONSTANT (arg0);
3481 return fold_convert (t, arg0);
3483 #if 0 /* This loses on &"foo"[0]. */
3488 /* Fold an expression like: "foo"[2] */
3489 if (TREE_CODE (arg0) == STRING_CST
3490 && TREE_CODE (arg1) == INTEGER_CST
3491 && !TREE_INT_CST_HIGH (arg1)
3492 && (i = TREE_INT_CST_LOW (arg1)) < TREE_STRING_LENGTH (arg0))
3494 t = build_int_2 (TREE_STRING_POINTER (arg0)[i], 0);
3495 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (arg0));
3496 force_fit_type (t, 0);
3503 if (TREE_CODE (arg0) == CONSTRUCTOR)
3505 tree m = purpose_member (arg1, CONSTRUCTOR_ELTS (arg0));
3512 TREE_CONSTANT (t) = wins;
3518 if (TREE_CODE (arg0) == INTEGER_CST)
3520 HOST_WIDE_INT low, high;
3521 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3522 TREE_INT_CST_HIGH (arg0),
3524 t = build_int_2 (low, high);
3525 TREE_TYPE (t) = type;
3527 = (TREE_OVERFLOW (arg0)
3528 | force_fit_type (t, overflow));
3529 TREE_CONSTANT_OVERFLOW (t)
3530 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3532 else if (TREE_CODE (arg0) == REAL_CST)
3533 t = build_real (type, REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3534 TREE_TYPE (t) = type;
3536 else if (TREE_CODE (arg0) == NEGATE_EXPR)
3537 return TREE_OPERAND (arg0, 0);
3539 /* Convert - (a - b) to (b - a) for non-floating-point. */
3540 else if (TREE_CODE (arg0) == MINUS_EXPR && ! FLOAT_TYPE_P (type))
3541 return build (MINUS_EXPR, type, TREE_OPERAND (arg0, 1),
3542 TREE_OPERAND (arg0, 0));
3549 if (TREE_CODE (arg0) == INTEGER_CST)
3551 if (! TREE_UNSIGNED (type)
3552 && TREE_INT_CST_HIGH (arg0) < 0)
3554 HOST_WIDE_INT low, high;
3555 int overflow = neg_double (TREE_INT_CST_LOW (arg0),
3556 TREE_INT_CST_HIGH (arg0),
3558 t = build_int_2 (low, high);
3559 TREE_TYPE (t) = type;
3561 = (TREE_OVERFLOW (arg0)
3562 | force_fit_type (t, overflow));
3563 TREE_CONSTANT_OVERFLOW (t)
3564 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg0);
3567 else if (TREE_CODE (arg0) == REAL_CST)
3569 if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (arg0)))
3570 t = build_real (type,
3571 REAL_VALUE_NEGATE (TREE_REAL_CST (arg0)));
3573 TREE_TYPE (t) = type;
3575 else if (TREE_CODE (arg0) == ABS_EXPR || TREE_CODE (arg0) == NEGATE_EXPR)
3576 return build1 (ABS_EXPR, type, TREE_OPERAND (arg0, 0));
3580 if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
3582 else if (TREE_CODE (arg0) == COMPLEX_EXPR)
3583 return build (COMPLEX_EXPR, TREE_TYPE (arg0),
3584 TREE_OPERAND (arg0, 0),
3585 fold (build1 (NEGATE_EXPR,
3586 TREE_TYPE (TREE_TYPE (arg0)),
3587 TREE_OPERAND (arg0, 1))));
3588 else if (TREE_CODE (arg0) == COMPLEX_CST)
3589 return build_complex (TREE_OPERAND (arg0, 0),
3590 fold (build1 (NEGATE_EXPR,
3591 TREE_TYPE (TREE_TYPE (arg0)),
3592 TREE_OPERAND (arg0, 1))));
3593 else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
3594 return fold (build (TREE_CODE (arg0), type,
3595 fold (build1 (CONJ_EXPR, type,
3596 TREE_OPERAND (arg0, 0))),
3597 fold (build1 (CONJ_EXPR,
3598 type, TREE_OPERAND (arg0, 1)))));
3599 else if (TREE_CODE (arg0) == CONJ_EXPR)
3600 return TREE_OPERAND (arg0, 0);
3606 if (TREE_CODE (arg0) == INTEGER_CST)
3607 t = build_int_2 (~ TREE_INT_CST_LOW (arg0),
3608 ~ TREE_INT_CST_HIGH (arg0));
3609 TREE_TYPE (t) = type;
3610 force_fit_type (t, 0);
3611 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg0);
3612 TREE_CONSTANT_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (arg0);
3614 else if (TREE_CODE (arg0) == BIT_NOT_EXPR)
3615 return TREE_OPERAND (arg0, 0);
3619 /* A + (-B) -> A - B */
3620 if (TREE_CODE (arg1) == NEGATE_EXPR)
3621 return fold (build (MINUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3622 else if (! FLOAT_TYPE_P (type))
3624 if (integer_zerop (arg1))
3625 return non_lvalue (convert (type, arg0));
3627 /* If we are adding two BIT_AND_EXPR's, both of which are and'ing
3628 with a constant, and the two constants have no bits in common,
3629 we should treat this as a BIT_IOR_EXPR since this may produce more simplifications. */
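/* For instance, (a & 0x03) + (b & 0x0c): the two terms have no set bits
   in common, so no carries can occur and the sum equals
   (a & 0x03) | (b & 0x0c).  */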
3631 if (TREE_CODE (arg0) == BIT_AND_EXPR
3632 && TREE_CODE (arg1) == BIT_AND_EXPR
3633 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3634 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3635 && integer_zerop (const_binop (BIT_AND_EXPR,
3636 TREE_OPERAND (arg0, 1),
3637 TREE_OPERAND (arg1, 1), 0)))
3639 code = BIT_IOR_EXPR;
3643 /* (A * C) + (B * C) -> (A+B) * C. Since we are most concerned
3644 about the case where C is a constant, just try one of the
3645 four possibilities. */
3647 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3648 && operand_equal_p (TREE_OPERAND (arg0, 1),
3649 TREE_OPERAND (arg1, 1), 0))
3650 return fold (build (MULT_EXPR, type,
3651 fold (build (PLUS_EXPR, type,
3652 TREE_OPERAND (arg0, 0),
3653 TREE_OPERAND (arg1, 0))),
3654 TREE_OPERAND (arg0, 1)));
3656 /* In IEEE floating point, x+0 may not equal x. */
3657 else if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3659 && real_zerop (arg1))
3660 return non_lvalue (convert (type, arg0));
3662 /* In most languages, can't associate operations on floats
3663 through parentheses. Rather than remember where the parentheses
3664 were, we don't associate floats at all. It shouldn't matter much.
3665 However, associating multiplications is only very slightly
3666 inaccurate, so do that if -ffast-math is specified. */
3667 if (FLOAT_TYPE_P (type)
3668 && ! (flag_fast_math && code == MULT_EXPR))
3671 /* The varsign == -1 cases happen only for addition and subtraction.
3672 It says that the arg that was split was really CON minus VAR.
3673 The rest of the code applies to all associative operations. */
3679 if (split_tree (arg0, code, &var, &con, &varsign))
3683 /* EXPR is (CON-VAR) +- ARG1. */
3684 /* If it is + and VAR==ARG1, return just CONST. */
3685 if (code == PLUS_EXPR && operand_equal_p (var, arg1, 0))
3686 return convert (TREE_TYPE (t), con);
3688 /* If ARG0 is a constant, don't change things around;
3689 instead keep all the constant computations together. */
3691 if (TREE_CONSTANT (arg0))
3694 /* Otherwise return (CON +- ARG1) - VAR. */
3695 t = build (MINUS_EXPR, type,
3696 fold (build (code, type, con, arg1)), var);
3700 /* EXPR is (VAR+CON) +- ARG1. */
3701 /* If it is - and VAR==ARG1, return just CONST. */
3702 if (code == MINUS_EXPR && operand_equal_p (var, arg1, 0))
3703 return convert (TREE_TYPE (t), con);
3705 /* If ARG0 is a constant, don't change things around;
3706 instead keep all the constant computations together. */
3708 if (TREE_CONSTANT (arg0))
3711 /* Otherwise return VAR +- (ARG1 +- CON). */
3712 tem = fold (build (code, type, arg1, con));
3713 t = build (code, type, var, tem);
3715 if (integer_zerop (tem)
3716 && (code == PLUS_EXPR || code == MINUS_EXPR))
3717 return convert (type, var);
3718 /* If we have x +/- (c - d) [c an explicit integer]
3719 change it to x -/+ (d - c) since if d is relocatable
3720 then the latter can be a single immediate insn
3721 and the former cannot. */
3722 if (TREE_CODE (tem) == MINUS_EXPR
3723 && TREE_CODE (TREE_OPERAND (tem, 0)) == INTEGER_CST)
3725 tree tem1 = TREE_OPERAND (tem, 1);
3726 TREE_OPERAND (tem, 1) = TREE_OPERAND (tem, 0);
3727 TREE_OPERAND (tem, 0) = tem1;
3729 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3735 if (split_tree (arg1, code, &var, &con, &varsign))
3737 if (TREE_CONSTANT (arg1))
3742 (code == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR));
3744 /* EXPR is ARG0 +- (CON +- VAR). */
3745 if (TREE_CODE (t) == MINUS_EXPR
3746 && operand_equal_p (var, arg0, 0))
3748 /* If VAR and ARG0 cancel, return just CON or -CON. */
3749 if (code == PLUS_EXPR)
3750 return convert (TREE_TYPE (t), con);
3751 return fold (build1 (NEGATE_EXPR, TREE_TYPE (t),
3752 convert (TREE_TYPE (t), con)));
3755 t = build (TREE_CODE (t), type,
3756 fold (build (code, TREE_TYPE (t), arg0, con)), var);
3758 if (integer_zerop (TREE_OPERAND (t, 0))
3759 && TREE_CODE (t) == PLUS_EXPR)
3760 return convert (TREE_TYPE (t), var);
3765 #if defined (REAL_IS_NOT_DOUBLE) && ! defined (REAL_ARITHMETIC)
3766 if (TREE_CODE (arg1) == REAL_CST)
3768 #endif /* REAL_IS_NOT_DOUBLE, and no REAL_ARITHMETIC */
3770 t1 = const_binop (code, arg0, arg1, 0);
3771 if (t1 != NULL_TREE)
3773 /* The return value should always have
3774 the same type as the original expression. */
3775 TREE_TYPE (t1) = TREE_TYPE (t);
3781 if (! FLOAT_TYPE_P (type))
3783 if (! wins && integer_zerop (arg0))
3784 return build1 (NEGATE_EXPR, type, arg1);
3785 if (integer_zerop (arg1))
3786 return non_lvalue (convert (type, arg0));
3788 /* (A * C) - (B * C) -> (A-B) * C. Since we are most concerned
3789 about the case where C is a constant, just try one of the
3790 four possibilities. */
3792 if (TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (arg1) == MULT_EXPR
3793 && operand_equal_p (TREE_OPERAND (arg0, 1),
3794 TREE_OPERAND (arg1, 1), 0))
3795 return fold (build (MULT_EXPR, type,
3796 fold (build (MINUS_EXPR, type,
3797 TREE_OPERAND (arg0, 0),
3798 TREE_OPERAND (arg1, 0))),
3799 TREE_OPERAND (arg0, 1)));
3801 /* Convert A - (-B) to A + B. */
3802 else if (TREE_CODE (arg1) == NEGATE_EXPR)
3803 return fold (build (PLUS_EXPR, type, arg0, TREE_OPERAND (arg1, 0)));
3805 else if (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3808 /* Except with IEEE floating point, 0-x equals -x. */
3809 if (! wins && real_zerop (arg0))
3810 return build1 (NEGATE_EXPR, type, arg1);
3811 /* Except with IEEE floating point, x-0 equals x. */
3812 if (real_zerop (arg1))
3813 return non_lvalue (convert (type, arg0));
3816 /* Fold &x - &x. This can happen from &x.foo - &x.
3817 This is unsafe for certain floats even in non-IEEE formats.
3818 In IEEE, it is unsafe because it does wrong for NaNs.
3819 Also note that operand_equal_p is always false if an operand is volatile. */
3822 if ((! FLOAT_TYPE_P (type) || flag_fast_math)
3823 && operand_equal_p (arg0, arg1, 0))
3824 return convert (type, integer_zero_node);
3829 if (! FLOAT_TYPE_P (type))
3831 if (integer_zerop (arg1))
3832 return omit_one_operand (type, arg1, arg0);
3833 if (integer_onep (arg1))
3834 return non_lvalue (convert (type, arg0));
3836 /* ((A / C) * C) is A if the division is an
3837 EXACT_DIV_EXPR. Since C is normally a constant,
3838 just check for one of the four possibilities. */
3840 if (TREE_CODE (arg0) == EXACT_DIV_EXPR
3841 && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
3842 return TREE_OPERAND (arg0, 0);
3844 /* (a * (1 << b)) is (a << b) */
3845 if (TREE_CODE (arg1) == LSHIFT_EXPR
3846 && integer_onep (TREE_OPERAND (arg1, 0)))
3847 return fold (build (LSHIFT_EXPR, type, arg0,
3848 TREE_OPERAND (arg1, 1)));
3849 if (TREE_CODE (arg0) == LSHIFT_EXPR
3850 && integer_onep (TREE_OPERAND (arg0, 0)))
3851 return fold (build (LSHIFT_EXPR, type, arg1,
3852 TREE_OPERAND (arg0, 1)));
3856 /* x*0 is 0, except for IEEE floating point. */
3857 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
3859 && real_zerop (arg1))
3860 return omit_one_operand (type, arg1, arg0);
3861 /* In IEEE floating point, x*1 is not equivalent to x for snans.
3862 However, ANSI says we can drop signals,
3863 so we can do this anyway. */
3864 if (real_onep (arg1))
3865 return non_lvalue (convert (type, arg0));
3867 if (! wins && real_twop (arg1))
3869 tree arg = save_expr (arg0);
3870 return build (PLUS_EXPR, type, arg, arg);
3877 if (integer_all_onesp (arg1))
3878 return omit_one_operand (type, arg1, arg0);
3879 if (integer_zerop (arg1))
3880 return non_lvalue (convert (type, arg0));
3881 t1 = distribute_bit_expr (code, type, arg0, arg1);
3882 if (t1 != NULL_TREE)
3885 /* (A << C1) | (A >> C2), if A is unsigned and C1+C2 is the size of A,
3886 is a rotate of A by C1 bits. */
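/* For instance, with a 32-bit unsigned A, (A << 5) | (A >> 27) is A
   rotated left by 5 bits.  */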
3888 if ((TREE_CODE (arg0) == RSHIFT_EXPR
3889 || TREE_CODE (arg0) == LSHIFT_EXPR)
3890 && (TREE_CODE (arg1) == RSHIFT_EXPR
3891 || TREE_CODE (arg1) == LSHIFT_EXPR)
3892 && TREE_CODE (arg0) != TREE_CODE (arg1)
3893 && operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1,0), 0)
3894 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))
3895 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
3896 && TREE_CODE (TREE_OPERAND (arg1, 1)) == INTEGER_CST
3897 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
3898 && TREE_INT_CST_HIGH (TREE_OPERAND (arg1, 1)) == 0
3899 && ((TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))
3900 + TREE_INT_CST_LOW (TREE_OPERAND (arg1, 1)))
3901 == TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
3902 return build (LROTATE_EXPR, type, TREE_OPERAND (arg0, 0),
3903 TREE_CODE (arg0) == LSHIFT_EXPR
3904 ? TREE_OPERAND (arg0, 1) : TREE_OPERAND (arg1, 1));
3909 if (integer_zerop (arg1))
3910 return non_lvalue (convert (type, arg0));
3911 if (integer_all_onesp (arg1))
3912 return fold (build1 (BIT_NOT_EXPR, type, arg0));
3917 if (integer_all_onesp (arg1))
3918 return non_lvalue (convert (type, arg0));
3919 if (integer_zerop (arg1))
3920 return omit_one_operand (type, arg1, arg0);
3921 t1 = distribute_bit_expr (code, type, arg0, arg1);
3922 if (t1 != NULL_TREE)
3924 /* Simplify ((int)c & 0377) into (int)c, if c is unsigned char. */
3925 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == NOP_EXPR
3926 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg1, 0))))
3928 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg1, 0)));
3929 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3930 && (~TREE_INT_CST_LOW (arg0)
3931 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3932 return build1 (NOP_EXPR, type, TREE_OPERAND (arg1, 0));
3934 if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR
3935 && TREE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0))))
3937 int prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
3938 if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
3939 && (~TREE_INT_CST_LOW (arg1)
3940 & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
3941 return build1 (NOP_EXPR, type, TREE_OPERAND (arg0, 0));
3945 case BIT_ANDTC_EXPR:
3946 if (integer_all_onesp (arg0))
3947 return non_lvalue (convert (type, arg1));
3948 if (integer_zerop (arg0))
3949 return omit_one_operand (type, arg0, arg1);
3950 if (TREE_CODE (arg1) == INTEGER_CST)
3952 arg1 = fold (build1 (BIT_NOT_EXPR, type, arg1));
3953 code = BIT_AND_EXPR;
3959 /* In most cases, do nothing with a divide by zero. */
3960 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3961 #ifndef REAL_INFINITY
3962 if (TREE_CODE (arg1) == REAL_CST && real_zerop (arg1))
3965 #endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
3967 /* In IEEE floating point, x/1 is not equivalent to x for snans.
3968 However, ANSI says we can drop signals, so we can do this anyway. */
3969 if (real_onep (arg1))
3970 return non_lvalue (convert (type, arg0));
3972 /* If ARG1 is a constant, we can convert this to a multiply by the
3973 reciprocal. This does not have the same rounding properties,
3974 so only do this if -ffast-math. We can actually always safely
3975 do it if ARG1 is a power of two, but it's hard to tell if it is
3976 or not in a portable manner. */
3977 if (TREE_CODE (arg1) == REAL_CST && flag_fast_math
3978 && 0 != (tem = const_binop (code, build_real (type, dconst1),
3980 return fold (build (MULT_EXPR, type, arg0, tem));
3984 case TRUNC_DIV_EXPR:
3985 case ROUND_DIV_EXPR:
3986 case FLOOR_DIV_EXPR:
3988 case EXACT_DIV_EXPR:
3989 if (integer_onep (arg1))
3990 return non_lvalue (convert (type, arg0));
3991 if (integer_zerop (arg1))
3994 /* If we have ((a / C1) / C2) where both divisions are the same type, try
3995 to simplify. First see if C1 * C2 overflows or not. */
3996 if (TREE_CODE (arg0) == code && TREE_CODE (arg1) == INTEGER_CST
3997 && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
4001 new_divisor = const_binop (MULT_EXPR, TREE_OPERAND (arg0, 1), arg1, 0);
4002 tem = const_binop (FLOOR_DIV_EXPR, new_divisor, arg1, 0);
4004 if (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_LOW (tem)
4005 && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == TREE_INT_CST_HIGH (tem))
4007 /* If no overflow, divide by C1*C2. */
4008 return fold (build (code, type, TREE_OPERAND (arg0, 0), new_divisor));
4012 /* Look for ((a * C1) / C3) or (((a * C1) + C2) / C3),
4013 where C1 % C3 == 0 or C3 % C1 == 0. We can simplify these
4014 expressions, which often appear in the offsets or sizes of
4015 objects with a varying size. Only deal with positive divisors
4016 and multiplicands. If C2 is negative, we must have C2 % C3 == 0.
4018 Look for NOPs and SAVE_EXPRs inside. */
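/* For instance (illustrative constants only): (a * 8 + 4) / 2, with
   everything nonnegative, becomes a * 4 + 2, i.e. A * (C1/C3) + (C2/C3)
   with C1 == 8, C2 == 4, C3 == 2.  */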
4020 if (TREE_CODE (arg1) == INTEGER_CST
4021 && tree_int_cst_sgn (arg1) >= 0)
4023 int have_save_expr = 0;
4024 tree c2 = integer_zero_node;
4027 if (TREE_CODE (xarg0) == SAVE_EXPR)
4028 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4032 if (TREE_CODE (xarg0) == PLUS_EXPR
4033 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4034 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4035 else if (TREE_CODE (xarg0) == MINUS_EXPR
4036 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4037 /* If we are doing this computation unsigned, the negate is incorrect. */
4039 && ! TREE_UNSIGNED (type))
4041 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4042 xarg0 = TREE_OPERAND (xarg0, 0);
4045 if (TREE_CODE (xarg0) == SAVE_EXPR)
4046 have_save_expr = 1, xarg0 = TREE_OPERAND (xarg0, 0);
4050 if (TREE_CODE (xarg0) == MULT_EXPR
4051 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4052 && tree_int_cst_sgn (TREE_OPERAND (xarg0, 1)) >= 0
4053 && (integer_zerop (const_binop (TRUNC_MOD_EXPR,
4054 TREE_OPERAND (xarg0, 1), arg1, 1))
4055 || integer_zerop (const_binop (TRUNC_MOD_EXPR, arg1,
4056 TREE_OPERAND (xarg0, 1), 1)))
4057 && (tree_int_cst_sgn (c2) >= 0
4058 || integer_zerop (const_binop (TRUNC_MOD_EXPR, c2,
4061 tree outer_div = integer_one_node;
4062 tree c1 = TREE_OPERAND (xarg0, 1);
4065 /* If C3 > C1, set them equal and do a divide by
4066 C3/C1 at the end of the operation. */
4067 if (tree_int_cst_lt (c1, c3))
4068 outer_div = const_binop (code, c3, c1, 0), c3 = c1;
4070 /* The result is A * (C1/C3) + (C2/C3). */
4071 t = fold (build (PLUS_EXPR, type,
4072 fold (build (MULT_EXPR, type,
4073 TREE_OPERAND (xarg0, 0),
4074 const_binop (code, c1, c3, 1))),
4075 const_binop (code, c2, c3, 1)));
4077 if (! integer_onep (outer_div))
4078 t = fold (build (code, type, t, convert (type, outer_div)));
4090 case FLOOR_MOD_EXPR:
4091 case ROUND_MOD_EXPR:
4092 case TRUNC_MOD_EXPR:
4093 if (integer_onep (arg1))
4094 return omit_one_operand (type, integer_zero_node, arg0);
4095 if (integer_zerop (arg1))
4098 /* Look for ((a * C1) % C3) or (((a * C1) + C2) % C3),
4099 where C1 % C3 == 0. Handle similarly to the division case,
4100 but don't bother with SAVE_EXPRs. */
4102 if (TREE_CODE (arg1) == INTEGER_CST
4103 && ! integer_zerop (arg1))
4105 tree c2 = integer_zero_node;
4108 if (TREE_CODE (xarg0) == PLUS_EXPR
4109 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST)
4110 c2 = TREE_OPERAND (xarg0, 1), xarg0 = TREE_OPERAND (xarg0, 0);
4111 else if (TREE_CODE (xarg0) == MINUS_EXPR
4112 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4113 && ! TREE_UNSIGNED (type))
4115 c2 = fold (build1 (NEGATE_EXPR, type, TREE_OPERAND (xarg0, 1)));
4116 xarg0 = TREE_OPERAND (xarg0, 0);
4121 if (TREE_CODE (xarg0) == MULT_EXPR
4122 && TREE_CODE (TREE_OPERAND (xarg0, 1)) == INTEGER_CST
4123 && integer_zerop (const_binop (TRUNC_MOD_EXPR,
4124 TREE_OPERAND (xarg0, 1),
4126 && tree_int_cst_sgn (c2) >= 0)
4127 /* The result is (C2%C3). */
4128 return omit_one_operand (type, const_binop (code, c2, arg1, 1),
4129 TREE_OPERAND (xarg0, 0));
4138 if (integer_zerop (arg1))
4139 return non_lvalue (convert (type, arg0));
4140 /* Since negative shift count is not well-defined,
4141 don't try to compute it in the compiler. */
4142 if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_sgn (arg1) < 0)
4144 /* Rewrite an LROTATE_EXPR by a constant into an
4145 RROTATE_EXPR by a new constant. */
4146 if (code == LROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST)
4148 TREE_SET_CODE (t, RROTATE_EXPR);
4149 code = RROTATE_EXPR;
4150 TREE_OPERAND (t, 1) = arg1
4153 convert (TREE_TYPE (arg1),
4154 build_int_2 (GET_MODE_BITSIZE (TYPE_MODE (type)), 0)),
4156 if (tree_int_cst_sgn (arg1) < 0)
      /* If we have a rotate of a bit operation with the rotate count and
	 the second operand of the bit operation both constant,
	 permute the two operations.  */
      if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
	  && (TREE_CODE (arg0) == BIT_AND_EXPR
	      || TREE_CODE (arg0) == BIT_ANDTC_EXPR
	      || TREE_CODE (arg0) == BIT_IOR_EXPR
	      || TREE_CODE (arg0) == BIT_XOR_EXPR)
	  && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build (code, type,
					 TREE_OPERAND (arg0, 0), arg1)),
			    fold (build (code, type,
					 TREE_OPERAND (arg0, 1), arg1))));
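      /* That is, (x & C) rotated right by N becomes
	 (x rotated right by N) & (C rotated right by N); the rotate of the
	 constant operand then folds away immediately.  */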
      /* Two consecutive rotates adding up to the width of the mode can
	 be ignored.  */
      if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
	  && TREE_CODE (arg0) == RROTATE_EXPR
	  && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
	  && TREE_INT_CST_HIGH (arg1) == 0
	  && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
	  && ((TREE_INT_CST_LOW (arg1)
	       + TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)))
	      == GET_MODE_BITSIZE (TYPE_MODE (type))))
	return TREE_OPERAND (arg0, 0);
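      /* E.g. rotating a 32-bit value right by 10 and then right by 22
	 brings every bit back to its original position, so the rotated
	 operand itself is returned.  */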
      goto binary;

    case MIN_EXPR:
      if (operand_equal_p (arg0, arg1, 0))
	return arg0;
      if (INTEGRAL_TYPE_P (type)
	  && operand_equal_p (arg1, TYPE_MIN_VALUE (type), 1))
	return omit_one_operand (type, arg1, arg0);
      goto associate;

    case MAX_EXPR:
      if (operand_equal_p (arg0, arg1, 0))
	return arg0;
      if (INTEGRAL_TYPE_P (type)
	  && operand_equal_p (arg1, TYPE_MAX_VALUE (type), 1))
	return omit_one_operand (type, arg1, arg0);
      goto associate;
    case TRUTH_NOT_EXPR:
      /* Note that the operand of this must be an int
	 and its values must be 0 or 1.
	 ("true" is a fixed value perhaps depending on the language,
	 but we don't handle values other than 1 correctly yet.)  */
      tem = invert_truthvalue (arg0);
      /* Avoid infinite recursion.  */
      if (TREE_CODE (tem) == TRUTH_NOT_EXPR)
	return t;
      return convert (type, tem);

    case TRUTH_ANDIF_EXPR:
      /* Note that the operands of this must be ints
	 and their values must be 0 or 1.
	 ("true" is a fixed value perhaps depending on the language.)  */
      /* If first arg is constant zero, return it.  */
      if (integer_zerop (arg0))
	return arg0;
    case TRUTH_AND_EXPR:
      /* If either arg is constant true, drop it.  */
      if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
	return non_lvalue (arg1);
      if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
	return non_lvalue (arg0);
      /* If second arg is constant zero, result is zero, but first arg
	 must be evaluated.  */
      if (integer_zerop (arg1))
	return omit_one_operand (type, arg1, arg0);

    truth_andor:
      /* We only do these simplifications if we are optimizing.  */
      if (! optimize)
	return t;

      /* Check for things like (A || B) && (A || C).  We can convert this
	 to A || (B && C).  Note that either operator can be any of the four
	 truth and/or operations and the transformation will still be
	 valid.  Also note that we only care about order for the
	 ANDIF and ORIF operators.  */
      if (TREE_CODE (arg0) == TREE_CODE (arg1)
	  && (TREE_CODE (arg0) == TRUTH_ANDIF_EXPR
	      || TREE_CODE (arg0) == TRUTH_ORIF_EXPR
	      || TREE_CODE (arg0) == TRUTH_AND_EXPR
	      || TREE_CODE (arg0) == TRUTH_OR_EXPR))
	{
	  tree a00 = TREE_OPERAND (arg0, 0);
	  tree a01 = TREE_OPERAND (arg0, 1);
	  tree a10 = TREE_OPERAND (arg1, 0);
	  tree a11 = TREE_OPERAND (arg1, 1);
	  int commutative = ((TREE_CODE (arg0) == TRUTH_OR_EXPR
			      || TREE_CODE (arg0) == TRUTH_AND_EXPR)
			     && (code == TRUTH_AND_EXPR
				 || code == TRUTH_OR_EXPR));

	  if (operand_equal_p (a00, a10, 0))
	    return fold (build (TREE_CODE (arg0), type, a00,
				fold (build (code, type, a01, a11))));
	  else if (commutative && operand_equal_p (a00, a11, 0))
	    return fold (build (TREE_CODE (arg0), type, a00,
				fold (build (code, type, a01, a10))));
	  else if (commutative && operand_equal_p (a01, a10, 0))
	    return fold (build (TREE_CODE (arg0), type, a01,
				fold (build (code, type, a00, a11))));

	  /* This case is tricky because we must either have commutative
	     operators or else A10 must not have side-effects.  */

	  else if ((commutative || ! TREE_SIDE_EFFECTS (a10))
		   && operand_equal_p (a01, a11, 0))
	    return fold (build (TREE_CODE (arg0), type,
				fold (build (code, type, a00, a10)),
				a01));
	}
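      /* For instance, (a || b) && (a || c) is rewritten above as
	 a || (b && c), and, because plain TRUTH_AND/TRUTH_OR are
	 commutative, (b && a) || (c && a) becomes (b || c) && a.  */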
      /* Check for the possibility of merging component references.  If our
	 lhs is another similar operation, try to merge its rhs with our
	 rhs.  Then try to merge our lhs and rhs.  */
      if (TREE_CODE (arg0) == code
	  && 0 != (tem = fold_truthop (code, type,
				       TREE_OPERAND (arg0, 1), arg1)))
	return fold (build (code, type, TREE_OPERAND (arg0, 0), tem));

      if ((tem = fold_truthop (code, type, arg0, arg1)) != 0)
	return tem;

      return t;

    case TRUTH_ORIF_EXPR:
      /* Note that the operands of this must be ints
	 and their values must be 0 or true.
	 ("true" is a fixed value perhaps depending on the language.)  */
      /* If first arg is constant true, return it.  */
      if (TREE_CODE (arg0) == INTEGER_CST && ! integer_zerop (arg0))
	return arg0;
    case TRUTH_OR_EXPR:
      /* If either arg is constant zero, drop it.  */
      if (TREE_CODE (arg0) == INTEGER_CST && integer_zerop (arg0))
	return non_lvalue (arg1);
      if (TREE_CODE (arg1) == INTEGER_CST && integer_zerop (arg1))
	return non_lvalue (arg0);
      /* If second arg is constant true, result is true, but we must
	 evaluate first arg.  */
      if (TREE_CODE (arg1) == INTEGER_CST && ! integer_zerop (arg1))
	return omit_one_operand (type, arg1, arg0);
      goto truth_andor;

    case TRUTH_XOR_EXPR:
      /* If either arg is constant zero, drop it.  */
      if (integer_zerop (arg0))
	return non_lvalue (arg1);
      if (integer_zerop (arg1))
	return non_lvalue (arg0);
      /* If either arg is constant true, this is a logical inversion.  */
      if (integer_onep (arg0))
	return non_lvalue (invert_truthvalue (arg1));
      if (integer_onep (arg1))
	return non_lvalue (invert_truthvalue (arg0));
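      /* So a TRUTH_XOR of X with constant true folds to the logical
	 inversion of X.  */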
      break;

    case EQ_EXPR:
    case NE_EXPR:
    case LT_EXPR:
    case GT_EXPR:
    case LE_EXPR:
    case GE_EXPR:
      /* If one arg is a constant integer, put it last.  */
      if (TREE_CODE (arg0) == INTEGER_CST
	  && TREE_CODE (arg1) != INTEGER_CST)
	{
	  TREE_OPERAND (t, 0) = arg1;
	  TREE_OPERAND (t, 1) = arg0;
	  arg0 = TREE_OPERAND (t, 0);
	  arg1 = TREE_OPERAND (t, 1);
	  code = swap_tree_comparison (code);
	  TREE_SET_CODE (t, code);
	}
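      /* Thus 5 < x is canonicalized to x > 5 before the checks below.  */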
      /* Convert foo++ == CONST into ++foo == CONST + INCR.
	 First, see if one arg is constant; find the constant arg
	 and the other one.  */
      {
	tree constop = 0, varop;
	int constopnum = -1;

	if (TREE_CONSTANT (arg1))
	  constopnum = 1, constop = arg1, varop = arg0;
	if (TREE_CONSTANT (arg0))
	  constopnum = 0, constop = arg0, varop = arg1;

	if (constop && TREE_CODE (varop) == POSTINCREMENT_EXPR)
	  {
	    /* This optimization is invalid for ordered comparisons
	       if CONST+INCR overflows or if foo+incr might overflow.
	       This optimization is invalid for floating point due to rounding.
	       For pointer types we assume overflow doesn't happen.  */
	    if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
		|| (! FLOAT_TYPE_P (TREE_TYPE (varop))
		    && (code == EQ_EXPR || code == NE_EXPR)))
	      {
		tree newconst
		  = fold (build (PLUS_EXPR, TREE_TYPE (varop),
				 constop, TREE_OPERAND (varop, 1)));
		TREE_SET_CODE (varop, PREINCREMENT_EXPR);

		t = build (code, type, TREE_OPERAND (t, 0),
			   TREE_OPERAND (t, 1));
		TREE_OPERAND (t, constopnum) = newconst;
		return t;
	      }
	  }
	else if (constop && TREE_CODE (varop) == POSTDECREMENT_EXPR)
	  {
	    if (TREE_CODE (TREE_TYPE (varop)) == POINTER_TYPE
		|| (! FLOAT_TYPE_P (TREE_TYPE (varop))
		    && (code == EQ_EXPR || code == NE_EXPR)))
	      {
		tree newconst
		  = fold (build (MINUS_EXPR, TREE_TYPE (varop),
				 constop, TREE_OPERAND (varop, 1)));
		TREE_SET_CODE (varop, PREDECREMENT_EXPR);
		t = build (code, type, TREE_OPERAND (t, 0),
			   TREE_OPERAND (t, 1));
		TREE_OPERAND (t, constopnum) = newconst;
		return t;
	      }
	  }
      }
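      /* The two blocks above rewrite, e.g., x++ == 5 (with an increment
	 of 1) as ++x == 6, keeping the constant operand foldable.  */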
      /* Change X >= CST to X > (CST - 1) if CST is positive.  */
      if (TREE_CODE (arg1) == INTEGER_CST
	  && TREE_CODE (arg0) != INTEGER_CST
	  && tree_int_cst_sgn (arg1) > 0)
	{
	  switch (TREE_CODE (t))
	    {
	    case GE_EXPR:
	      code = GT_EXPR;
	      arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
	      t = build (code, type, TREE_OPERAND (t, 0), arg1);
	      break;

	    case LT_EXPR:
	      code = LE_EXPR;
	      arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node, 0);
	      t = build (code, type, TREE_OPERAND (t, 0), arg1);
	      break;
	    }
	}
      /* If this is an EQ or NE comparison with zero and ARG0 is
	 (1 << foo) & bar, convert it to (bar >> foo) & 1.  Both require
	 two operations, but the latter can be done in one less insn
	 on machines that have only two-operand insns or on which a
	 constant cannot be the first operand.  */
      if (integer_zerop (arg1) && (code == EQ_EXPR || code == NE_EXPR)
	  && TREE_CODE (arg0) == BIT_AND_EXPR)
	{
	  if (TREE_CODE (TREE_OPERAND (arg0, 0)) == LSHIFT_EXPR
	      && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 0), 0)))
	    return
	      fold (build (code, type,
			   build (BIT_AND_EXPR, TREE_TYPE (arg0),
				  build (RSHIFT_EXPR,
					 TREE_TYPE (TREE_OPERAND (arg0, 0)),
					 TREE_OPERAND (arg0, 1),
					 TREE_OPERAND (TREE_OPERAND (arg0, 0), 1)),
				  convert (TREE_TYPE (arg0),
					   integer_one_node)),
			   arg1));
	  else if (TREE_CODE (TREE_OPERAND (arg0, 1)) == LSHIFT_EXPR
		   && integer_onep (TREE_OPERAND (TREE_OPERAND (arg0, 1), 0)))
	    return
	      fold (build (code, type,
			   build (BIT_AND_EXPR, TREE_TYPE (arg0),
				  build (RSHIFT_EXPR,
					 TREE_TYPE (TREE_OPERAND (arg0, 1)),
					 TREE_OPERAND (arg0, 0),
					 TREE_OPERAND (TREE_OPERAND (arg0, 1), 1)),
				  convert (TREE_TYPE (arg0),
					   integer_one_node)),
			   arg1));
	}
      /* If this is an NE or EQ comparison of zero against the result of a
	 signed MOD operation whose second operand is a power of 2, make
	 the MOD operation unsigned since it is simpler and equivalent.  */
      if ((code == NE_EXPR || code == EQ_EXPR)
	  && integer_zerop (arg1)
	  && ! TREE_UNSIGNED (TREE_TYPE (arg0))
	  && (TREE_CODE (arg0) == TRUNC_MOD_EXPR
	      || TREE_CODE (arg0) == CEIL_MOD_EXPR
	      || TREE_CODE (arg0) == FLOOR_MOD_EXPR
	      || TREE_CODE (arg0) == ROUND_MOD_EXPR)
	  && integer_pow2p (TREE_OPERAND (arg0, 1)))
	{
	  tree newtype = unsigned_type (TREE_TYPE (arg0));
	  tree newmod = build (TREE_CODE (arg0), newtype,
			       convert (newtype, TREE_OPERAND (arg0, 0)),
			       convert (newtype, TREE_OPERAND (arg0, 1)));

	  return build (code, type, newmod, convert (newtype, arg1));
	}
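      /* For instance, (x % 8) == 0 with signed x is rewritten using an
	 unsigned modulus, which can be computed with a simple mask.  */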
      /* If this is an NE comparison of zero with an AND of one, remove the
	 comparison since the AND will give the correct value.  */
      if (code == NE_EXPR && integer_zerop (arg1)
	  && TREE_CODE (arg0) == BIT_AND_EXPR
	  && integer_onep (TREE_OPERAND (arg0, 1)))
	return convert (type, arg0);

      /* If we have (A & C) == C where C is a power of 2, convert this into
	 (A & C) != 0.  Similarly for NE_EXPR.  */
      if ((code == EQ_EXPR || code == NE_EXPR)
	  && TREE_CODE (arg0) == BIT_AND_EXPR
	  && integer_pow2p (TREE_OPERAND (arg0, 1))
	  && operand_equal_p (TREE_OPERAND (arg0, 1), arg1, 0))
	return build (code == EQ_EXPR ? NE_EXPR : EQ_EXPR, type,
		      arg0, integer_zero_node);
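      /* E.g. (x & 4) == 4 becomes (x & 4) != 0, a form most targets can
	 test directly.  */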
      /* If X is unsigned, convert X < (1 << Y) into X >> Y == 0
	 and similarly for >= into !=.  */
      if ((code == LT_EXPR || code == GE_EXPR)
	  && TREE_UNSIGNED (TREE_TYPE (arg0))
	  && TREE_CODE (arg1) == LSHIFT_EXPR
	  && integer_onep (TREE_OPERAND (arg1, 0)))
	return build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
		      build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
			     TREE_OPERAND (arg1, 1)),
		      convert (TREE_TYPE (arg0), integer_zero_node));

      else if ((code == LT_EXPR || code == GE_EXPR)
	       && TREE_UNSIGNED (TREE_TYPE (arg0))
	       && (TREE_CODE (arg1) == NOP_EXPR
		   || TREE_CODE (arg1) == CONVERT_EXPR)
	       && TREE_CODE (TREE_OPERAND (arg1, 0)) == LSHIFT_EXPR
	       && integer_onep (TREE_OPERAND (TREE_OPERAND (arg1, 0), 0)))
	return
	  build (code == LT_EXPR ? EQ_EXPR : NE_EXPR, type,
		 convert (TREE_TYPE (arg0),
			  build (RSHIFT_EXPR, TREE_TYPE (arg0), arg0,
				 TREE_OPERAND (TREE_OPERAND (arg1, 0), 1))),
		 convert (TREE_TYPE (arg0), integer_zero_node));
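      /* That is, for unsigned X, X < (1 << Y) holds exactly when no bit at
	 or above position Y is set, i.e. when (X >> Y) == 0.  */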
      /* Simplify comparison of something with itself.  (For IEEE
	 floating-point, we can only do some of these simplifications.)  */
      if (operand_equal_p (arg0, arg1, 0))
	{
	  switch (code)
	    {
	    case EQ_EXPR:
	    case GE_EXPR:
	    case LE_EXPR:
	      if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
		{
		  t = build_int_2 (1, 0);
		  TREE_TYPE (t) = type;
		  return t;
		}
	      code = EQ_EXPR;
	      TREE_SET_CODE (t, code);
	      break;

	    case NE_EXPR:
	      /* For NE, we can only do this simplification if integer.  */
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0)))
		break;
	      /* ... fall through ...  */
	    case GT_EXPR:
	    case LT_EXPR:
	      t = build_int_2 (0, 0);
	      TREE_TYPE (t) = type;
	      return t;
	    }
	}
      /* An unsigned comparison against 0 can be simplified.  */
      if (integer_zerop (arg1)
	  && (INTEGRAL_TYPE_P (TREE_TYPE (arg1))
	      || TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE)
	  && TREE_UNSIGNED (TREE_TYPE (arg1)))
	{
	  switch (TREE_CODE (t))
	    {
	    case GT_EXPR:
	      code = NE_EXPR;
	      TREE_SET_CODE (t, NE_EXPR);
	      break;
	    case LE_EXPR:
	      code = EQ_EXPR;
	      TREE_SET_CODE (t, EQ_EXPR);
	      break;
	    case GE_EXPR:
	      return omit_one_operand (type,
				       convert (type, integer_one_node),
				       arg0);
	    case LT_EXPR:
	      return omit_one_operand (type,
				       convert (type, integer_zero_node),
				       arg0);
	    }
	}
      /* If we are comparing an expression that just has comparisons
	 of two integer values, arithmetic expressions of those comparisons,
	 and constants, we can simplify it.  There are only three cases
	 to check: the two values can either be equal, the first can be
	 greater, or the second can be greater.  Fold the expression for
	 those three values.  Since each value must be 0 or 1, we have
	 eight possibilities, each of which corresponds to the constant 0
	 or 1 or one of the six possible comparisons.

	 This handles common cases like (a > b) == 0 but also handles
	 expressions like ((x > y) - (y > x)) > 0, which supposedly
	 occur in macroized code.  */

      if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) != INTEGER_CST)
	{
	  tree cval1 = 0, cval2 = 0;
	  int save_p = 0;

	  if (twoval_comparison_p (arg0, &cval1, &cval2, &save_p)
	      /* Don't handle degenerate cases here; they should already
		 have been handled anyway.  */
	      && cval1 != 0 && cval2 != 0
	      && ! (TREE_CONSTANT (cval1) && TREE_CONSTANT (cval2))
	      && TREE_TYPE (cval1) == TREE_TYPE (cval2)
	      && INTEGRAL_TYPE_P (TREE_TYPE (cval1))
	      && ! operand_equal_p (TYPE_MIN_VALUE (TREE_TYPE (cval1)),
				    TYPE_MAX_VALUE (TREE_TYPE (cval2)), 0))
	    {
	      tree maxval = TYPE_MAX_VALUE (TREE_TYPE (cval1));
	      tree minval = TYPE_MIN_VALUE (TREE_TYPE (cval1));

	      /* We can't just pass T to eval_subst in case cval1 or cval2
		 was the same as ARG1.  */

	      tree high_result
		= fold (build (code, type,
			       eval_subst (arg0, cval1, maxval, cval2, minval),
			       arg1));
	      tree equal_result
		= fold (build (code, type,
			       eval_subst (arg0, cval1, maxval, cval2, maxval),
			       arg1));
	      tree low_result
		= fold (build (code, type,
			       eval_subst (arg0, cval1, minval, cval2, maxval),
			       arg1));

	      /* All three of these results should be 0 or 1.  Confirm they
		 are.  Then use those values to select the proper code
		 to use.  */

	      if ((integer_zerop (high_result)
		   || integer_onep (high_result))
		  && (integer_zerop (equal_result)
		      || integer_onep (equal_result))
		  && (integer_zerop (low_result)
		      || integer_onep (low_result)))
		{
		  /* Make a 3-bit mask with the high-order bit being the
		     value for `>', the next for '=', and the low for '<'.  */
		  switch ((integer_onep (high_result) * 4)
			  + (integer_onep (equal_result) * 2)
			  + integer_onep (low_result))
		    {
		    case 0:
		      /* Always false.  */
		      return omit_one_operand (type, integer_zero_node, arg0);
		    case 1:
		      code = LT_EXPR;
		      break;
		    case 2:
		      code = EQ_EXPR;
		      break;
		    case 3:
		      code = LE_EXPR;
		      break;
		    case 4:
		      code = GT_EXPR;
		      break;
		    case 5:
		      code = NE_EXPR;
		      break;
		    case 6:
		      code = GE_EXPR;
		      break;
		    case 7:
		      /* Always true.  */
		      return omit_one_operand (type, integer_one_node, arg0);
		    }

		  t = build (code, type, cval1, cval2);
		  if (save_p)
		    return save_expr (t);
		  return t;
		}
	    }
	}
      /* If this is a comparison of a field, we may be able to simplify it.  */
      if ((TREE_CODE (arg0) == COMPONENT_REF
	   || TREE_CODE (arg0) == BIT_FIELD_REF)
	  && (code == EQ_EXPR || code == NE_EXPR)
	  /* Handle the constant case even without -O
	     to make sure the warnings are given.  */
	  && (optimize || TREE_CODE (arg1) == INTEGER_CST))
	{
	  t1 = optimize_bit_field_compare (code, type, arg0, arg1);
	  return t1 ? t1 : t;
	}
      /* If this is a comparison of complex values and either or both
	 sides are a COMPLEX_EXPR, it is best to split up the comparisons
	 and join them with a TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR.  This
	 may prevent needless evaluations.  */
      if ((code == EQ_EXPR || code == NE_EXPR)
	  && TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE
	  && (TREE_CODE (arg0) == COMPLEX_EXPR
	      || TREE_CODE (arg1) == COMPLEX_EXPR))
	{
	  tree subtype = TREE_TYPE (TREE_TYPE (arg0));
	  tree real0 = fold (build1 (REALPART_EXPR, subtype, arg0));
	  tree imag0 = fold (build1 (IMAGPART_EXPR, subtype, arg0));
	  tree real1 = fold (build1 (REALPART_EXPR, subtype, arg1));
	  tree imag1 = fold (build1 (IMAGPART_EXPR, subtype, arg1));

	  return fold (build ((code == EQ_EXPR ? TRUTH_ANDIF_EXPR
			       : TRUTH_ORIF_EXPR),
			      type,
			      fold (build (code, type, real0, real1)),
			      fold (build (code, type, imag0, imag1))));
	}
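      /* An equality test on complex operands thus becomes "real parts equal
	 && imaginary parts equal"; an inequality test uses || instead.  */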
      /* From here on, the only cases we handle are when the result is
	 known to be a constant.

	 To compute GT, swap the arguments and do LT.
	 To compute GE, do LT and invert the result.
	 To compute LE, swap the arguments, do LT and invert the result.
	 To compute NE, do EQ and invert the result.

	 Therefore, the code below must handle only EQ and LT.  */

      if (code == LE_EXPR || code == GT_EXPR)
	{
	  tem = arg0, arg0 = arg1, arg1 = tem;
	  code = swap_tree_comparison (code);
	}

      /* Note that it is safe to invert for real values here because we
	 will check below in the one case that it matters.  */

      if (code == NE_EXPR || code == GE_EXPR)
	{
	  invert = 1;
	  code = invert_tree_comparison (code);
	}

      /* Compute a result for LT or EQ if args permit;
	 otherwise return T.  */
      if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
	{
	  if (code == EQ_EXPR)
	    t1 = build_int_2 ((TREE_INT_CST_LOW (arg0)
			       == TREE_INT_CST_LOW (arg1))
			      && (TREE_INT_CST_HIGH (arg0)
				  == TREE_INT_CST_HIGH (arg1)),
			      0);
	  else
	    t1 = build_int_2 ((TREE_UNSIGNED (TREE_TYPE (arg0))
			       ? INT_CST_LT_UNSIGNED (arg0, arg1)
			       : INT_CST_LT (arg0, arg1)),
			      0);
	}

      /* Assume a nonexplicit constant cannot equal an explicit one,
	 since such code would be undefined anyway.
	 Exception: on sysvr4, using #pragma weak,
	 a label can come out as 0.  */
      else if (TREE_CODE (arg1) == INTEGER_CST
	       && !integer_zerop (arg1)
	       && TREE_CONSTANT (arg0)
	       && TREE_CODE (arg0) == ADDR_EXPR
	       && code == EQ_EXPR)
	t1 = build_int_2 (0, 0);

      /* Two real constants can be compared explicitly.  */
      else if (TREE_CODE (arg0) == REAL_CST && TREE_CODE (arg1) == REAL_CST)
	{
	  /* If either operand is a NaN, the result is false with two
	     exceptions: First, an NE_EXPR is true on NaNs, but that case
	     is already handled correctly since we will be inverting the
	     result for NE_EXPR.  Second, if we had inverted a LE_EXPR
	     or a GE_EXPR into a LT_EXPR, we must return true so that it
	     will be inverted into false.  */

	  if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg0))
	      || REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
	    t1 = build_int_2 (invert && code == LT_EXPR, 0);

	  else if (code == EQ_EXPR)
	    t1 = build_int_2 (REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
						 TREE_REAL_CST (arg1)),
			      0);
	  else
	    t1 = build_int_2 (REAL_VALUES_LESS (TREE_REAL_CST (arg0),
						TREE_REAL_CST (arg1)),
			      0);
	}

      if (t1 == NULL_TREE)
	return t;

      if (invert)
	TREE_INT_CST_LOW (t1) ^= 1;

      TREE_TYPE (t1) = type;
      return t1;

    case COND_EXPR:
      /* Pedantic ANSI C says that a conditional expression is never an lvalue,
	 so all simple results must be passed through pedantic_non_lvalue.  */
      if (TREE_CODE (arg0) == INTEGER_CST)
	return pedantic_non_lvalue
	  (TREE_OPERAND (t, (integer_zerop (arg0) ? 2 : 1)));
      else if (operand_equal_p (arg1, TREE_OPERAND (expr, 2), 0))
	return pedantic_omit_one_operand (type, arg1, arg0);

      /* If the second operand is zero, invert the comparison and swap
	 the second and third operands.  Likewise if the second operand
	 is constant and the third is not or if the third operand is
	 equivalent to the first operand of the comparison.  */

      if (integer_zerop (arg1)
	  || (TREE_CONSTANT (arg1) && ! TREE_CONSTANT (TREE_OPERAND (t, 2)))
	  || (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
	      && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
						 TREE_OPERAND (t, 2),
						 TREE_OPERAND (arg0, 1))))
	{
	  /* See if this can be inverted.  If it can't, possibly because
	     it was a floating-point inequality comparison, don't do
	     anything.  */
	  tem = invert_truthvalue (arg0);

	  if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
	    {
	      t = build (code, type, tem,
			 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
	      arg0 = tem;
	      arg1 = TREE_OPERAND (t, 2);
	    }
	}
      /* If we have A op B ? A : C, we may be able to convert this to a
	 simpler expression, depending on the operation and the values
	 of B and C.  IEEE floating point prevents this though,
	 because A or B might be -0.0 or a NaN.  */

      if (TREE_CODE_CLASS (TREE_CODE (arg0)) == '<'
	  && (TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	      || ! FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
	      || flag_fast_math)
	  && operand_equal_for_comparison_p (TREE_OPERAND (arg0, 0),
					     arg1, TREE_OPERAND (arg0, 1)))
	{
	  tree arg2 = TREE_OPERAND (t, 2);
	  enum tree_code comp_code = TREE_CODE (arg0);

	  /* If we have A op 0 ? A : -A, this is A, -A, abs (A), or abs (-A),
	     depending on the comparison operation.  */
	  if ((FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 1)))
	       ? real_zerop (TREE_OPERAND (arg0, 1))
	       : integer_zerop (TREE_OPERAND (arg0, 1)))
	      && TREE_CODE (arg2) == NEGATE_EXPR
	      && operand_equal_p (TREE_OPERAND (arg2, 0), arg1, 0))
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		return pedantic_non_lvalue
		  (fold (build1 (NEGATE_EXPR, type, arg1)));
	      case NE_EXPR:
		return pedantic_non_lvalue (convert (type, arg1));
	      case GE_EXPR:
	      case GT_EXPR:
		return pedantic_non_lvalue
		  (convert (type, fold (build1 (ABS_EXPR,
						TREE_TYPE (arg1), arg1))));
	      case LE_EXPR:
	      case LT_EXPR:
		return pedantic_non_lvalue
		  (fold (build1 (NEGATE_EXPR, type,
				 convert (type,
					  fold (build1 (ABS_EXPR,
							TREE_TYPE (arg1),
							arg1))))));
	      }

	  /* If this is A != 0 ? A : 0, this is simply A.  For ==, it is
	     zero.  */
	  if (integer_zerop (TREE_OPERAND (arg0, 1)) && integer_zerop (arg2))
	    {
	      if (comp_code == NE_EXPR)
		return pedantic_non_lvalue (convert (type, arg1));
	      else if (comp_code == EQ_EXPR)
		return pedantic_non_lvalue (convert (type, integer_zero_node));
	    }
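	  /* So x >= 0 ? x : -x folds to abs (x), x < 0 ? x : -x folds to
	     -abs (x), and x != 0 ? x : 0 folds to plain x.  */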
	  /* If this is A op B ? A : B, this is either A, B, min (A, B),
	     or max (A, B), depending on the operation.  */

	  if (operand_equal_for_comparison_p (TREE_OPERAND (arg0, 1),
					      arg2, TREE_OPERAND (arg0, 0)))
	    {
	      tree comp_op0 = TREE_OPERAND (arg0, 0);
	      tree comp_op1 = TREE_OPERAND (arg0, 1);
	      tree comp_type = TREE_TYPE (comp_op0);

	      switch (comp_code)
		{
		case EQ_EXPR:
		  return pedantic_non_lvalue (convert (type, arg2));
		case NE_EXPR:
		  return pedantic_non_lvalue (convert (type, arg1));
		case LE_EXPR:
		case LT_EXPR:
		  return pedantic_non_lvalue
		    (convert (type, (fold (build (MIN_EXPR, comp_type,
						  comp_op0, comp_op1)))));
		case GE_EXPR:
		case GT_EXPR:
		  return pedantic_non_lvalue
		    (convert (type, fold (build (MAX_EXPR, comp_type,
						 comp_op0, comp_op1))));
		}
	    }
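	  /* E.g. x < y ? x : y folds to MIN_EXPR (x, y) and
	     x > y ? x : y folds to MAX_EXPR (x, y).  */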
	  /* If this is A op C1 ? A : C2 with C1 and C2 constant integers,
	     we might still be able to simplify this.  For example,
	     if C1 is one less or one more than C2, this might have started
	     out as a MIN or MAX and been transformed by this function.
	     Only good for INTEGER_TYPEs, because we need TYPE_MAX_VALUE.  */

	  if (INTEGRAL_TYPE_P (type)
	      && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
	      && TREE_CODE (arg2) == INTEGER_CST)
	    switch (comp_code)
	      {
	      case EQ_EXPR:
		/* We can replace A with C1 in this case.  */
		arg1 = convert (type, TREE_OPERAND (arg0, 1));
		t = build (code, type, TREE_OPERAND (t, 0), arg1,
			   TREE_OPERAND (t, 2));
		break;

	      case LT_EXPR:
		/* If C1 is C2 + 1, this is min(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (PLUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MIN_EXPR, type, arg1, arg2)));
		break;

	      case LE_EXPR:
		/* If C1 is C2 - 1, this is min(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (MINUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MIN_EXPR, type, arg1, arg2)));
		break;

	      case GT_EXPR:
		/* If C1 is C2 - 1, this is max(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MIN_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (MINUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MAX_EXPR, type, arg1, arg2)));
		break;

	      case GE_EXPR:
		/* If C1 is C2 + 1, this is max(A, C2).  */
		if (! operand_equal_p (arg2, TYPE_MAX_VALUE (type), 1)
		    && operand_equal_p (TREE_OPERAND (arg0, 1),
					const_binop (PLUS_EXPR, arg2,
						     integer_one_node, 0), 1))
		  return pedantic_non_lvalue
		    (fold (build (MAX_EXPR, type, arg1, arg2)));
		break;
	      }
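	  /* For example, x <= 4 ? x : 5 is recognized here as min (x, 5),
	     and x > 9 ? x : 10 as max (x, 10).  */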
	}

      /* If the second operand is simpler than the third, swap them
	 since that produces better jump optimization results.  */
      if ((TREE_CONSTANT (arg1) || TREE_CODE_CLASS (TREE_CODE (arg1)) == 'd'
	   || TREE_CODE (arg1) == SAVE_EXPR)
	  && ! (TREE_CONSTANT (TREE_OPERAND (t, 2))
		|| TREE_CODE_CLASS (TREE_CODE (TREE_OPERAND (t, 2))) == 'd'
		|| TREE_CODE (TREE_OPERAND (t, 2)) == SAVE_EXPR))
	{
	  /* See if this can be inverted.  If it can't, possibly because
	     it was a floating-point inequality comparison, don't do
	     anything.  */
	  tem = invert_truthvalue (arg0);

	  if (TREE_CODE (tem) != TRUTH_NOT_EXPR)
	    {
	      t = build (code, type, tem,
			 TREE_OPERAND (t, 2), TREE_OPERAND (t, 1));
	      arg1 = TREE_OPERAND (t, 2);
	    }
	}
      /* Convert A ? 1 : 0 to simply A.  */
      if (integer_onep (TREE_OPERAND (t, 1))
	  && integer_zerop (TREE_OPERAND (t, 2))
	  /* If we try to convert TREE_OPERAND (t, 0) to our type, the
	     call to fold will try to move the conversion inside
	     a COND, which will recurse.  In that case, the COND_EXPR
	     is probably the best choice, so leave it alone.  */
	  && type == TREE_TYPE (arg0))
	return pedantic_non_lvalue (arg0);

      /* Look for expressions of the form A & 2 ? 2 : 0.  The result of this
	 operation is simply A & 2.  */
      if (integer_zerop (TREE_OPERAND (t, 2))
	  && TREE_CODE (arg0) == NE_EXPR
	  && integer_zerop (TREE_OPERAND (arg0, 1))
	  && integer_pow2p (arg1)
	  && TREE_CODE (TREE_OPERAND (arg0, 0)) == BIT_AND_EXPR
	  && operand_equal_p (TREE_OPERAND (TREE_OPERAND (arg0, 0), 1),
			      arg1, 1))
	return pedantic_non_lvalue (convert (type, TREE_OPERAND (arg0, 0)));

      return t;

    case COMPOUND_EXPR:
      /* When pedantic, a compound expression can be neither an lvalue
	 nor an integer constant expression.  */
      if (TREE_SIDE_EFFECTS (arg0) || pedantic)
	return t;
      /* Don't let (0, 0) be null pointer constant.  */
      if (integer_zerop (arg1))
	return non_lvalue (arg1);
      return arg1;

    case COMPLEX_EXPR:
      if (wins)
	return build_complex (arg0, arg1);
      return t;

    case REALPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
	return t;
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
	return omit_one_operand (type, TREE_OPERAND (arg0, 0),
				 TREE_OPERAND (arg0, 1));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
	return TREE_REALPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build1 (REALPART_EXPR, type,
					  TREE_OPERAND (arg0, 0))),
			    fold (build1 (REALPART_EXPR,
					  type, TREE_OPERAND (arg0, 1)))));
      return t;

    case IMAGPART_EXPR:
      if (TREE_CODE (TREE_TYPE (arg0)) != COMPLEX_TYPE)
	return convert (type, integer_zero_node);
      else if (TREE_CODE (arg0) == COMPLEX_EXPR)
	return omit_one_operand (type, TREE_OPERAND (arg0, 1),
				 TREE_OPERAND (arg0, 0));
      else if (TREE_CODE (arg0) == COMPLEX_CST)
	return TREE_IMAGPART (arg0);
      else if (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR)
	return fold (build (TREE_CODE (arg0), type,
			    fold (build1 (IMAGPART_EXPR, type,
					  TREE_OPERAND (arg0, 0))),
			    fold (build1 (IMAGPART_EXPR, type,
					  TREE_OPERAND (arg0, 1)))));
      return t;
      /* Pull arithmetic ops out of the CLEANUP_POINT_EXPR where
	 appropriate.  */
    case CLEANUP_POINT_EXPR:
      if (! TREE_SIDE_EFFECTS (arg0))
	return convert (type, arg0);

      {
	enum tree_code code0 = TREE_CODE (arg0);
	int kind0 = TREE_CODE_CLASS (code0);
	tree arg00 = TREE_OPERAND (arg0, 0);
	tree arg01;

	if (kind0 == '1' || code0 == TRUTH_NOT_EXPR)
	  return fold (build1 (code0, type,
			       fold (build1 (CLEANUP_POINT_EXPR,
					     TREE_TYPE (arg00), arg00))));

	if (kind0 == '<' || kind0 == '2'
	    || code0 == TRUTH_ANDIF_EXPR || code0 == TRUTH_ORIF_EXPR
	    || code0 == TRUTH_AND_EXPR || code0 == TRUTH_OR_EXPR
	    || code0 == TRUTH_XOR_EXPR)
	  {
	    arg01 = TREE_OPERAND (arg0, 1);

	    if (! TREE_SIDE_EFFECTS (arg00))
	      return fold (build (code0, type, arg00,
				  fold (build1 (CLEANUP_POINT_EXPR,
						TREE_TYPE (arg01), arg01))));

	    if (! TREE_SIDE_EFFECTS (arg01))
	      return fold (build (code0, type,
				  fold (build1 (CLEANUP_POINT_EXPR,
						TREE_TYPE (arg00), arg00)),
				  arg01));
	  }

	return t;
      }

    default:
      return t;
    } /* switch (code) */