/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 88, 92-96, 1997 Free Software Foundation, Inc.

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */
/* The entry points in this file are fold, size_int and size_binop.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.  */
/* Handle floating overflow for `const_binop'.  */
static jmp_buf float_error;

static void encode	PROTO((HOST_WIDE_INT *,
			       HOST_WIDE_INT, HOST_WIDE_INT));
static void decode	PROTO((HOST_WIDE_INT *,
			       HOST_WIDE_INT *, HOST_WIDE_INT *));
int div_and_round_double PROTO((enum tree_code, int, HOST_WIDE_INT,
				HOST_WIDE_INT, HOST_WIDE_INT,
				HOST_WIDE_INT, HOST_WIDE_INT *,
				HOST_WIDE_INT *, HOST_WIDE_INT *,
				HOST_WIDE_INT *));
static int split_tree	PROTO((tree, enum tree_code, tree *,
			       tree *, int *));
static tree const_binop PROTO((enum tree_code, tree, tree, int));
static tree fold_convert PROTO((tree, tree));
static enum tree_code invert_tree_comparison PROTO((enum tree_code));
static enum tree_code swap_tree_comparison PROTO((enum tree_code));
static int truth_value_p PROTO((enum tree_code));
static int operand_equal_for_comparison_p PROTO((tree, tree, tree));
static int twoval_comparison_p PROTO((tree, tree *, tree *, int *));
static tree eval_subst	PROTO((tree, tree, tree, tree, tree));
static tree omit_one_operand PROTO((tree, tree, tree));
static tree pedantic_omit_one_operand PROTO((tree, tree, tree));
static tree distribute_bit_expr PROTO((enum tree_code, tree, tree, tree));
static tree make_bit_field_ref PROTO((tree, tree, int, int, int));
static tree optimize_bit_field_compare PROTO((enum tree_code, tree,
					      tree, tree));
static tree decode_field_reference PROTO((tree, int *, int *,
					  enum machine_mode *, int *,
					  int *, tree *, tree *));
static int all_ones_mask_p PROTO((tree, int));
static int simple_operand_p PROTO((tree));
static tree range_binop PROTO((enum tree_code, tree, tree, int,
			       tree, int));
static tree make_range	PROTO((tree, int *, tree *, tree *));
static tree build_range_check PROTO((tree, tree, int, tree, tree));
static int merge_ranges PROTO((int *, tree *, tree *, int, tree, tree,
			       int, tree, tree));
static tree fold_range_test PROTO((tree));
static tree unextend	PROTO((tree, int, int, tree));
static tree fold_truthop PROTO((enum tree_code, tree, tree, tree));
static tree strip_compound_expr PROTO((tree, tree));
/* Suppose A1 + B1 = SUM1, using 2's complement arithmetic ignoring overflow.
   Suppose A, B and SUM have the same respective signs as A1, B1, and SUM1.
   Then this yields nonzero if overflow occurred during the addition.
   Overflow occurs if A and B have the same sign, but A and SUM differ in sign.
   Use `^' to test whether signs differ, and `< 0' to isolate the sign.  */

#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
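/* Standalone illustration (not part of GNU CC): a minimal program that
   exercises overflow_sum_sign, with `long' standing in for HOST_WIDE_INT.
   The macro reports signed overflow exactly when the addends agree in
   sign but the sum does not.  */

#include <stdio.h>
#include <limits.h>

#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)

int
main (void)
{
  long a = LONG_MAX, b = 1;
  /* Wrap around deliberately, using unsigned arithmetic.  */
  long sum = (long) ((unsigned long) a + (unsigned long) b);

  printf ("%d\n", overflow_sum_sign (a, b, sum));   /* prints 1: overflow */
  printf ("%d\n", overflow_sum_sign (1L, 2L, 3L));  /* prints 0: no overflow */
  return 0;
}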
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT/2 bits stored in each word, as a positive number.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT/2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT/2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT/2)
/* Unpack a two-word integer into 4 words.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   WORDS points to the array of HOST_WIDE_INTs.  */

static void
encode (words, low, hi)
     HOST_WIDE_INT *words;
     HOST_WIDE_INT low, hi;
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}
/* Pack an array of 4 words into a two-word integer.
   WORDS points to the array of words.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (words, low, hi)
     HOST_WIDE_INT *words;
     HOST_WIDE_INT *low, *hi;
{
  *low = words[0] | words[1] * BASE;
  *hi = words[2] | words[3] * BASE;
}
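/* Standalone sketch (not GNU CC code) of the same 4-halfword
   representation, assuming a 64-bit stand-in for HOST_WIDE_INT so each
   array slot holds 32 low-order bits.  A round trip through encode and
   decode reproduces the original two-word value.  */

#include <stdio.h>
#include <stdint.h>

#define LOWPART(x)  ((x) & (((uint64_t) 1 << 32) - 1))
#define HIGHPART(x) ((uint64_t) (x) >> 32)
#define BASE        ((uint64_t) 1 << 32)

static void
encode (uint64_t *words, uint64_t low, uint64_t hi)
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}

static void
decode (uint64_t *words, uint64_t *low, uint64_t *hi)
{
  *low = words[0] | words[1] * BASE;
  *hi = words[2] | words[3] * BASE;
}

int
main (void)
{
  uint64_t w[4], lo, hi;
  encode (w, 0x123456789abcdef0u, 0x0fedcba987654321u);
  decode (w, &lo, &hi);
  printf ("%llx %llx\n", (unsigned long long) lo, (unsigned long long) hi);
  return 0;
}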
/* Make the integer constant T valid for its type
   by setting to 0 or 1 all the bits in the constant
   that don't belong in the type.
   Yield 1 if a signed overflow occurs, 0 otherwise.
   If OVERFLOW is nonzero, a signed overflow has already occurred
   in calculating T, so propagate it.

   Make the real constant T valid for its type by calling CHECK_FLOAT_VALUE,
   if it exists.  */

int
force_fit_type (t, overflow)
     tree t;
     int overflow;
{
  HOST_WIDE_INT low, high;
  register int prec;

  if (TREE_CODE (t) == REAL_CST)
    {
#ifdef CHECK_FLOAT_VALUE
      CHECK_FLOAT_VALUE (TYPE_MODE (TREE_TYPE (t)), TREE_REAL_CST (t),
			 overflow);
#endif
      return overflow;
    }
  else if (TREE_CODE (t) != INTEGER_CST)
    return overflow;

  low = TREE_INT_CST_LOW (t);
  high = TREE_INT_CST_HIGH (t);

  if (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE)
    prec = POINTER_SIZE;
  else
    prec = TYPE_PRECISION (TREE_TYPE (t));

  /* First clear all bits that are beyond the type's precision.  */

  if (prec == 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    TREE_INT_CST_HIGH (t)
      &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
  else
    {
      TREE_INT_CST_HIGH (t) = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
	TREE_INT_CST_LOW (t) &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  /* Unsigned types do not suffer sign extension or overflow.  */
  if (TREE_UNSIGNED (TREE_TYPE (t)))
    return overflow;

  /* If the value's sign bit is set, extend the sign.  */
  if (prec != 2 * HOST_BITS_PER_WIDE_INT
      && (prec > HOST_BITS_PER_WIDE_INT
	  ? (TREE_INT_CST_HIGH (t)
	     & ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
	  : TREE_INT_CST_LOW (t) & ((HOST_WIDE_INT) 1 << (prec - 1))))
    {
      /* Value is negative:
	 set to 1 all the bits that are outside this type's precision.  */
      if (prec > HOST_BITS_PER_WIDE_INT)
	TREE_INT_CST_HIGH (t)
	  |= ((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
      else
	{
	  TREE_INT_CST_HIGH (t) = -1;
	  if (prec < HOST_BITS_PER_WIDE_INT)
	    TREE_INT_CST_LOW (t) |= ((HOST_WIDE_INT) (-1) << prec);
	}
    }

  /* Yield nonzero if signed overflow occurred.  */
  return
    ((overflow | (low ^ TREE_INT_CST_LOW (t)) | (high ^ TREE_INT_CST_HIGH (t)))
     != 0);
}
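/* Standalone sketch of the truncate-and-sign-extend step performed by
   force_fit_type for values narrower than one word.  The helper name
   fit_to_precision is invented for this illustration; 64-bit types stand
   in for HOST_WIDE_INT.  */

#include <stdio.h>
#include <stdint.h>

static int64_t
fit_to_precision (int64_t val, int prec)   /* requires 0 < prec < 64 */
{
  uint64_t u = (uint64_t) val;
  u &= ((uint64_t) 1 << prec) - 1;         /* clear bits beyond PREC */
  if (u & ((uint64_t) 1 << (prec - 1)))    /* sign bit of the narrow type */
    u |= ~(((uint64_t) 1 << prec) - 1);    /* set all the higher bits */
  return (int64_t) u;
}

int
main (void)
{
  printf ("%lld\n", (long long) fit_to_precision (0xFF, 8));  /* -1  */
  printf ("%lld\n", (long long) fit_to_precision (0x7F, 8));  /* 127 */
  return 0;
}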
/* Add two doubleword integers with doubleword result.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
add_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT l, h;
  l = l1 + l2;
  h = h1 + h2 + ((unsigned HOST_WIDE_INT) l < l1);
  *lv = l;
  *hv = h;
  return overflow_sum_sign (h1, h2, h);
}
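/* Standalone sketch of the doubleword add: the carry out of the low word
   is detected by the unsigned wraparound test `l < l1', exactly as in
   add_double above.  The name add_dd and the fixed 64-bit types are
   invented for this illustration.  */

#include <stdio.h>
#include <stdint.h>

#define overflow_sum_sign(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)

static int
add_dd (int64_t l1, int64_t h1, int64_t l2, int64_t h2,
        int64_t *lv, int64_t *hv)
{
  int64_t l = (int64_t) ((uint64_t) l1 + (uint64_t) l2);
  int64_t h = (int64_t) ((uint64_t) h1 + (uint64_t) h2
                         + ((uint64_t) l < (uint64_t) l1));  /* carry */
  *lv = l;
  *hv = h;
  return overflow_sum_sign (h1, h1 ? h1 : h2, h) && 0 ? 0
         : overflow_sum_sign (h1, h2, h);  /* signed overflow in high word */
}

int
main (void)
{
  int64_t lo, hi;
  /* 2^64 - 1 plus 1 carries into the high word: result is hi=1, lo=0.  */
  int ovf = add_dd (-1, 0, 1, 0, &lo, &hi);
  printf ("hi=%lld lo=%lld ovf=%d\n", (long long) hi, (long long) lo, ovf);
  return 0;
}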
/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
neg_double (l1, h1, lv, hv)
     HOST_WIDE_INT l1, h1;
     HOST_WIDE_INT *lv, *hv;
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = - l1;
      *hv = ~ h1;
      return 0;
    }
}
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
mul_double (l1, h1, l2, h2, lv, hv)
     HOST_WIDE_INT l1, h1, l2, h2;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  register unsigned HOST_WIDE_INT carry;
  register int i, j, k;
  HOST_WIDE_INT toplow, tophigh, neglow, neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  bzero ((char *) prod, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
	{
	  k = i + j;
	  /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
	  carry += arg1[i] * arg2[j];
	  /* Since prod[k] <= 0xFFFF, this sum <= 0xFFFFFFFF.  */
	  carry += prod[k];
	  prod[k] = LOWPART (carry);
	  carry = HIGHPART (carry);
	}
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);	/* This ignores prod[4] through prod[4*2-1] */

  /* Check for overflow by calculating the top half of the answer in full;
     it should agree with the low half's sign bit.  */
  decode (prod+4, &toplow, &tophigh);
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
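/* Standalone sketch of the halfword multiplication scheme used above:
   each 64-bit operand is split into 32-bit digits and multiplied
   schoolbook-style, so no partial product or carry ever exceeds 64 bits.
   The name mul_dd is invented for this illustration.  */

#include <stdio.h>
#include <stdint.h>

static void
mul_dd (uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
{
  uint64_t x[2] = { a & 0xffffffffu, a >> 32 };
  uint64_t y[2] = { b & 0xffffffffu, b >> 32 };
  uint64_t prod[4] = { 0, 0, 0, 0 };
  int i, j;

  for (i = 0; i < 2; i++)
    {
      uint64_t carry = 0;
      for (j = 0; j < 2; j++)
        {
          int k = i + j;
          carry += x[i] * y[j];       /* digit product, fits in 64 bits */
          carry += prod[k];
          prod[k] = carry & 0xffffffffu;
          carry >>= 32;
        }
      prod[i + 2] += carry;
    }
  *lo = prod[0] | (prod[1] << 32);
  *hi = prod[2] | (prod[3] << 32);
}

int
main (void)
{
  uint64_t lo, hi;
  /* (2^64-1)^2 = 2^128 - 2^65 + 1, so hi = 0xFFFF...FFFE and lo = 1.  */
  mul_dd (0xffffffffffffffffu, 0xffffffffffffffffu, &lo, &hi);
  printf ("%llx %llx\n", (unsigned long long) hi, (unsigned long long) lo);
  return 0;
}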
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  if (count < 0)
    {
      rshift_double (l1, h1, - count, prec, lv, hv, arith);
      return;
    }

#ifdef SHIFT_COUNT_TRUNCATED
  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;
#endif

  if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = (unsigned HOST_WIDE_INT) l1 << count - HOST_BITS_PER_WIDE_INT;
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
	     | ((unsigned HOST_WIDE_INT) l1 >> HOST_BITS_PER_WIDE_INT - count - 1 >> 1));
      *lv = (unsigned HOST_WIDE_INT) l1 << count;
    }
}
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (l1, h1, count, prec, lv, hv, arith)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
     int arith;
{
  unsigned HOST_WIDE_INT signmask;
  signmask = (arith
	      ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
	      : 0);

#ifdef SHIFT_COUNT_TRUNCATED
  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;
#endif

  if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = signmask;
      *lv = ((signmask << 2 * HOST_BITS_PER_WIDE_INT - count - 1 << 1)
	     | ((unsigned HOST_WIDE_INT) h1 >> count - HOST_BITS_PER_WIDE_INT));
    }
  else
    {
      *lv = (((unsigned HOST_WIDE_INT) l1 >> count)
	     | ((unsigned HOST_WIDE_INT) h1 << HOST_BITS_PER_WIDE_INT - count - 1 << 1));
      *hv = ((signmask << HOST_BITS_PER_WIDE_INT - count)
	     | ((unsigned HOST_WIDE_INT) h1 >> count));
    }
}
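/* Standalone sketch of the signmask trick used above: an arithmetic
   right shift built portably from a logical shift, with the vacated high
   bits painted with the sign.  The helper name arith_rshift is invented
   for this illustration.  */

#include <stdio.h>
#include <stdint.h>

static int64_t
arith_rshift (int64_t x, int count)   /* requires 0 < count < 64 */
{
  uint64_t signmask = x < 0 ? ~(uint64_t) 0 : 0;
  return (int64_t) (((uint64_t) x >> count) | (signmask << (64 - count)));
}

int
main (void)
{
  printf ("%lld\n", (long long) arith_rshift (-16, 2));  /* -4 */
  printf ("%lld\n", (long long) arith_rshift (16, 2));   /*  4 */
  return 0;
}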
/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT s1l, s1h, s2l, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rrotate_double (l1, h1, count, prec, lv, hv)
     HOST_WIDE_INT l1, h1, count;
     int prec;
     HOST_WIDE_INT *lv, *hv;
{
  HOST_WIDE_INT s1l, s1h, s2l, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
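/* Standalone sketch: a rotate is the OR of two opposite shifts, which is
   exactly how lrotate_double and rrotate_double are built above.  The
   helper name rotl64 is invented for this illustration.  */

#include <stdio.h>
#include <stdint.h>

static uint64_t
rotl64 (uint64_t x, unsigned count)   /* requires 0 < count < 64 */
{
  return (x << count) | (x >> (64 - count));
}

int
main (void)
{
  /* The top bit and the bottom bit both move left by 4: prints 18.  */
  printf ("%llx\n", (unsigned long long) rotl64 (0x8000000000000001u, 4));
  return 0;
}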
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */

int
div_and_round_double (code, uns,
		      lnum_orig, hnum_orig, lden_orig, hden_orig,
		      lquo, hquo, lrem, hrem)
     enum tree_code code;
     int uns;
     HOST_WIDE_INT lnum_orig, hnum_orig; /* num == numerator == dividend */
     HOST_WIDE_INT lden_orig, hden_orig; /* den == denominator == divisor */
     HOST_WIDE_INT *lquo, *hquo, *lrem, *hrem;
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];	/* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  register int i, j;
  unsigned HOST_WIDE_INT work;
  register unsigned HOST_WIDE_INT carry = 0;
  HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  if ((hden == 0) && (lden == 0))
    abort ();

  /* calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
	{
	  quo_neg = ~ quo_neg;
	  /* (minimum integer) / (-1) is the only overflow case.  */
	  if (neg_double (lnum, hnum, &lnum, &hnum) && (lden & hden) == -1)
	    overflow = 1;
	}
      if (hden < 0)
	{
	  quo_neg = ~ quo_neg;
	  neg_double (lden, hden, &lden, &hden);
	}
    }

  if (hnum == 0 && hden == 0)
    {				/* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / (unsigned HOST_WIDE_INT) lden;
      goto finish_up;
    }

  if (hnum == 0)
    {				/* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  bzero ((char *) quo, sizeof quo);

  bzero ((char *) num, sizeof num);	/* to zero the extra scaling element */
  bzero ((char *) den, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < BASE)
    {
      /* hnum != 0 already checked.  */
      for (i = 4 - 1; i >= 0; i--)
	{
	  work = num[i] + carry * BASE;
	  quo[i] = work / (unsigned HOST_WIDE_INT) lden;
	  carry = work % (unsigned HOST_WIDE_INT) lden;
	}
    }
  else
    {
      /* Full double precision division,
	 with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int num_hi_sig, den_hi_sig;
      unsigned HOST_WIDE_INT quo_est, scale;

      /* Find the highest non-zero divisor digit.  */
      for (i = 4 - 1; ; i--)
	if (den[i] != 0)
	  {
	    den_hi_sig = i;
	    break;
	  }

      /* Ensure that the first digit of the divisor is at least BASE/2.
	 This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1) {		/* scale divisor and dividend */
	carry = 0;
	for (i = 0; i <= 4 - 1; i++) {
	  work = (num[i] * scale) + carry;
	  num[i] = LOWPART (work);
	  carry = HIGHPART (work);
	} num[4] = carry;
	carry = 0;
	for (i = 0; i <= 4 - 1; i++) {
	  work = (den[i] * scale) + carry;
	  den[i] = LOWPART (work);
	  carry = HIGHPART (work);
	  if (den[i] != 0) den_hi_sig = i;
	}
      }

      num_hi_sig = 4;

      /* Main loop */
      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--) {
	/* guess the next quotient digit, quo_est, by dividing the first
	   two remaining dividend digits by the high order divisor digit.
	   quo_est is never low and is at most 2 high.  */
	unsigned HOST_WIDE_INT tmp;

	num_hi_sig = i + den_hi_sig + 1;
	work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
	if (num[num_hi_sig] != den[den_hi_sig])
	  quo_est = work / den[den_hi_sig];
	else
	  quo_est = BASE - 1;

	/* refine quo_est so it's usually correct, and at most one high.  */
	tmp = work - quo_est * den[den_hi_sig];
	if (tmp < BASE
	    && den[den_hi_sig - 1] * quo_est > (tmp * BASE + num[num_hi_sig - 2]))
	  quo_est--;

	/* Try QUO_EST as the quotient digit, by multiplying the
	   divisor by QUO_EST and subtracting from the remaining dividend.
	   Keep in mind that QUO_EST is the I - 1st digit.  */

	carry = 0;
	for (j = 0; j <= den_hi_sig; j++)
	  {
	    work = quo_est * den[j] + carry;
	    carry = HIGHPART (work);
	    work = num[i + j] - LOWPART (work);
	    num[i + j] = LOWPART (work);
	    carry += HIGHPART (work) != 0;
	  }

	/* if quo_est was high by one, then num[i] went negative and
	   we need to correct things.  */

	if (num[num_hi_sig] < carry)
	  {
	    quo_est--;
	    carry = 0;		/* add divisor back in */
	    for (j = 0; j <= den_hi_sig; j++)
	      {
		work = num[i + j] + den[j] + carry;
		carry = HIGHPART (work);
		num[i + j] = LOWPART (work);
	      }
	    num [num_hi_sig] += carry;
	  }

	/* store the quotient digit.  */
	quo[i] = quo_est;
      }
    }

  decode (quo, lquo, hquo);

 finish_up:
  /* if result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:	/* round toward zero */
    case EXACT_DIV_EXPR:	/* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:	/* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
	{
	  /* quo = quo - 1;  */
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
		      lquo, hquo);
	}
      else return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:		/* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
	{
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
		      lquo, hquo);
	}
      else return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:	/* round to closest integer */
      {
	HOST_WIDE_INT labs_rem = *lrem, habs_rem = *hrem;
	HOST_WIDE_INT labs_den = lden, habs_den = hden, ltwice, htwice;

	/* get absolute values */
	if (*hrem < 0) neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
	if (hden < 0) neg_double (lden, hden, &labs_den, &habs_den);

	/* if (2 * abs (lrem) >= abs (lden)) */
	mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
		    labs_rem, habs_rem, &ltwice, &htwice);
	if (((unsigned HOST_WIDE_INT) habs_den
	     < (unsigned HOST_WIDE_INT) htwice)
	    || (((unsigned HOST_WIDE_INT) habs_den
		 == (unsigned HOST_WIDE_INT) htwice)
		&& ((unsigned HOST_WIDE_INT) labs_den
		    < (unsigned HOST_WIDE_INT) ltwice)))
	  {
	    if (quo_neg)
	      /* quo = quo - 1;  */
	      add_double (*lquo, *hquo,
			  (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
	    else
	      /* quo = quo + 1;  */
	      add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
			  lquo, hquo);
	  }
	else return overflow;
      }
      break;

    default:
      abort ();
    }

  /* compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
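/* Standalone sketch of the rounding adjustments applied after the
   truncating division above: floor subtracts one when the quotient is
   negative and inexact, ceil adds one when it is positive and inexact.
   The helper div_rounded and its mode letters are invented for this
   illustration.  */

#include <stdio.h>

static long
div_rounded (long num, long den, char mode)  /* 'T'runc, 'F'loor, 'C'eil */
{
  long quo = num / den;          /* C division truncates toward zero */
  long rem = num % den;
  int inexact = rem != 0;
  int quo_neg = (num < 0) != (den < 0);

  if (mode == 'F' && inexact && quo_neg)
    quo -= 1;                    /* round toward negative infinity */
  else if (mode == 'C' && inexact && !quo_neg)
    quo += 1;                    /* round toward positive infinity */
  return quo;
}

int
main (void)
{
  printf ("%ld %ld %ld\n",
          div_rounded (-7, 2, 'T'),    /* -3 */
          div_rounded (-7, 2, 'F'),    /* -4 */
          div_rounded (7, 2, 'C'));    /*  4 */
  return 0;
}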
#ifndef REAL_ARITHMETIC
/* Effectively truncate a real value to represent the nearest possible value
   in a narrower mode.  The result is actually represented in the same data
   type as the argument, but its value is usually different.

   A trap may occur during the FP operations and it is the responsibility
   of the calling function to have a handler established.  */

REAL_VALUE_TYPE
real_value_truncate (mode, arg)
     enum machine_mode mode;
     REAL_VALUE_TYPE arg;
{
  return REAL_VALUE_TRUNCATE (mode, arg);
}
#if TARGET_FLOAT_FORMAT == IEEE_FLOAT_FORMAT

/* Check for infinity in an IEEE double precision number.  */

int
target_isinf (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign : 1;
      unsigned exponent : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent : 11;
      unsigned sign : 1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
	      && u.big_endian.mantissa1 == 0
	      && u.big_endian.mantissa2 == 0);
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
	      && u.little_endian.mantissa1 == 0
	      && u.little_endian.mantissa2 == 0);
    }
}

/* Check whether an IEEE double precision number is a NaN.  */

int
target_isnan (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign : 1;
      unsigned exponent : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent : 11;
      unsigned sign : 1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return (u.big_endian.exponent == 2047
	      && (u.big_endian.mantissa1 != 0
		  || u.big_endian.mantissa2 != 0));
    }
  else
    {
      u.d = x;
      return (u.little_endian.exponent == 2047
	      && (u.little_endian.mantissa1 != 0
		  || u.little_endian.mantissa2 != 0));
    }
}

/* Check for a negative IEEE double precision number.  */

int
target_negative (x)
     REAL_VALUE_TYPE x;
{
  /* The IEEE 64-bit double format.  */
  union {
    REAL_VALUE_TYPE d;
    struct {
      unsigned sign : 1;
      unsigned exponent : 11;
      unsigned mantissa1 : 20;
      unsigned mantissa2;
    } big_endian;
    struct {
      unsigned mantissa2;
      unsigned mantissa1 : 20;
      unsigned exponent : 11;
      unsigned sign : 1;
    } little_endian;
  } u;

  u.d = dconstm1;
  if (u.big_endian.sign == 1)
    {
      u.d = x;
      return u.big_endian.sign;
    }
  else
    {
      u.d = x;
      return u.little_endian.sign;
    }
}
#else /* Target not IEEE */

/* Let's assume other float formats don't have infinity.
   (This can be overridden by redefining REAL_VALUE_ISINF.)  */

/* Let's assume other float formats don't have NaNs.
   (This can be overridden by redefining REAL_VALUE_ISNAN.)  */

/* Let's assume other float formats don't have minus zero.
   (This can be overridden by redefining REAL_VALUE_NEGATIVE.)  */

#endif /* Target not IEEE */
/* Try to change R into its exact multiplicative inverse in machine mode
   MODE.  Return nonzero function value if successful.  */

int
exact_real_inverse (mode, r)
     enum machine_mode mode;
     REAL_VALUE_TYPE *r;
{
  union
    {
      double d;
      unsigned short i[4];
    } x, t, y;
  int i;

  /* Usually disable if bounds checks are not reliable.  */
  if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT) && !flag_pretend_float)
    return 0;

  /* Set array index to the less significant bits in the unions, depending
     on the endian-ness of the host doubles.
     Disable if insufficient information on the data structure.  */
#if HOST_FLOAT_FORMAT == UNKNOWN_FLOAT_FORMAT
  return 0;
#else
#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
#define K 2
#else
#if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
#define K 2
#else
#define K (2 * HOST_FLOAT_WORDS_BIG_ENDIAN)
#endif
#endif
#endif

  if (setjmp (float_error))
    {
      /* Don't do the optimization if there was an arithmetic error.  */
fail:
      set_float_handler (NULL_PTR);
      return 0;
    }
  set_float_handler (float_error);

  /* Domain check the argument.  */
  x.d = *r;
  if (x.d == 0.0)
    goto fail;
#ifdef REAL_INFINITY
  if (REAL_VALUE_ISINF (x.d) || REAL_VALUE_ISNAN (x.d))
    goto fail;
#endif

  /* Compute the reciprocal and check for numerical exactness.
     It is unnecessary to check all the significand bits to determine
     whether X is a power of 2.  If X is not, then it is impossible for
     the bottom half significand of both X and 1/X to be all zero bits.
     Hence we ignore the data structure of the top half and examine only
     the low order bits of the two significands.  */
  t.d = 1.0 / x.d;
  if (x.i[K] != 0 || x.i[K + 1] != 0 || t.i[K] != 0 || t.i[K + 1] != 0)
    goto fail;

  /* Truncate to the required mode and range-check the result.  */
  y.d = REAL_VALUE_TRUNCATE (mode, t.d);
#ifdef CHECK_FLOAT_VALUE
  i = 0;
  if (CHECK_FLOAT_VALUE (mode, y.d, i))
    goto fail;
#endif

  /* Fail if truncation changed the value.  */
  if (y.d != t.d || y.d == 0.0)
    goto fail;
#ifdef REAL_INFINITY
  if (REAL_VALUE_ISINF (y.d) || REAL_VALUE_ISNAN (y.d))
    goto fail;
#endif

  /* Output the reciprocal and return success flag.  */
  set_float_handler (NULL_PTR);
  *r = y.d;
  return 1;
}
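/* The comment above notes that 1/X can be numerically exact only when X
   is a power of 2.  A standalone sketch of that property test (not the
   bit-pattern check the function itself uses), via frexp: a finite
   nonzero double is a power of two iff its frexp fraction is exactly
   0.5.  Range corner cases (overflow, denormals) are ignored here, and
   the helper name reciprocal_is_exact is invented for the sketch.  */

#include <stdio.h>
#include <math.h>

static int
reciprocal_is_exact (double x)
{
  int exp;
  return x != 0 && frexp (fabs (x), &exp) == 0.5;
}

int
main (void)
{
  printf ("%d %d\n",
          reciprocal_is_exact (8.0),    /* 1: 1/8 = 0.125 is exact */
          reciprocal_is_exact (10.0));  /* 0: 1/10 is not */
  return 0;
}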
#endif /* no REAL_ARITHMETIC */

/* Split a tree IN into a constant and a variable part
   that could be combined with CODE to make IN.
   CODE must be a commutative arithmetic operation.
   Store the constant part into *CONP and the variable in *VARP.
   Return 1 if this was done; zero means the tree IN did not decompose
   this way.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.
   Therefore, we must tell the caller whether the variable part
   was subtracted.  We do this by storing 1 or -1 into *VARSIGNP.
   The value stored is the coefficient for the variable term.
   The constant term we return should always be added;
   we negate it if necessary.  */
static int
split_tree (in, code, varp, conp, varsignp)
     tree in;
     enum tree_code code;
     tree *varp, *conp;
     int *varsignp;
{
  register tree outtype = TREE_TYPE (in);
  *varp = 0;
  *conp = 0;

  /* Strip any conversions that don't change the machine mode.  */
  while ((TREE_CODE (in) == NOP_EXPR
	  || TREE_CODE (in) == CONVERT_EXPR)
	 && (TYPE_MODE (TREE_TYPE (in))
	     == TYPE_MODE (TREE_TYPE (TREE_OPERAND (in, 0)))))
    in = TREE_OPERAND (in, 0);

  if (TREE_CODE (in) == code
      || (! FLOAT_TYPE_P (TREE_TYPE (in))
	  /* We can associate addition and subtraction together
	     (even though the C standard doesn't say so)
	     for integers because the value is not affected.
	     For reals, the value might be affected, so we can't.  */
	  && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
	      || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
    {
      enum tree_code code = TREE_CODE (TREE_OPERAND (in, 0));
      if (code == INTEGER_CST)
	{
	  *conp = TREE_OPERAND (in, 0);
	  *varp = TREE_OPERAND (in, 1);
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
	  return 1;
	}
      if (TREE_CONSTANT (TREE_OPERAND (in, 1)))
	{
	  *conp = TREE_OPERAND (in, 1);
	  *varp = TREE_OPERAND (in, 0);
	  *varsignp = 1;
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  if (TREE_CODE (in) == MINUS_EXPR)
	    {
	      /* If operation is subtraction and constant is second,
		 must negate it to get an additive constant.
		 And this cannot be done unless it is a manifest constant.
		 It could also be the address of a static variable.
		 We cannot negate that, so give up.  */
	      if (TREE_CODE (*conp) == INTEGER_CST)
		/* Subtracting from integer_zero_node loses for long long.  */
		*conp = fold (build1 (NEGATE_EXPR, TREE_TYPE (*conp), *conp));
	      else
		return 0;
	    }
	  return 1;
	}
      if (TREE_CONSTANT (TREE_OPERAND (in, 0)))
	{
	  *conp = TREE_OPERAND (in, 0);
	  *varp = TREE_OPERAND (in, 1);
	  if (TYPE_MODE (TREE_TYPE (*varp)) != TYPE_MODE (outtype)
	      && TREE_TYPE (*varp) != outtype)
	    *varp = convert (outtype, *varp);
	  *varsignp = (TREE_CODE (in) == MINUS_EXPR) ? -1 : 1;
	  return 1;
	}
    }
  return 0;
}
/* Combine two constants ARG1 and ARG2 under operation CODE
   to produce a new constant.
   We assume ARG1 and ARG2 have the same data type,
   or at least are the same kind of constant and the same machine mode.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */
static tree
const_binop (code, arg1, arg2, notrunc)
     enum tree_code code;
     register tree arg1, arg2;
     int notrunc;
{
  STRIP_NOPS (arg1); STRIP_NOPS (arg2);

  if (TREE_CODE (arg1) == INTEGER_CST)
    {
      register HOST_WIDE_INT int1l = TREE_INT_CST_LOW (arg1);
      register HOST_WIDE_INT int1h = TREE_INT_CST_HIGH (arg1);
      HOST_WIDE_INT int2l = TREE_INT_CST_LOW (arg2);
      HOST_WIDE_INT int2h = TREE_INT_CST_HIGH (arg2);
      HOST_WIDE_INT low, hi;
      HOST_WIDE_INT garbagel, garbageh;
      register tree t;
      int uns = TREE_UNSIGNED (TREE_TYPE (arg1));
      int overflow = 0;
      int no_overflow = 0;

      switch (code)
	{
	case BIT_IOR_EXPR:
	  low = int1l | int2l, hi = int1h | int2h;
	  break;

	case BIT_XOR_EXPR:
	  low = int1l ^ int2l, hi = int1h ^ int2h;
	  break;

	case BIT_AND_EXPR:
	  low = int1l & int2l, hi = int1h & int2h;
	  break;

	case BIT_ANDTC_EXPR:
	  low = int1l & ~int2l, hi = int1h & ~int2h;
	  break;

	case RSHIFT_EXPR:
	  int2l = - int2l;
	case LSHIFT_EXPR:
	  /* It's unclear from the C standard whether shifts can overflow.
	     The following code ignores overflow; perhaps a C standard
	     interpretation ruling is needed.  */
	  lshift_double (int1l, int1h, int2l,
			 TYPE_PRECISION (TREE_TYPE (arg1)),
			 &low, &hi, !uns);
	  no_overflow = 1;
	  break;

	case RROTATE_EXPR:
	  int2l = - int2l;
	case LROTATE_EXPR:
	  lrotate_double (int1l, int1h, int2l,
			  TYPE_PRECISION (TREE_TYPE (arg1)),
			  &low, &hi);
	  break;

	case PLUS_EXPR:
	  overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
	  break;

	case MINUS_EXPR:
	  neg_double (int2l, int2h, &low, &hi);
	  add_double (int1l, int1h, low, hi, &low, &hi);
	  overflow = overflow_sum_sign (hi, int2h, int1h);
	  break;

	case MULT_EXPR:
	  overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
	  break;

	case TRUNC_DIV_EXPR:
	case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
	case EXACT_DIV_EXPR:
	  /* This is a shortcut for a common special case.  */
	  if (int2h == 0 && int2l > 0
	      && ! TREE_CONSTANT_OVERFLOW (arg1)
	      && ! TREE_CONSTANT_OVERFLOW (arg2)
	      && int1h == 0 && int1l >= 0)
	    {
	      if (code == CEIL_DIV_EXPR)
		int1l += int2l - 1;
	      low = int1l / int2l, hi = 0;
	      break;
	    }

	  /* ... fall through ... */

	case ROUND_DIV_EXPR:
	  if (int2h == 0 && int2l == 1)
	    {
	      low = int1l, hi = int1h;
	      break;
	    }
	  if (int1l == int2l && int1h == int2h
	      && ! (int1l == 0 && int1h == 0))
	    {
	      low = 1, hi = 0;
	      break;
	    }
	  overflow = div_and_round_double (code, uns,
					   int1l, int1h, int2l, int2h,
					   &low, &hi, &garbagel, &garbageh);
	  break;

	case TRUNC_MOD_EXPR:
	case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
	  /* This is a shortcut for a common special case.  */
	  if (int2h == 0 && int2l > 0
	      && ! TREE_CONSTANT_OVERFLOW (arg1)
	      && ! TREE_CONSTANT_OVERFLOW (arg2)
	      && int1h == 0 && int1l >= 0)
	    {
	      if (code == CEIL_MOD_EXPR)
		int1l += int2l - 1;
	      low = int1l % int2l, hi = 0;
	      break;
	    }

	  /* ... fall through ... */

	case ROUND_MOD_EXPR:
	  overflow = div_and_round_double (code, uns,
					   int1l, int1h, int2l, int2h,
					   &garbagel, &garbageh, &low, &hi);
	  break;

	case MIN_EXPR:
	case MAX_EXPR:
	  if (uns)
	    low = (((unsigned HOST_WIDE_INT) int1h
		    < (unsigned HOST_WIDE_INT) int2h)
		   || (((unsigned HOST_WIDE_INT) int1h
			== (unsigned HOST_WIDE_INT) int2h)
		       && ((unsigned HOST_WIDE_INT) int1l
			   < (unsigned HOST_WIDE_INT) int2l)));
	  else
	    low = ((int1h < int2h)
		   || ((int1h == int2h)
		       && ((unsigned HOST_WIDE_INT) int1l
			   < (unsigned HOST_WIDE_INT) int2l)));
	  if (low == (code == MIN_EXPR))
	    low = int1l, hi = int1h;
	  else
	    low = int2l, hi = int2h;
	  break;

	default:
	  abort ();
	}

      if (TREE_TYPE (arg1) == sizetype && hi == 0
	  && low >= 0 && low <= TREE_INT_CST_LOW (TYPE_MAX_VALUE (sizetype))
	  && ! overflow
	  && ! TREE_OVERFLOW (arg1) && ! TREE_OVERFLOW (arg2))
	t = size_int (low);
      else
	{
	  t = build_int_2 (low, hi);
	  TREE_TYPE (t) = TREE_TYPE (arg1);
	}

      TREE_OVERFLOW (t)
	= ((notrunc ? !uns && overflow
	    : force_fit_type (t, overflow && !uns) && ! no_overflow)
	   | TREE_OVERFLOW (arg1)
	   | TREE_OVERFLOW (arg2));
      TREE_CONSTANT_OVERFLOW (t) = (TREE_OVERFLOW (t)
				    | TREE_CONSTANT_OVERFLOW (arg1)
				    | TREE_CONSTANT_OVERFLOW (arg2));
      return t;
    }
#if ! defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
  if (TREE_CODE (arg1) == REAL_CST)
    {
      REAL_VALUE_TYPE d1;
      REAL_VALUE_TYPE d2;
      int overflow = 0;
      REAL_VALUE_TYPE value;
      tree t;

      d1 = TREE_REAL_CST (arg1);
      d2 = TREE_REAL_CST (arg2);

      /* If either operand is a NaN, just return it.  Otherwise, set up
	 for floating-point trap; we return an overflow.  */
      if (REAL_VALUE_ISNAN (d1))
	return arg1;
      else if (REAL_VALUE_ISNAN (d2))
	return arg2;
      else if (setjmp (float_error))
	{
	  t = copy_node (arg1);
	  overflow = 1;
	  goto got_float;
	}

      set_float_handler (float_error);

#ifdef REAL_ARITHMETIC
      REAL_ARITHMETIC (value, code, d1, d2);
#else
      switch (code)
	{
	case PLUS_EXPR:
	  value = d1 + d2;
	  break;

	case MINUS_EXPR:
	  value = d1 - d2;
	  break;

	case MULT_EXPR:
	  value = d1 * d2;
	  break;

	case RDIV_EXPR:
#ifndef REAL_INFINITY
	  if (d2 == 0)
	    abort ();
#endif
	  value = d1 / d2;
	  break;

	case MIN_EXPR:
	  value = MIN (d1, d2);
	  break;

	case MAX_EXPR:
	  value = MAX (d1, d2);
	  break;

	default:
	  abort ();
	}
#endif /* no REAL_ARITHMETIC */

      t = build_real (TREE_TYPE (arg1),
		      real_value_truncate (TYPE_MODE (TREE_TYPE (arg1)), value));

    got_float:
      set_float_handler (NULL_PTR);

      TREE_OVERFLOW (t)
	= (force_fit_type (t, overflow)
	   | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
      TREE_CONSTANT_OVERFLOW (t)
	= TREE_OVERFLOW (t)
	  | TREE_CONSTANT_OVERFLOW (arg1)
	  | TREE_CONSTANT_OVERFLOW (arg2);
      return t;
    }
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
  if (TREE_CODE (arg1) == COMPLEX_CST)
    {
      register tree type = TREE_TYPE (arg1);
      register tree r1 = TREE_REALPART (arg1);
      register tree i1 = TREE_IMAGPART (arg1);
      register tree r2 = TREE_REALPART (arg2);
      register tree i2 = TREE_IMAGPART (arg2);
      register tree t;

      switch (code)
	{
	case PLUS_EXPR:
	  t = build_complex (type,
			     const_binop (PLUS_EXPR, r1, r2, notrunc),
			     const_binop (PLUS_EXPR, i1, i2, notrunc));
	  break;

	case MINUS_EXPR:
	  t = build_complex (type,
			     const_binop (MINUS_EXPR, r1, r2, notrunc),
			     const_binop (MINUS_EXPR, i1, i2, notrunc));
	  break;

	case MULT_EXPR:
	  t = build_complex (type,
			     const_binop (MINUS_EXPR,
					  const_binop (MULT_EXPR,
						       r1, r2, notrunc),
					  const_binop (MULT_EXPR,
						       i1, i2, notrunc),
					  notrunc),
			     const_binop (PLUS_EXPR,
					  const_binop (MULT_EXPR,
						       r1, i2, notrunc),
					  const_binop (MULT_EXPR,
						       i1, r2, notrunc),
					  notrunc));
	  break;

	case RDIV_EXPR:
	  {
	    register tree magsquared
	      = const_binop (PLUS_EXPR,
			     const_binop (MULT_EXPR, r2, r2, notrunc),
			     const_binop (MULT_EXPR, i2, i2, notrunc),
			     notrunc);

	    t = build_complex (type,
			       const_binop
			       (INTEGRAL_TYPE_P (TREE_TYPE (r1))
				? TRUNC_DIV_EXPR : RDIV_EXPR,
				const_binop (PLUS_EXPR,
					     const_binop (MULT_EXPR, r1, r2,
							  notrunc),
					     const_binop (MULT_EXPR, i1, i2,
							  notrunc),
					     notrunc),
				magsquared, notrunc),
			       const_binop
			       (INTEGRAL_TYPE_P (TREE_TYPE (r1))
				? TRUNC_DIV_EXPR : RDIV_EXPR,
				const_binop (MINUS_EXPR,
					     const_binop (MULT_EXPR, i1, r2,
							  notrunc),
					     const_binop (MULT_EXPR, r1, i2,
							  notrunc),
					     notrunc),
				magsquared, notrunc));
	  }
	  break;

	default:
	  abort ();
	}
      return t;
    }
  return 0;
}
/* Return an INTEGER_CST with value V and type from `sizetype'.  */

tree
size_int (number)
     unsigned HOST_WIDE_INT number;
{
  register tree t;
  /* Type-size nodes already made for small sizes.  */
  static tree size_table[2*HOST_BITS_PER_WIDE_INT + 1];

  if (number < 2*HOST_BITS_PER_WIDE_INT + 1
      && size_table[number] != 0)
    return size_table[number];
  if (number < 2*HOST_BITS_PER_WIDE_INT + 1)
    {
      push_obstacks_nochange ();
      /* Make this a permanent node.  */
      end_temporary_allocation ();
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
      size_table[number] = t;
      pop_obstacks ();
    }
  else
    {
      t = build_int_2 (number, 0);
      TREE_TYPE (t) = sizetype;
      TREE_OVERFLOW (t) = TREE_CONSTANT_OVERFLOW (t) = force_fit_type (t, 0);
    }
  return t;
}
/* Combine operands ARG0 and ARG1 with arithmetic operation CODE.
   CODE is a tree code.  Data type is taken from `sizetype'.
   If the operands are constant, so is the result.  */

tree
size_binop (code, arg0, arg1)
     enum tree_code code;
     tree arg0, arg1;
{
  /* Handle the special case of two integer constants faster.  */
  if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
    {
      /* And some specific cases even faster than that.  */
      if (code == PLUS_EXPR && integer_zerop (arg0))
	return arg1;
      else if ((code == MINUS_EXPR || code == PLUS_EXPR)
	       && integer_zerop (arg1))
	return arg0;
      else if (code == MULT_EXPR && integer_onep (arg0))
	return arg1;

      /* Handle general case of two integer constants.  */
      return const_binop (code, arg0, arg1, 0);
    }

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return error_mark_node;

  return fold (build (code, sizetype, arg0, arg1));
}
/* Given T, a tree representing type conversion of ARG1, a constant,
   return a constant tree representing the result of conversion.  */

static tree
fold_convert (t, arg1)
     register tree t;
     register tree arg1;
{
  register tree type = TREE_TYPE (t);
  int overflow = 0;

  if (TREE_CODE (type) == POINTER_TYPE || INTEGRAL_TYPE_P (type))
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
	{
	  /* If we would build a constant wider than GCC supports,
	     leave the conversion unfolded.  */
	  if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
	    return t;

	  /* Given an integer constant, make new constant with new type,
	     appropriately sign-extended or truncated.  */
	  t = build_int_2 (TREE_INT_CST_LOW (arg1),
			   TREE_INT_CST_HIGH (arg1));
	  TREE_TYPE (t) = type;
	  /* Indicate an overflow if (1) ARG1 already overflowed,
	     or (2) force_fit_type indicates an overflow.
	     Tell force_fit_type that an overflow has already occurred
	     if ARG1 is a too-large unsigned value and T is signed.  */
	  TREE_OVERFLOW (t)
	    = (TREE_OVERFLOW (arg1)
	       | force_fit_type (t,
				 (TREE_INT_CST_HIGH (arg1) < 0
				  & (TREE_UNSIGNED (type)
				     < TREE_UNSIGNED (TREE_TYPE (arg1))))));
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
	}
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      else if (TREE_CODE (arg1) == REAL_CST)
	{
	  /* Don't initialize these, use assignments.
	     Initialized local aggregates don't work on old compilers.  */
	  REAL_VALUE_TYPE x;
	  REAL_VALUE_TYPE l;
	  REAL_VALUE_TYPE u;
	  tree type1 = TREE_TYPE (arg1);

	  x = TREE_REAL_CST (arg1);
	  l = real_value_from_int_cst (type1, TYPE_MIN_VALUE (type));
	  u = real_value_from_int_cst (type1, TYPE_MAX_VALUE (type));
	  /* See if X will be in range after truncation towards 0.
	     To compensate for truncation, move the bounds away from 0,
	     but reject if X exactly equals the adjusted bounds.  */
#ifdef REAL_ARITHMETIC
	  REAL_ARITHMETIC (l, MINUS_EXPR, l, dconst1);
	  REAL_ARITHMETIC (u, PLUS_EXPR, u, dconst1);
#else
	  l--;
	  u++;
#endif
	  /* If X is a NaN, use zero instead and show we have an overflow.
	     Otherwise, range check.  */
	  if (REAL_VALUE_ISNAN (x))
	    overflow = 1, x = dconst0;
	  else if (! (REAL_VALUES_LESS (l, x) && REAL_VALUES_LESS (x, u)))
	    overflow = 1;

#ifndef REAL_ARITHMETIC
	  {
	    HOST_WIDE_INT low, high;
	    HOST_WIDE_INT half_word
	      = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2);

	    if (x < 0)
	      x = -x;

	    high = (HOST_WIDE_INT) (x / half_word / half_word);
	    x -= (REAL_VALUE_TYPE) high * half_word * half_word;
	    if (x >= (REAL_VALUE_TYPE) half_word * half_word / 2)
	      {
		low = x - (REAL_VALUE_TYPE) half_word * half_word / 2;
		low |= (HOST_WIDE_INT) -1 << (HOST_BITS_PER_WIDE_INT - 1);
	      }
	    else
	      low = (HOST_WIDE_INT) x;
	    if (TREE_REAL_CST (arg1) < 0)
	      neg_double (low, high, &low, &high);
	    t = build_int_2 (low, high);
	  }
#else
	  {
	    HOST_WIDE_INT low, high;
	    REAL_VALUE_TO_INT (&low, &high, x);
	    t = build_int_2 (low, high);
	  }
#endif
	  TREE_TYPE (t) = type;
	  TREE_OVERFLOW (t)
	    = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
	}
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      TREE_TYPE (t) = type;
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
#if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
      if (TREE_CODE (arg1) == INTEGER_CST)
	return build_real_from_int_cst (type, arg1);
#endif /* not REAL_IS_NOT_DOUBLE, or REAL_ARITHMETIC */
      if (TREE_CODE (arg1) == REAL_CST)
	{
	  if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
	    {
	      t = arg1;
	      TREE_TYPE (arg1) = type;
	      return t;
	    }
	  else if (setjmp (float_error))
	    {
	      overflow = 1;
	      t = copy_node (arg1);
	      goto got_it;
	    }
	  set_float_handler (float_error);

	  t = build_real (type, real_value_truncate (TYPE_MODE (type),
						     TREE_REAL_CST (arg1)));
	  set_float_handler (NULL_PTR);

	got_it:
	  TREE_OVERFLOW (t)
	    = TREE_OVERFLOW (arg1) | force_fit_type (t, overflow);
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
	  return t;
	}
    }
  TREE_CONSTANT (t) = 1;
  return t;
}
/* Return an expr equal to X but certainly not valid as an lvalue.
   Also make sure it is not valid as a null pointer constant.  */

tree
non_lvalue (x)
     tree x;
{
  tree result;

  /* These things are certainly not lvalues.  */
  if (TREE_CODE (x) == NON_LVALUE_EXPR
      || TREE_CODE (x) == INTEGER_CST
      || TREE_CODE (x) == REAL_CST
      || TREE_CODE (x) == STRING_CST
      || TREE_CODE (x) == ADDR_EXPR)
    {
      if (TREE_CODE (x) == INTEGER_CST && integer_zerop (x))
	{
	  /* Use NOP_EXPR instead of NON_LVALUE_EXPR
	     so convert_for_assignment won't strip it.
	     This is so this 0 won't be treated as a null pointer constant.  */
	  result = build1 (NOP_EXPR, TREE_TYPE (x), x);
	  TREE_CONSTANT (result) = TREE_CONSTANT (x);
	  return result;
	}
      return x;
    }

  result = build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
  TREE_CONSTANT (result) = TREE_CONSTANT (x);
  return result;
}
/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
   Zero means allow extended lvalues.  */

int pedantic_lvalues;

/* When pedantic, return an expr equal to X but certainly not valid as a
   pedantic lvalue.  Otherwise, return X.  */

tree
pedantic_non_lvalue (x)
     tree x;
{
  if (pedantic_lvalues)
    return non_lvalue (x);
  else
    return x;
}
/* Given a tree comparison code, return the code that is the logical inverse
   of the given code.  It is not safe to do this for floating-point
   comparisons, except for NE_EXPR and EQ_EXPR.  */

static enum tree_code
invert_tree_comparison (code)
     enum tree_code code;

/* Similar, but return the comparison that results if the operands are
   swapped.  This is safe for floating-point.  */

static enum tree_code
swap_tree_comparison (code)
     enum tree_code code;
/* Return nonzero if CODE is a tree code that represents a truth value.  */

static int
truth_value_p (code)
     enum tree_code code;
{
  return (TREE_CODE_CLASS (code) == '<'
	  || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
	  || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
	  || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
}
/* Return nonzero if two operands are necessarily equal.
   If ONLY_CONST is non-zero, only return non-zero for constants.
   This function tests whether the operands are indistinguishable;
   it does not test whether they are equal using C's == operation.
   The distinction is important for IEEE floating point, because
   (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
   (2) two NaNs may be indistinguishable, but NaN!=NaN.  */

int
operand_equal_p (arg0, arg1, only_const)
     tree arg0, arg1;
     int only_const;
{
  /* If both types don't have the same signedness, then we can't consider
     them equal.  We must check this before the STRIP_NOPS calls
     because they may change the signedness of the arguments.  */
  if (TREE_UNSIGNED (TREE_TYPE (arg0)) != TREE_UNSIGNED (TREE_TYPE (arg1)))
    return 0;

  STRIP_NOPS (arg0);
  STRIP_NOPS (arg1);

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      /* This is needed for conversions and for COMPONENT_REF.
	 Might as well play it safe and always test this.  */
      || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
    return 0;

  /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
     We don't care about side effects in that case because the SAVE_EXPR
     takes care of that for us. In all other cases, two expressions are
     equal if they have no side effects.  If we have two identical
     expressions with side effects that should be treated the same due
     to the only side effects being identical SAVE_EXPR's, that will
     be detected in the recursive calls below.  */
  if (arg0 == arg1 && ! only_const
      && (TREE_CODE (arg0) == SAVE_EXPR
	  || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
    return 1;

  /* Next handle constant cases, those for which we can return 1 even
     if ONLY_CONST is set.  */
  if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1))
    switch (TREE_CODE (arg0))
      {
      case INTEGER_CST:
	return (! TREE_CONSTANT_OVERFLOW (arg0)
		&& ! TREE_CONSTANT_OVERFLOW (arg1)
		&& TREE_INT_CST_LOW (arg0) == TREE_INT_CST_LOW (arg1)
		&& TREE_INT_CST_HIGH (arg0) == TREE_INT_CST_HIGH (arg1));

      case REAL_CST:
	return (! TREE_CONSTANT_OVERFLOW (arg0)
		&& ! TREE_CONSTANT_OVERFLOW (arg1)
		&& REAL_VALUES_EQUAL (TREE_REAL_CST (arg0),
				      TREE_REAL_CST (arg1)));

      case COMPLEX_CST:
	return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1),
				 only_const)
		&& operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1),
				    only_const));

      case STRING_CST:
	return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1)
		&& ! strncmp (TREE_STRING_POINTER (arg0),
			      TREE_STRING_POINTER (arg1),
			      TREE_STRING_LENGTH (arg0)));

      case ADDR_EXPR:
	return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0),
				0);
      }

  if (only_const)
    return 0;

  switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
    {
    case '1':
      /* Two conversions are equal only if signedness and modes match.  */
      if ((TREE_CODE (arg0) == NOP_EXPR || TREE_CODE (arg0) == CONVERT_EXPR)
	  && (TREE_UNSIGNED (TREE_TYPE (arg0))
	      != TREE_UNSIGNED (TREE_TYPE (arg1))))
	return 0;

      return operand_equal_p (TREE_OPERAND (arg0, 0),
			      TREE_OPERAND (arg1, 0), 0);

    case '<':
    case '2':
      if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0)
	  && operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1),
			      0))
	return 1;

      /* For commutative ops, allow the other order.  */
      return ((TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MULT_EXPR
	       || TREE_CODE (arg0) == MIN_EXPR || TREE_CODE (arg0) == MAX_EXPR
	       || TREE_CODE (arg0) == BIT_IOR_EXPR
	       || TREE_CODE (arg0) == BIT_XOR_EXPR
	       || TREE_CODE (arg0) == BIT_AND_EXPR
	       || TREE_CODE (arg0) == NE_EXPR || TREE_CODE (arg0) == EQ_EXPR)
	      && operand_equal_p (TREE_OPERAND (arg0, 0),
				  TREE_OPERAND (arg1, 1), 0)
	      && operand_equal_p (TREE_OPERAND (arg0, 1),
				  TREE_OPERAND (arg1, 0), 0));

    case 'r':
      switch (TREE_CODE (arg0))
	{
	case INDIRECT_REF:
	  return operand_equal_p (TREE_OPERAND (arg0, 0),
				  TREE_OPERAND (arg1, 0), 0);

	case COMPONENT_REF:
	case ARRAY_REF:
	  return (operand_equal_p (TREE_OPERAND (arg0, 0),
				   TREE_OPERAND (arg1, 0), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 1),
				      TREE_OPERAND (arg1, 1), 0));

	case BIT_FIELD_REF:
	  return (operand_equal_p (TREE_OPERAND (arg0, 0),
				   TREE_OPERAND (arg1, 0), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 1),
				      TREE_OPERAND (arg1, 1), 0)
		  && operand_equal_p (TREE_OPERAND (arg0, 2),
				      TREE_OPERAND (arg1, 2), 0));
	}
    }

  return 0;
}
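/* Standalone illustration of the IEEE distinction drawn in the comment
   before operand_equal_p: -0.0 compares equal to 0.0 with == even though
   the two values are distinguishable, and a NaN does not even compare
   equal to itself.  Assumes an IEEE host.  */

#include <stdio.h>

int
main (void)
{
  double pz = 0.0, nz = -0.0;
  volatile double zero = 0.0;
  double nan = zero / zero;                /* quiet NaN on IEEE hosts */

  printf ("%d\n", pz == nz);               /* 1: == cannot tell them apart */
  printf ("%d\n", 1.0 / pz == 1.0 / nz);   /* 0: +inf vs -inf */
  printf ("%d\n", nan == nan);             /* 0: NaN != NaN */
  return 0;
}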
/* Similar to operand_equal_p, but see if ARG0 might have been made by
   shorten_compare from ARG1 when ARG1 was being compared with OTHER.

   When in doubt, return 0.  */

static int
operand_equal_for_comparison_p (arg0, arg1, other)
     tree arg0, arg1;
     tree other;
{
  int unsignedp1, unsignedpo;
  tree primarg1, primother;
  unsigned correct_width;

  if (operand_equal_p (arg0, arg1, 0))
    return 1;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
      || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
    return 0;

  /* Duplicate what shorten_compare does to ARG1 and see if that gives the
     actual comparison operand, ARG0.

     First throw away any conversions to wider types
     already present in the operands.  */

  primarg1 = get_narrower (arg1, &unsignedp1);
  primother = get_narrower (other, &unsignedpo);

  correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
  if (unsignedp1 == unsignedpo
      && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
      && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
    {
      tree type = TREE_TYPE (arg0);

      /* Make sure shorter operand is extended the right way
	 to match the longer operand.  */
      primarg1 = convert (signed_or_unsigned_type (unsignedp1,
						   TREE_TYPE (primarg1)),
			  primarg1);

      if (operand_equal_p (arg0, convert (type, primarg1), 0))
	return 1;
    }

  return 0;
}
/* See if ARG is an expression that is either a comparison or is performing
   arithmetic on comparisons.  The comparisons must only be comparing
   two different values, which will be stored in *CVAL1 and *CVAL2; if
   they are non-zero it means that some operands have already been found.
   No variables may be used anywhere else in the expression except in the
   comparisons.  If SAVE_P is true it means we removed a SAVE_EXPR around
   the expression and save_expr needs to be called with CVAL1 and CVAL2.

   If this is true, return 1.  Otherwise, return zero.  */

static int
twoval_comparison_p (arg, cval1, cval2, save_p)
     tree arg;
     tree *cval1, *cval2;
     int *save_p;
{
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
	       || code == COMPOUND_EXPR))
    class = '2';

  /* ??? Disable this since the SAVE_EXPR might already be in use outside
     the expression.  There may be no way to make this work, but it needs
     to be looked at again for 2.6.  */
#if 0
  else if (class == 'e' && code == SAVE_EXPR && SAVE_EXPR_RTL (arg) == 0)
    {
      /* If we've already found a CVAL1 or CVAL2, this expression is
	 too complex to handle.  */
      if (*cval1 || *cval2)
	return 0;

      class = '1';
      *save_p = 1;
    }
#endif

  switch (class)
    {
    case '1':
      return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);

    case '2':
      return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
	      && twoval_comparison_p (TREE_OPERAND (arg, 1),
				      cval1, cval2, save_p));

    case 'e':
      if (code == COND_EXPR)
	return (twoval_comparison_p (TREE_OPERAND (arg, 0),
				     cval1, cval2, save_p)
		&& twoval_comparison_p (TREE_OPERAND (arg, 1),
					cval1, cval2, save_p)
		&& twoval_comparison_p (TREE_OPERAND (arg, 2),
					cval1, cval2, save_p));
      return 0;

    case '<':
      /* First see if we can handle the first operand, then the second.  For
	 the second operand, we know *CVAL1 can't be zero.  It must be that
	 one side of the comparison is each of the values; test for the
	 case where this isn't true by failing if the two operands
	 are the same.  */

      if (operand_equal_p (TREE_OPERAND (arg, 0),
			   TREE_OPERAND (arg, 1), 0))
	return 0;

      if (*cval1 == 0)
	*cval1 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
	;
      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
	;
      else
	return 0;

      if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
	;
      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 1);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
	;
      else
	return 0;

      return 1;
    }

  return 0;
}
/* ARG is a tree that is known to contain just arithmetic operations and
   comparisons.  Evaluate the operations in the tree substituting NEW0 for
   any occurrence of OLD0 as an operand of a comparison and likewise for
   NEW1 and OLD1.  */

static tree
eval_subst (arg, old0, new0, old1, new1)
     tree arg;
     tree old0, new0, old1, new1;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);
  char class = TREE_CODE_CLASS (code);

  /* We can handle some of the 'e' cases here.  */
  if (class == 'e' && code == TRUTH_NOT_EXPR)
    class = '1';
  else if (class == 'e'
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
    class = '2';

  switch (class)
    {
    case '1':
      return fold (build1 (code, type,
			   eval_subst (TREE_OPERAND (arg, 0),
				       old0, new0, old1, new1)));

    case '2':
      return fold (build (code, type,
			  eval_subst (TREE_OPERAND (arg, 0),
				      old0, new0, old1, new1),
			  eval_subst (TREE_OPERAND (arg, 1),
				      old0, new0, old1, new1)));

    case 'e':
      switch (code)
	{
	case SAVE_EXPR:
	  return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);

	case COMPOUND_EXPR:
	  return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);

	case COND_EXPR:
	  return fold (build (code, type,
			      eval_subst (TREE_OPERAND (arg, 0),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 1),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 2),
					  old0, new0, old1, new1)));
	}

    case '<':
      {
	tree arg0 = TREE_OPERAND (arg, 0);
	tree arg1 = TREE_OPERAND (arg, 1);

	/* We need to check both for exact equality and tree equality.  The
	   former will be true if the operand has a side-effect.  In that
	   case, we know the operand occurred exactly once.  */

	if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
	  arg0 = new0;
	else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
	  arg0 = new1;

	if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
	  arg1 = new0;
	else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
	  arg1 = new1;

	return fold (build (code, type, arg0, arg1));
      }
    }

  return arg;
}
/* Return a tree for the case when the result of an expression is RESULT
   converted to TYPE and OMITTED was previously an operand of the expression
   but is now not needed (e.g., we folded OMITTED * 0).

   If OMITTED has side effects, we must evaluate it.  Otherwise, just do
   the conversion of RESULT to TYPE.  */

static tree
omit_one_operand (type, result, omitted)
     tree type, result, omitted;
{
  tree t = convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build (COMPOUND_EXPR, type, omitted, t);

  return non_lvalue (t);
}

/* Similar, but call pedantic_non_lvalue instead of non_lvalue.  */

static tree
pedantic_omit_one_operand (type, result, omitted)
     tree type, result, omitted;
{
  tree t = convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build (COMPOUND_EXPR, type, omitted, t);

  return pedantic_non_lvalue (t);
}
/* Return a simplified tree node for the truth-negation of ARG.  This
   never alters ARG itself.  We assume that ARG is an operation that
   returns a truth value (0 or 1).  */

tree
invert_truthvalue (arg)
     tree arg;
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);

  if (code == ERROR_MARK)
    return arg;

  /* If this is a comparison, we can simply invert it, except for
     floating-point non-equality comparisons, in which case we just
     enclose a TRUTH_NOT_EXPR around what we have.  */

  if (TREE_CODE_CLASS (code) == '<')
    {
      if (FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	  && code != NE_EXPR && code != EQ_EXPR)
	return build1 (TRUTH_NOT_EXPR, type, arg);
      return build (invert_tree_comparison (code), type,
		    TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
    }

  switch (code)
    {
    case INTEGER_CST:
      return convert (type, build_int_2 (TREE_INT_CST_LOW (arg) == 0
					 && TREE_INT_CST_HIGH (arg) == 0, 0));

    case TRUTH_AND_EXPR:
      return build (TRUTH_OR_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_OR_EXPR:
      return build (TRUTH_AND_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_XOR_EXPR:
      /* Here we can invert either operand.  We invert the first operand
	 unless the second operand is a TRUTH_NOT_EXPR in which case our
	 result is the XOR of the first operand with the inside of the
	 negation of the second operand.  */

      if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
	return build (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
		      TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
      else
	return build (TRUTH_XOR_EXPR, type,
		      invert_truthvalue (TREE_OPERAND (arg, 0)),
		      TREE_OPERAND (arg, 1));

    case TRUTH_ANDIF_EXPR:
      return build (TRUTH_ORIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_ORIF_EXPR:
      return build (TRUTH_ANDIF_EXPR, type,
		    invert_truthvalue (TREE_OPERAND (arg, 0)),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_NOT_EXPR:
      return TREE_OPERAND (arg, 0);

    case COND_EXPR:
      return build (COND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)),
		    invert_truthvalue (TREE_OPERAND (arg, 2)));

    case COMPOUND_EXPR:
      return build (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
		    invert_truthvalue (TREE_OPERAND (arg, 1)));

    case NON_LVALUE_EXPR:
      return invert_truthvalue (TREE_OPERAND (arg, 0));

    case NOP_EXPR:
    case CONVERT_EXPR:
    case FLOAT_EXPR:
      return build1 (TREE_CODE (arg), type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)));

    case BIT_AND_EXPR:
      if (!integer_onep (TREE_OPERAND (arg, 1)))
	break;
      return build (EQ_EXPR, type, arg, convert (type, integer_zero_node));

    case SAVE_EXPR:
      return build1 (TRUTH_NOT_EXPR, type, arg);

    case CLEANUP_POINT_EXPR:
      return build1 (CLEANUP_POINT_EXPR, type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)));
    }
  if (TREE_CODE (TREE_TYPE (arg)) != BOOLEAN_TYPE)
    abort ();
  return build1 (TRUTH_NOT_EXPR, type, arg);
}
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
	(A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.

   If this optimization cannot be done, 0 will be returned.  */

static tree
distribute_bit_expr (code, type, arg0, arg1)
     enum tree_code code;
     tree type;
     tree arg0, arg1;
{
  tree common;
  tree left, right;

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      || TREE_CODE (arg0) == code
      || (TREE_CODE (arg0) != BIT_AND_EXPR
	  && TREE_CODE (arg0) != BIT_IOR_EXPR))
    return 0;

  if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 0);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 0);
    }
  else
    return 0;

  return fold (build (TREE_CODE (arg0), type, common,
		      fold (build (code, type, left, right))));
}
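/* Standalone brute-force check (not GNU CC code) of the distributive
   identity used by distribute_bit_expr, over all byte values.  Since the
   law holds bit by bit, exhausting one byte per operand is convincing.  */

#include <stdio.h>

int
main (void)
{
  unsigned a, b, c, bad = 0;

  for (a = 0; a < 256; a++)
    for (b = 0; b < 256; b++)
      for (c = 0; c < 256; c++)
        if (((a | b) & (a | c)) != (a | (b & c)))
          bad++;
  printf ("%u mismatches\n", bad);   /* prints 0 */
  return 0;
}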
/* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
   starting at BITPOS.  The field is unsigned if UNSIGNEDP is non-zero.  */

static tree
make_bit_field_ref (inner, type, bitsize, bitpos, unsignedp)
     tree inner;
     tree type;
     int bitsize, bitpos;
     int unsignedp;
{
  tree result = build (BIT_FIELD_REF, type, inner,
		       size_int (bitsize), size_int (bitpos));

  TREE_UNSIGNED (result) = unsignedp;

  return result;
}
2299 /* Optimize a bit-field compare.
2301 There are two cases: First is a compare against a constant and the
2302 second is a comparison of two items where the fields are at the same
2303 bit position relative to the start of a chunk (byte, halfword, word)
2304 large enough to contain it. In these cases we can avoid the shift
2305 implicit in bitfield extractions.
2307 For constants, we emit a compare of the shifted constant with the
2308 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
2309 compared. For two fields at the same position, we do the ANDs with the
2310 similar mask and compare the result of the ANDs.
2312 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
2313 COMPARE_TYPE is the type of the comparison, and LHS and RHS
2314 are the left and right operands of the comparison, respectively.
2316 If the optimization described above can be done, we return the resulting
2317 tree. Otherwise we return zero. */
2320 optimize_bit_field_compare (code, compare_type, lhs, rhs)
2321 enum tree_code code;
2325 int lbitpos, lbitsize, rbitpos, rbitsize;
2326 int lnbitpos, lnbitsize, rnbitpos, rnbitsize;
2327 tree type = TREE_TYPE (lhs);
2328 tree signed_type, unsigned_type;
2329 int const_p = TREE_CODE (rhs) == INTEGER_CST;
2330 enum machine_mode lmode, rmode, lnmode, rnmode;
2331 int lunsignedp, runsignedp;
2332 int lvolatilep = 0, rvolatilep = 0;
2333 int alignment;
2334 tree linner, rinner;
2335 tree mask;
2336 tree offset;
2338 /* Get all the information about the extractions being done. If the bit size
2339 is the same as the size of the underlying object, we aren't doing an
2340 extraction at all and so can do nothing. */
2341 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
2342 &lunsignedp, &lvolatilep, &alignment);
2343 if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
2344 || offset != 0)
2345 return 0;
2347 if (! const_p)
2348 {
2349 /* If this is not a constant, we can only do something if bit positions,
2350 sizes, and signedness are the same. */
2351 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode,
2352 &runsignedp, &rvolatilep, &alignment);
2354 if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
2355 || lunsignedp != runsignedp || offset != 0)
2356 return 0;
2357 }
2359 /* See if we can find a mode to refer to this field. We should be able to,
2360 but fail if we can't. */
2361 lnmode = get_best_mode (lbitsize, lbitpos,
2362 TYPE_ALIGN (TREE_TYPE (linner)), word_mode,
2363 lvolatilep);
2364 if (lnmode == VOIDmode)
2365 return 0;
2367 /* Set signed and unsigned types of the precision of this mode for the
2368 shifts below. */
2369 signed_type = type_for_mode (lnmode, 0);
2370 unsigned_type = type_for_mode (lnmode, 1);
2372 if (! const_p)
2373 {
2374 rnmode = get_best_mode (rbitsize, rbitpos,
2375 TYPE_ALIGN (TREE_TYPE (rinner)), word_mode,
2376 rvolatilep);
2377 if (rnmode == VOIDmode)
2378 return 0;
2379 }
2381 /* Compute the bit position and size for the new reference and our offset
2382 within it. If the new reference is the same size as the original, we
2383 won't optimize anything, so return zero. */
2384 lnbitsize = GET_MODE_BITSIZE (lnmode);
2385 lnbitpos = lbitpos & ~ (lnbitsize - 1);
2386 lbitpos -= lnbitpos;
2387 if (lnbitsize == lbitsize)
2388 return 0;
2390 if (! const_p)
2391 {
2392 rnbitsize = GET_MODE_BITSIZE (rnmode);
2393 rnbitpos = rbitpos & ~ (rnbitsize - 1);
2394 rbitpos -= rnbitpos;
2395 if (rnbitsize == rbitsize)
2396 return 0;
2397 }
2399 if (BYTES_BIG_ENDIAN)
2400 lbitpos = lnbitsize - lbitsize - lbitpos;
2402 /* Make the mask to be used against the extracted field. */
2403 mask = build_int_2 (~0, ~0);
2404 TREE_TYPE (mask) = unsigned_type;
2405 force_fit_type (mask, 0);
2406 mask = convert (unsigned_type, mask);
2407 mask = const_binop (LSHIFT_EXPR, mask, size_int (lnbitsize - lbitsize), 0);
2408 mask = const_binop (RSHIFT_EXPR, mask,
2409 size_int (lnbitsize - lbitsize - lbitpos), 0);
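/* Worked example with invented numbers: for a 4-bit field at bit 8 of a
   32-bit chunk, the two shifts above compute
   (~0 << (32 - 4)) >> (32 - 4 - 8), i.e. 0xf0000000 >> 20 == 0x00000f00,
   leaving lbitsize one-bits positioned at lbitpos. */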
2412 /* If not comparing with constant, just rework the comparison and return. */
2413 if (! const_p)
2414 return build (code, compare_type,
2415 build (BIT_AND_EXPR, unsigned_type,
2416 make_bit_field_ref (linner, unsigned_type,
2417 lnbitsize, lnbitpos, 1),
2418 mask),
2419 build (BIT_AND_EXPR, unsigned_type,
2420 make_bit_field_ref (rinner, unsigned_type,
2421 rnbitsize, rnbitpos, 1),
2422 mask));
2424 /* Otherwise, we are handling the constant case. See if the constant is too
2425 big for the field. Warn and return a tree for 0 (false) if so. We do
2426 this not only for its own sake, but to avoid having to test for this
2427 error case below. If we didn't, we might generate wrong code.
2429 For unsigned fields, the constant shifted right by the field length should
2430 be all zero. For signed fields, the high-order bits should agree with
2431 the sign bit. */
2433 if (lunsignedp)
2434 {
2435 if (! integer_zerop (const_binop (RSHIFT_EXPR,
2436 convert (unsigned_type, rhs),
2437 size_int (lbitsize), 0)))
2438 {
2439 warning ("comparison is always %s due to width of bitfield",
2440 code == NE_EXPR ? "one" : "zero");
2441 return convert (compare_type,
2442 (code == NE_EXPR
2443 ? integer_one_node : integer_zero_node));
2444 }
2445 }
2446 else
2447 {
2448 tree tem = const_binop (RSHIFT_EXPR, convert (signed_type, rhs),
2449 size_int (lbitsize - 1), 0);
2450 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
2451 {
2452 warning ("comparison is always %s due to width of bitfield",
2453 code == NE_EXPR ? "one" : "zero");
2454 return convert (compare_type,
2455 (code == NE_EXPR
2456 ? integer_one_node : integer_zero_node));
2457 }
2458 }
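/* For instance (hypothetical field): with  struct { unsigned f : 3; } x;
   the test  x.f == 9  can never succeed, since 9 does not fit in three
   bits; the checks above fold such a comparison to constant false (or
   constant true for !=) after warning. */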
2460 /* Single-bit compares should always be against zero. */
2461 if (lbitsize == 1 && ! integer_zerop (rhs))
2462 {
2463 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
2464 rhs = convert (type, integer_zero_node);
2465 }
2467 /* Make a new bitfield reference, shift the constant over the
2468 appropriate number of bits and mask it with the computed mask
2469 (in case this was a signed field). If we changed it, make a new one. */
2470 lhs = make_bit_field_ref (linner, unsigned_type, lnbitsize, lnbitpos, 1);
2471 if (lvolatilep)
2472 {
2473 TREE_SIDE_EFFECTS (lhs) = 1;
2474 TREE_THIS_VOLATILE (lhs) = 1;
2475 }
2477 rhs = fold (const_binop (BIT_AND_EXPR,
2478 const_binop (LSHIFT_EXPR,
2479 convert (unsigned_type, rhs),
2480 size_int (lbitpos), 0),
2481 mask, 0));
2483 return build (code, compare_type,
2484 build (BIT_AND_EXPR, unsigned_type, lhs, mask),
2485 rhs);
2486 }
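/* A standalone model of the transformation (invented layout, assuming an
   8-bit chunk holding a 5-bit field at bit 3): instead of extracting the
   field with a shift and a mask, compare the masked chunk against the
   shifted constant. Both functions agree for every input. */
#if 0
static int
field_eq_naive (word)
     int word;
{
  return ((word >> 3) & 0x1f) == 7;	/* shift, mask, compare */
}

static int
field_eq_folded (word)
     int word;
{
  return (word & (0x1f << 3)) == (7 << 3);	/* mask and compare only */
}
#endif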
2488 /* Subroutine for fold_truthop: decode a field reference.
2490 If EXP is a comparison reference, we return the innermost reference.
2492 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
2493 set to the starting bit number.
2495 If the innermost field can be completely contained in a mode-sized
2496 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
2498 *PVOLATILEP is set to 1 if any expression encountered is volatile;
2499 otherwise it is not changed.
2501 *PUNSIGNEDP is set to the signedness of the field.
2503 *PMASK is set to the mask used. This is either contained in a
2504 BIT_AND_EXPR or derived from the width of the field.
2506 *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.
2508 Return 0 if this is not a component reference or is one that we can't
2509 do anything with. */
2511 static tree
2512 decode_field_reference (exp, pbitsize, pbitpos, pmode, punsignedp,
2513 pvolatilep, pmask, pand_mask)
2514 tree exp;
2515 int *pbitsize, *pbitpos;
2516 enum machine_mode *pmode;
2517 int *punsignedp, *pvolatilep;
2518 tree *pmask;
2519 tree *pand_mask;
2520 {
2521 tree and_mask = 0;
2522 tree mask, inner, offset;
2523 tree unsigned_type;
2524 int precision;
2525 int alignment;
2527 /* All the optimizations using this function assume integer fields.
2528 There are problems with FP fields since the type_for_size call
2529 below can fail for, e.g., XFmode. */
2530 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
2531 return 0;
2533 STRIP_NOPS (exp);
2535 if (TREE_CODE (exp) == BIT_AND_EXPR)
2536 {
2537 and_mask = TREE_OPERAND (exp, 1);
2538 exp = TREE_OPERAND (exp, 0);
2539 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
2540 if (TREE_CODE (and_mask) != INTEGER_CST)
2541 return 0;
2542 }
2545 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
2546 punsignedp, pvolatilep, &alignment);
2547 if ((inner == exp && and_mask == 0)
2548 || *pbitsize < 0 || offset != 0)
2549 return 0;
2551 /* Compute the mask to access the bitfield. */
2552 unsigned_type = type_for_size (*pbitsize, 1);
2553 precision = TYPE_PRECISION (unsigned_type);
2555 mask = build_int_2 (~0, ~0);
2556 TREE_TYPE (mask) = unsigned_type;
2557 force_fit_type (mask, 0);
2558 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2559 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
2561 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
2562 if (and_mask != 0)
2563 mask = fold (build (BIT_AND_EXPR, unsigned_type,
2564 convert (unsigned_type, and_mask), mask));
2566 *pmask = mask;
2567 *pand_mask = and_mask;
2568 return inner;
2569 }
2571 /* Return non-zero if MASK represents a mask of SIZE ones in the low-order
2572 bit positions. */
2574 static int
2575 all_ones_mask_p (mask, size)
2576 tree mask;
2577 int size;
2578 {
2579 tree type = TREE_TYPE (mask);
2580 int precision = TYPE_PRECISION (type);
2581 tree tmask;
2583 tmask = build_int_2 (~0, ~0);
2584 TREE_TYPE (tmask) = signed_type (type);
2585 force_fit_type (tmask, 0);
2586 return
2587 tree_int_cst_equal (mask,
2588 const_binop (RSHIFT_EXPR,
2589 const_binop (LSHIFT_EXPR, tmask,
2590 size_int (precision - size),
2591 0),
2592 size_int (precision - size), 0));
2593 }
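/* Plain-integer model (hypothetical, assuming a 32-bit type and
   0 < size <= 32): the shift pair above leaves exactly SIZE low-order
   one-bits, so e.g. SIZE == 6 matches only the mask 0x3f. */
#if 0
static int
all_ones_model (mask, size)
     unsigned int mask;
     int size;
{
  return mask == (~0U >> (32 - size));
}
#endif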
2595 /* Subroutine for fold_truthop: determine if an operand is simple enough
2596 to be evaluated unconditionally. */
2598 static int
2599 simple_operand_p (exp)
2600 tree exp;
2601 {
2602 /* Strip any conversions that don't change the machine mode. */
2603 while ((TREE_CODE (exp) == NOP_EXPR
2604 || TREE_CODE (exp) == CONVERT_EXPR)
2605 && (TYPE_MODE (TREE_TYPE (exp))
2606 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
2607 exp = TREE_OPERAND (exp, 0);
2609 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
2610 || (TREE_CODE_CLASS (TREE_CODE (exp)) == 'd'
2611 && ! TREE_ADDRESSABLE (exp)
2612 && ! TREE_THIS_VOLATILE (exp)
2613 && ! DECL_NONLOCAL (exp)
2614 /* Don't regard global variables as simple. They may be
2615 allocated in ways unknown to the compiler (shared memory,
2616 #pragma weak, etc). */
2617 && ! TREE_PUBLIC (exp)
2618 && ! DECL_EXTERNAL (exp)
2619 /* Loading a static variable is unduly expensive, but global
2620 registers aren't expensive. */
2621 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
2622 }
2624 /* The following functions are subroutines to fold_range_test and allow it to
2625 try to change a logical combination of comparisons into a range test.
2627 For example, both
2628 X == 2 || X == 3 || X == 4 || X == 5
2629 and
2630 X >= 2 && X <= 5
2631 are converted to
2632 (unsigned) (X - 2) <= 3
2634 We describe each set of comparisons as being either inside or outside
2635 a range, using a variable named like IN_P, and then describe the
2636 range with a lower and upper bound. If one of the bounds is omitted,
2637 it represents either the highest or lowest value of the type.
2639 In the comments below, we represent a range by two numbers in brackets
2640 preceded by a "+" to designate being inside that range, or a "-" to
2641 designate being outside that range, so the condition can be inverted by
2642 flipping the prefix. An omitted bound is represented by a "-". For
2643 example, "- [-, 10]" means being outside the range starting at the lowest
2644 possible value and ending at 10, in other words, being greater than 10.
2645 The range "+ [-, -]" is always true and hence the range "- [-, -]" is
2646 always false.
2648 We set up things so that the missing bounds are handled in a consistent
2649 manner so neither a missing bound nor "true" and "false" need to be
2650 handled using a special case. */
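/* A standalone illustration of the goal (invented functions): the two
   forms below agree for every int X, including negative values, which
   the unsigned subtraction wraps far above 3. */
#if 0
static int
in_range_naive (x)
     int x;
{
  return x >= 2 && x <= 5;
}

static int
in_range_folded (x)
     int x;
{
  return (unsigned int) x - 2 <= 3;
}
#endif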
2652 /* Return the result of applying CODE to ARG0 and ARG1, but handle the case
2653 of ARG0 and/or ARG1 being omitted, meaning an unlimited range. UPPER0_P
2654 and UPPER1_P are nonzero if the respective argument is an upper bound
2655 and zero for a lower. TYPE, if nonzero, is the type of the result; it
2656 must be specified for a comparison. ARG1 will be converted to ARG0's
2657 type if both are specified. */
2659 static tree
2660 range_binop (code, type, arg0, upper0_p, arg1, upper1_p)
2661 enum tree_code code;
2662 tree type;
2663 tree arg0, arg1;
2664 int upper0_p, upper1_p;
2665 {
2666 tree tem;
2667 int result;
2668 int sgn0, sgn1;
2670 /* If neither arg represents infinity, do the normal operation.
2671 Else, if not a comparison, return infinity. Else handle the special
2672 comparison rules. Note that most of the cases below won't occur, but
2673 are handled for consistency. */
2675 if (arg0 != 0 && arg1 != 0)
2676 {
2677 tem = fold (build (code, type != 0 ? type : TREE_TYPE (arg0),
2678 arg0, convert (TREE_TYPE (arg0), arg1)));
2679 STRIP_NOPS (tem);
2680 return TREE_CODE (tem) == INTEGER_CST ? tem : 0;
2681 }
2683 if (TREE_CODE_CLASS (code) != '<')
2684 return 0;
2686 /* Set SGN[01] to -1 if ARG[01] is a lower bound, 1 for upper, and 0
2687 for neither. Then compute our result treating them as never equal
2688 and comparing bounds to non-bounds as above. */
2689 sgn0 = arg0 != 0 ? 0 : (upper0_p ? 1 : -1);
2690 sgn1 = arg1 != 0 ? 0 : (upper1_p ? 1 : -1);
2691 switch (code)
2692 {
2693 case EQ_EXPR: case NE_EXPR:
2694 result = (code == NE_EXPR);
2695 break;
2696 case LT_EXPR: case LE_EXPR:
2697 result = sgn0 < sgn1;
2698 break;
2699 case GT_EXPR: case GE_EXPR:
2700 result = sgn0 > sgn1;
2701 break;
2702 default: abort ();
2703 }
2704 return convert (type, result ? integer_one_node : integer_zero_node);
2705 }
2707 /* Given EXP, a logical expression, set the range it is testing into
2708 variables denoted by PIN_P, PLOW, and PHIGH. Return the expression
2709 actually being tested. *PLOW and *PHIGH will be made the same type
2710 as the returned expression. If EXP is not a comparison, we will most
2711 likely not be returning a useful value and range. */
2713 static tree
2714 make_range (exp, pin_p, plow, phigh)
2715 tree exp;
2716 int *pin_p;
2717 tree *plow, *phigh;
2718 {
2719 enum tree_code code;
2720 tree arg0, arg1, type;
2721 int in_p, n_in_p;
2722 tree low, high, n_low, n_high;
2724 /* Start with simply saying "EXP != 0" and then look at the code of EXP
2725 and see if we can refine the range. Some of the cases below may not
2726 happen, but it doesn't seem worth worrying about this. We "continue"
2727 the outer loop when we've changed something; otherwise we "break"
2728 the switch, which will "break" the while. */
2730 in_p = 0, low = high = convert (TREE_TYPE (exp), integer_zero_node);
2732 while (1)
2733 {
2734 code = TREE_CODE (exp);
2735 arg0 = TREE_OPERAND (exp, 0), arg1 = TREE_OPERAND (exp, 1);
2736 if (TREE_CODE_CLASS (code) == '<' || TREE_CODE_CLASS (code) == '1'
2737 || TREE_CODE_CLASS (code) == '2')
2738 type = TREE_TYPE (arg0);
2740 switch (code)
2741 {
2742 case TRUTH_NOT_EXPR:
2743 in_p = ! in_p, exp = arg0;
2744 continue;
2746 case EQ_EXPR: case NE_EXPR:
2747 case LT_EXPR: case LE_EXPR: case GE_EXPR: case GT_EXPR:
2748 /* We can only do something if the range is testing for zero
2749 and if the second operand is an integer constant. Note that
2750 saying something is "in" the range we make is done by
2751 complementing IN_P since it was set in the initial case of
2752 being not equal to zero; "out" is leaving it alone. */
2753 if (low == 0 || high == 0
2754 || ! integer_zerop (low) || ! integer_zerop (high)
2755 || TREE_CODE (arg1) != INTEGER_CST)
2756 break;
2758 switch (code)
2759 {
2760 case NE_EXPR: /* - [c, c] */
2761 low = high = arg1;
2762 break;
2763 case EQ_EXPR: /* + [c, c] */
2764 in_p = ! in_p, low = high = arg1;
2765 break;
2766 case GT_EXPR: /* - [-, c] */
2767 low = 0, high = arg1;
2768 break;
2769 case GE_EXPR: /* + [c, -] */
2770 in_p = ! in_p, low = arg1, high = 0;
2771 break;
2772 case LT_EXPR: /* - [c, -] */
2773 low = arg1, high = 0;
2774 break;
2775 case LE_EXPR: /* + [-, c] */
2776 in_p = ! in_p, low = 0, high = arg1;
2777 break;
2778 default:
2779 abort ();
2780 }
2782 /* If this is an unsigned comparison, we also know that EXP is
2783 greater than or equal to zero. We base the range tests we make
2784 on that fact, so we record it here so we can parse existing
2785 range tests. */
2786 if (TREE_UNSIGNED (type) && (low == 0 || high == 0))
2787 {
2788 if (! merge_ranges (&n_in_p, &n_low, &n_high, in_p, low, high,
2789 1, convert (type, integer_zero_node),
2790 NULL_TREE))
2791 break;
2793 in_p = n_in_p, low = n_low, high = n_high;
2795 /* If the high bound is missing, reverse the range so it
2796 goes from zero to the low bound minus 1. */
2797 if (high == 0)
2798 {
2799 in_p = ! in_p;
2800 high = range_binop (MINUS_EXPR, NULL_TREE, low, 0,
2801 integer_one_node, 0);
2802 low = convert (type, integer_zero_node);
2803 }
2804 }
2805 exp = arg0;
2806 continue;
2807 case NEGATE_EXPR:
2808 /* (-x) IN [a,b] -> x in [-b, -a] */
2809 n_low = range_binop (MINUS_EXPR, type,
2810 convert (type, integer_zero_node), 0, high, 1);
2811 n_high = range_binop (MINUS_EXPR, type,
2812 convert (type, integer_zero_node), 0, low, 0);
2813 low = n_low, high = n_high;
2814 exp = arg0;
2815 continue;
2817 case BIT_NOT_EXPR:
2818 /* ~ X -> -X - 1 */
2819 exp = build (MINUS_EXPR, type, build1 (NEGATE_EXPR, type, arg0),
2820 convert (type, integer_one_node));
2821 continue;
2823 case PLUS_EXPR: case MINUS_EXPR:
2824 if (TREE_CODE (arg1) != INTEGER_CST)
2825 break;
2827 /* If EXP is signed, any overflow in the computation is undefined,
2828 so we don't worry about it so long as our computations on
2829 the bounds don't overflow. For unsigned, overflow is defined
2830 and this is exactly the right thing. */
2831 n_low = range_binop (code == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR,
2832 type, low, 0, arg1, 0);
2833 n_high = range_binop (code == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR,
2834 type, high, 1, arg1, 0);
2835 if ((n_low != 0 && TREE_OVERFLOW (n_low))
2836 || (n_high != 0 && TREE_OVERFLOW (n_high)))
2837 break;
2839 /* Check for an unsigned range which has wrapped around the maximum
2840 value thus making n_high < n_low, and normalize it. */
2841 if (n_low && n_high && tree_int_cst_lt (n_high, n_low))
2842 {
2843 low = range_binop (PLUS_EXPR, type, n_high, 0,
2844 integer_one_node, 0);
2845 high = range_binop (MINUS_EXPR, type, n_low, 0,
2846 integer_one_node, 0);
2847 in_p = ! in_p;
2848 }
2849 else
2850 low = n_low, high = n_high;
2852 exp = arg0;
2853 continue;
2855 case NOP_EXPR: case NON_LVALUE_EXPR: case CONVERT_EXPR:
2856 if (! INTEGRAL_TYPE_P (type)
2857 || (low != 0 && ! int_fits_type_p (low, type))
2858 || (high != 0 && ! int_fits_type_p (high, type)))
2859 break;
2861 if (low != 0)
2862 low = convert (type, low);
2864 if (high != 0)
2865 high = convert (type, high);
2866 exp = arg0;
2867 continue;
2869 default:
2870 break;
2871 }
2872 break;
2873 }
2874 /* If EXP is a constant, we can evaluate whether this is true or false. */
2875 if (TREE_CODE (exp) == INTEGER_CST)
2876 {
2877 in_p = in_p == (integer_onep (range_binop (GE_EXPR, integer_type_node,
2878 exp, 0, low, 0))
2879 && integer_onep (range_binop (LE_EXPR, integer_type_node,
2880 exp, 1, high, 1)));
2881 low = high = 0;
2882 exp = 0;
2883 }
2885 *pin_p = in_p, *plow = low, *phigh = high;
2886 return exp;
2887 }
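/* Example of the wrap-around normalization above (invented values): for
   unsigned char X, the test (unsigned char) (X + 2) <= 3 produces the
   wrapped range + [254, 1], which is rewritten as the equivalent
   - [2, 253]. */
#if 0
static int
wrapped_naive (x)
     int x;
{
  return (unsigned char) ((unsigned char) x + 2) <= 3;
}

static int
wrapped_normalized (x)
     int x;
{
  unsigned char c = (unsigned char) x;
  return ! (c >= 2 && c <= 253);
}
#endif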
2889 /* Given a range, LOW, HIGH, and IN_P, an expression, EXP, and a result
2890 type, TYPE, return an expression to test if EXP is in (or out of, depending
2891 on IN_P) the range. */
2893 static tree
2894 build_range_check (type, exp, in_p, low, high)
2895 tree type;
2896 tree exp;
2897 int in_p;
2898 tree low, high;
2899 {
2900 tree etype = TREE_TYPE (exp);
2901 tree utype, value;
2903 if (! in_p
2904 && (0 != (value = build_range_check (type, exp, 1, low, high))))
2905 return invert_truthvalue (value);
2907 else if (low == 0 && high == 0)
2908 return convert (type, integer_one_node);
2910 else if (low == 0)
2911 return fold (build (LE_EXPR, type, exp, high));
2913 else if (high == 0)
2914 return fold (build (GE_EXPR, type, exp, low));
2916 else if (operand_equal_p (low, high, 0))
2917 return fold (build (EQ_EXPR, type, exp, low));
2919 else if (TREE_UNSIGNED (etype) && integer_zerop (low))
2920 return build_range_check (type, exp, 1, 0, high);
2922 else if (integer_zerop (low))
2923 {
2924 utype = unsigned_type (etype);
2925 return build_range_check (type, convert (utype, exp), 1, 0,
2926 convert (utype, high));
2927 }
2929 else if (0 != (value = const_binop (MINUS_EXPR, high, low, 0))
2930 && ! TREE_OVERFLOW (value))
2931 return build_range_check (type,
2932 fold (build (MINUS_EXPR, etype, exp, low)),
2933 1, convert (etype, integer_zero_node), value);
2934 else
2935 return 0;
2936 }
2938 /* Given two ranges, see if we can merge them into one. Return 1 if we
2939 can, 0 if we can't. Set the output range into the specified parameters. */
2941 static int
2942 merge_ranges (pin_p, plow, phigh, in0_p, low0, high0, in1_p, low1, high1)
2943 int *pin_p;
2944 tree *plow, *phigh;
2945 int in0_p, in1_p;
2946 tree low0, high0, low1, high1;
2947 {
2948 int no_overlap;
2949 int subset;
2950 int temp;
2951 tree tem;
2952 int in_p;
2953 tree low, high;
2955 /* Make range 0 be the range that starts first. Swap them if it isn't. */
2956 if (integer_onep (range_binop (GT_EXPR, integer_type_node,
2957 low0, 0, low1, 0))
2958 || (((low0 == 0 && low1 == 0)
2959 || integer_onep (range_binop (EQ_EXPR, integer_type_node,
2960 low0, 0, low1, 0)))
2961 && integer_onep (range_binop (GT_EXPR, integer_type_node,
2962 high0, 1, high1, 1))))
2963 {
2964 temp = in0_p, in0_p = in1_p, in1_p = temp;
2965 tem = low0, low0 = low1, low1 = tem;
2966 tem = high0, high0 = high1, high1 = tem;
2967 }
2969 /* Now flag two cases, whether the ranges are disjoint or whether the
2970 second range is totally subsumed in the first. Note that the tests
2971 below are simplified by the ones above. */
2972 no_overlap = integer_onep (range_binop (LT_EXPR, integer_type_node,
2973 high0, 1, low1, 0));
2974 subset = integer_onep (range_binop (LE_EXPR, integer_type_node,
2975 high1, 1, high0, 1));
2977 /* We now have four cases, depending on whether we are including or
2978 excluding the two ranges. */
2979 if (in0_p && in1_p)
2980 {
2981 /* If they don't overlap, the result is false. If the second range
2982 is a subset it is the result. Otherwise, the range is from the start
2983 of the second to the end of the first. */
2984 if (no_overlap)
2985 in_p = 0, low = high = 0;
2986 else if (subset)
2987 in_p = 1, low = low1, high = high1;
2988 else
2989 in_p = 1, low = low1, high = high0;
2990 }
2992 else if (in0_p && ! in1_p)
2993 {
2994 /* If they don't overlap, the result is the first range. If the
2995 second range is a subset of the first, we can't describe this as
2996 a single range unless both ranges end at the same place. If both
2997 ranges start in the same place, then the result is false.
2998 Otherwise, we go from the start of the first range to just before
2999 the start of the second. */
3000 if (no_overlap)
3001 in_p = 1, low = low0, high = high0;
3002 else if (subset
3003 && integer_zerop (range_binop (EQ_EXPR, integer_type_node,
3004 high0, 1, high1, 0)))
3005 return 0;
3006 else if (integer_onep (range_binop (EQ_EXPR, integer_type_node,
3007 low0, 0, low1, 0)))
3008 in_p = 0, low = high = 0;
3009 else
3010 {
3011 in_p = 1, low = low0;
3012 high = range_binop (MINUS_EXPR, NULL_TREE, low1, 0,
3013 integer_one_node, 0);
3014 }
3015 }
3017 else if (! in0_p && in1_p)
3018 {
3019 /* If they don't overlap, the result is the second range. If the second
3020 is a subset of the first, the result is false. Otherwise,
3021 the range starts just after the first range and ends at the
3022 end of the second. */
3023 if (no_overlap)
3024 in_p = 1, low = low1, high = high1;
3025 else if (subset)
3026 in_p = 0, low = high = 0;
3027 else
3028 {
3029 in_p = 1, high = high1;
3030 low = range_binop (PLUS_EXPR, NULL_TREE, high0, 1,
3031 integer_one_node, 0);
3032 }
3033 }
3035 else
3036 {
3037 /* The case where we are excluding both ranges. Here the complex case
3038 is if they don't overlap. In that case, the only time we have a
3039 range is if they are adjacent. If the second is a subset of the
3040 first, the result is the first. Otherwise, the range to exclude
3041 starts at the beginning of the first range and ends at the end of the
3042 second. */
3043 if (no_overlap)
3044 {
3045 if (integer_onep (range_binop (EQ_EXPR, integer_type_node,
3046 range_binop (PLUS_EXPR, NULL_TREE,
3047 high0, 1,
3048 integer_one_node, 1),
3049 1, low1, 0)))
3050 in_p = 0, low = low0, high = high1;
3051 else
3052 return 0;
3053 }
3054 else if (subset)
3055 in_p = 0, low = low0, high = high0;
3056 else
3057 in_p = 0, low = low0, high = high1;
3058 }
3060 *pin_p = in_p, *plow = low, *phigh = high;
3061 return 1;
3062 }
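/* Worked instance of the "in0_p && in1_p" case (invented bounds): merging
   + [2, 5] with + [4, 9] yields + [4, 5]; the ranges overlap, neither is
   a subset, so the result runs from the start of the second range to the
   end of the first. */
#if 0
static int
merged_naive (x)
     int x;
{
  return (x >= 2 && x <= 5) && (x >= 4 && x <= 9);
}

static int
merged_range (x)
     int x;
{
  return x >= 4 && x <= 5;
}
#endif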
3064 /* EXP is some logical combination of boolean tests. See if we can
3065 merge it into some range test. Return the new tree if so. */
3067 static tree
3068 fold_range_test (exp)
3069 tree exp;
3070 {
3071 int or_op = (TREE_CODE (exp) == TRUTH_ORIF_EXPR
3072 || TREE_CODE (exp) == TRUTH_OR_EXPR);
3073 int in0_p, in1_p, in_p;
3074 tree low0, low1, low, high0, high1, high;
3075 tree lhs = make_range (TREE_OPERAND (exp, 0), &in0_p, &low0, &high0);
3076 tree rhs = make_range (TREE_OPERAND (exp, 1), &in1_p, &low1, &high1);
3077 tree tem;
3079 /* If this is an OR operation, invert both sides; we will invert
3080 again at the end. */
3081 if (or_op)
3082 in0_p = ! in0_p, in1_p = ! in1_p;
3084 /* If both expressions are the same, if we can merge the ranges, and we
3085 can build the range test, return it or it inverted. If one of the
3086 ranges is always true or always false, consider it to be the same
3087 expression as the other. */
3088 if ((lhs == 0 || rhs == 0 || operand_equal_p (lhs, rhs, 0))
3089 && merge_ranges (&in_p, &low, &high, in0_p, low0, high0,
3090 in1_p, low1, high1)
3091 && 0 != (tem = (build_range_check (TREE_TYPE (exp),
3092 lhs != 0 ? lhs
3093 : rhs != 0 ? rhs : integer_zero_node,
3094 in_p, low, high))))
3095 return or_op ? invert_truthvalue (tem) : tem;
3097 /* On machines where the branch cost is expensive, if this is a
3098 short-circuited branch and the underlying object on both sides
3099 is the same, make a non-short-circuit operation. */
3100 else if (BRANCH_COST >= 2
3101 && (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
3102 || TREE_CODE (exp) == TRUTH_ORIF_EXPR)
3103 && operand_equal_p (lhs, rhs, 0))
3104 {
3105 /* If simple enough, just rewrite. Otherwise, make a SAVE_EXPR. */
3106 if (simple_operand_p (lhs))
3107 return build (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
3108 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
3109 TREE_TYPE (exp), TREE_OPERAND (exp, 0),
3110 TREE_OPERAND (exp, 1));
3111 else
3112 {
3113 tree common = save_expr (lhs);
3115 if (0 != (lhs = build_range_check (TREE_TYPE (exp), common,
3116 or_op ? ! in0_p : in0_p,
3117 low0, high0))
3118 && (0 != (rhs = build_range_check (TREE_TYPE (exp), common,
3119 or_op ? ! in1_p : in1_p,
3120 low1, high1))))
3121 return build (TREE_CODE (exp) == TRUTH_ANDIF_EXPR
3122 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR,
3123 TREE_TYPE (exp), lhs, rhs);
3124 }
3125 }
3127 return 0;
3128 }
3130 /* Subroutine for fold_truthop: C is an INTEGER_CST interpreted as a P
3131 bit value. Arrange things so the extra bits will be set to zero if and
3132 only if C is sign-extended to its full width. If MASK is nonzero,
3133 it is an INTEGER_CST that should be AND'ed with the extra bits. */
3135 static tree
3136 unextend (c, p, unsignedp, mask)
3137 tree c;
3138 int p;
3139 int unsignedp;
3140 tree mask;
3141 {
3142 tree type = TREE_TYPE (c);
3143 int modesize = GET_MODE_BITSIZE (TYPE_MODE (type));
3144 tree temp;
3146 if (p == modesize || unsignedp)
3147 return c;
3149 /* We work by getting just the sign bit into the low-order bit, then
3150 into the high-order bit, then sign-extend. We then XOR that value
3151 with C. */
3152 temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1), 0);
3153 temp = const_binop (BIT_AND_EXPR, temp, size_int (1), 0);
3155 /* We must use a signed type in order to get an arithmetic right shift.
3156 However, we must also avoid introducing accidental overflows, so that
3157 a subsequent call to integer_zerop will work. Hence we must
3158 do the type conversion here. At this point, the constant is either
3159 zero or one, and the conversion to a signed type can never overflow.
3160 We could get an overflow if this conversion is done anywhere else. */
3161 if (TREE_UNSIGNED (type))
3162 temp = convert (signed_type (type), temp);
3164 temp = const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1), 0);
3165 temp = const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1), 0);
3166 if (mask != 0)
3167 temp = const_binop (BIT_AND_EXPR, temp, convert (TREE_TYPE (c), mask), 0);
3168 /* If necessary, convert the type back to match the type of C. */
3169 if (TREE_UNSIGNED (type))
3170 temp = convert (type, temp);
3172 return convert (type, const_binop (BIT_XOR_EXPR, c, temp, 0));
3173 }
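/* Model of the computation above with invented sizes: an 8-bit mode
   holding a signed 4-bit value. XORing away the sign extension leaves
   the high bits zero exactly when C was already sign-extended. */
#if 0
static int
unextend_model (c)
     int c;
{
  int sign = (c >> 3) & 1;	/* sign bit of the 4-bit field */
  int ext = sign ? 0xf0 : 0;	/* its extension to eight bits */
  return (c ^ ext) & 0xff;	/* 0xfc -> 0x0c, but 0x0c -> 0xfc */
}
#endif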
3175 /* Find ways of folding logical expressions of LHS and RHS:
3176 Try to merge two comparisons to the same innermost item.
3177 Look for range tests like "ch >= '0' && ch <= '9'".
3178 Look for combinations of simple terms on machines with expensive branches
3179 and evaluate the RHS unconditionally.
3181 For example, if we have p->a == 2 && p->b == 4 and we can make an
3182 object large enough to span both A and B, we can do this with a comparison
3183 against the object ANDed with a mask.
3185 If we have p->a == q->a && p->b == q->b, we may be able to use bit masking
3186 operations to do this with one comparison.
3188 We check for both normal comparisons and the BIT_AND_EXPRs made by this
3189 function and the one above.
3191 CODE is the logical operation being done. It can be TRUTH_ANDIF_EXPR,
3192 TRUTH_AND_EXPR, TRUTH_ORIF_EXPR, or TRUTH_OR_EXPR.
3194 TRUTH_TYPE is the type of the logical operand and LHS and RHS are its
3195 two operands.
3197 We return the simplified tree or 0 if no optimization is possible. */
3199 static tree
3200 fold_truthop (code, truth_type, lhs, rhs)
3201 enum tree_code code;
3202 tree truth_type, lhs, rhs;
3203 {
3204 /* If this is the "or" of two comparisons, we can do something if
3205 the comparisons are NE_EXPR. If this is the "and", we can do something
3206 if the comparisons are EQ_EXPR. I.e.,
3207 (a->b == 2 && a->c == 4) can become (a->new == NEW).
3209 WANTED_CODE is this operation code. For single bit fields, we can
3210 convert EQ_EXPR to NE_EXPR so we need not reject the "wrong"
3211 comparison for one-bit fields. */
3213 enum tree_code wanted_code;
3214 enum tree_code lcode, rcode;
3215 tree ll_arg, lr_arg, rl_arg, rr_arg;
3216 tree ll_inner, lr_inner, rl_inner, rr_inner;
3217 int ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
3218 int rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
3219 int xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
3220 int lnbitsize, lnbitpos, rnbitsize, rnbitpos;
3221 int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
3222 enum machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
3223 enum machine_mode lnmode, rnmode;
3224 tree ll_mask, lr_mask, rl_mask, rr_mask;
3225 tree ll_and_mask, lr_and_mask, rl_and_mask, rr_and_mask;
3226 tree l_const, r_const;
3227 tree type, result;
3228 int first_bit, end_bit;
3229 int volatilep;
3231 /* Start by getting the comparison codes. Fail if anything is volatile.
3232 If one operand is a BIT_AND_EXPR with the constant one, treat it as if
3233 it were surrounded with a NE_EXPR. */
3235 if (TREE_SIDE_EFFECTS (lhs) || TREE_SIDE_EFFECTS (rhs))
3236 return 0;
3238 lcode = TREE_CODE (lhs);
3239 rcode = TREE_CODE (rhs);
3241 if (lcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (lhs, 1)))
3242 lcode = NE_EXPR, lhs = build (NE_EXPR, truth_type, lhs, integer_zero_node);
3244 if (rcode == BIT_AND_EXPR && integer_onep (TREE_OPERAND (rhs, 1)))
3245 rcode = NE_EXPR, rhs = build (NE_EXPR, truth_type, rhs, integer_zero_node);
3247 if (TREE_CODE_CLASS (lcode) != '<' || TREE_CODE_CLASS (rcode) != '<')
3248 return 0;
3250 code = ((code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR)
3251 ? TRUTH_AND_EXPR : TRUTH_OR_EXPR);
3253 ll_arg = TREE_OPERAND (lhs, 0);
3254 lr_arg = TREE_OPERAND (lhs, 1);
3255 rl_arg = TREE_OPERAND (rhs, 0);
3256 rr_arg = TREE_OPERAND (rhs, 1);
3258 /* If the RHS can be evaluated unconditionally and its operands are
3259 simple, it wins to evaluate the RHS unconditionally on machines
3260 with expensive branches. In this case, this isn't a comparison
3261 that can be merged. */
3263 /* @@ I'm not sure it wins on the m88110 to do this if the comparisons
3264 are with zero (tmw). */
3266 if (BRANCH_COST >= 2
3267 && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3268 && simple_operand_p (rl_arg)
3269 && simple_operand_p (rr_arg))
3270 return build (code, truth_type, lhs, rhs);
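/* Illustration (invented comparisons): with simple, side-effect-free
   operands the short-circuit and unconditional forms always compute the
   same value, and the second needs no branch. */
#if 0
static int
cmp_shortcircuit (a, b)
     int a, b;
{
  return a == 1 && b == 2;
}

static int
cmp_unconditional (a, b)
     int a, b;
{
  return (a == 1) & (b == 2);
}
#endif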
3272 /* See if the comparisons can be merged. Then get all the parameters for
3273 each side. */
3275 if ((lcode != EQ_EXPR && lcode != NE_EXPR)
3276 || (rcode != EQ_EXPR && rcode != NE_EXPR))
3277 return 0;
3279 volatilep = 0;
3280 ll_inner = decode_field_reference (ll_arg,
3281 &ll_bitsize, &ll_bitpos, &ll_mode,
3282 &ll_unsignedp, &volatilep, &ll_mask,
3283 &ll_and_mask);
3284 lr_inner = decode_field_reference (lr_arg,
3285 &lr_bitsize, &lr_bitpos, &lr_mode,
3286 &lr_unsignedp, &volatilep, &lr_mask,
3287 &lr_and_mask);
3288 rl_inner = decode_field_reference (rl_arg,
3289 &rl_bitsize, &rl_bitpos, &rl_mode,
3290 &rl_unsignedp, &volatilep, &rl_mask,
3291 &rl_and_mask);
3292 rr_inner = decode_field_reference (rr_arg,
3293 &rr_bitsize, &rr_bitpos, &rr_mode,
3294 &rr_unsignedp, &volatilep, &rr_mask,
3295 &rr_and_mask);
3297 /* The inner operation on the lhs of each comparison must be the same
3298 if we are to be able to do anything. Then see if we have constants.
3299 If not, the same must be true for the rhs's. */
3301 if (volatilep || ll_inner == 0 || rl_inner == 0
3302 || ! operand_equal_p (ll_inner, rl_inner, 0))
3303 return 0;
3305 if (TREE_CODE (lr_arg) == INTEGER_CST
3306 && TREE_CODE (rr_arg) == INTEGER_CST)
3307 l_const = lr_arg, r_const = rr_arg;
3308 else if (lr_inner == 0 || rr_inner == 0
3309 || ! operand_equal_p (lr_inner, rr_inner, 0))
3310 return 0;
3311 else
3312 l_const = r_const = 0;
3314 /* If either comparison code is not correct for our logical operation,
3315 fail. However, we can convert a one-bit comparison against zero into
3316 the opposite comparison against that bit being set in the field. */
3318 wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
3319 if (lcode != wanted_code)
3320 {
3321 if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
3322 l_const = ll_mask;
3323 else
3324 return 0;
3325 }
3327 if (rcode != wanted_code)
3328 {
3329 if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
3330 r_const = rl_mask;
3331 else
3332 return 0;
3333 }
3335 /* See if we can find a mode that contains both fields being compared on
3336 the left. If we can't, fail. Otherwise, update all constants and masks
3337 to be relative to a field of that size. */
3338 first_bit = MIN (ll_bitpos, rl_bitpos);
3339 end_bit = MAX (ll_bitpos + ll_bitsize, rl_bitpos + rl_bitsize);
3340 lnmode = get_best_mode (end_bit - first_bit, first_bit,
3341 TYPE_ALIGN (TREE_TYPE (ll_inner)), word_mode,
3342 volatilep);
3343 if (lnmode == VOIDmode)
3344 return 0;
3346 lnbitsize = GET_MODE_BITSIZE (lnmode);
3347 lnbitpos = first_bit & ~ (lnbitsize - 1);
3348 type = type_for_size (lnbitsize, 1);
3349 xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
3351 if (BYTES_BIG_ENDIAN)
3352 {
3353 xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
3354 xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
3355 }
3357 ll_mask = const_binop (LSHIFT_EXPR, convert (type, ll_mask),
3358 size_int (xll_bitpos), 0);
3359 rl_mask = const_binop (LSHIFT_EXPR, convert (type, rl_mask),
3360 size_int (xrl_bitpos), 0);
3362 if (l_const)
3363 {
3364 l_const = convert (type, l_const);
3365 l_const = unextend (l_const, ll_bitsize, ll_unsignedp, ll_and_mask);
3366 l_const = const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos), 0);
3367 if (! integer_zerop (const_binop (BIT_AND_EXPR, l_const,
3368 fold (build1 (BIT_NOT_EXPR,
3369 type, ll_mask)),
3370 0)))
3371 {
3372 warning ("comparison is always %s",
3373 wanted_code == NE_EXPR ? "one" : "zero");
3375 return convert (truth_type,
3376 wanted_code == NE_EXPR
3377 ? integer_one_node : integer_zero_node);
3378 }
3379 }
3380 if (r_const)
3381 {
3382 r_const = convert (type, r_const);
3383 r_const = unextend (r_const, rl_bitsize, rl_unsignedp, rl_and_mask);
3384 r_const = const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos), 0);
3385 if (! integer_zerop (const_binop (BIT_AND_EXPR, r_const,
3386 fold (build1 (BIT_NOT_EXPR,
3387 type, rl_mask)),
3388 0)))
3389 {
3390 warning ("comparison is always %s",
3391 wanted_code == NE_EXPR ? "one" : "zero");
3393 return convert (truth_type,
3394 wanted_code == NE_EXPR
3395 ? integer_one_node : integer_zero_node);
3396 }
3397 }
3399 /* If the right sides are not constant, do the same for them. Also,
3400 disallow this optimization if a size or signedness mismatch occurs
3401 between the left and right sides. */
3402 if (l_const == 0)
3403 {
3404 if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
3405 || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
3406 /* Make sure the two fields on the right