/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */
/* The entry points in this file are fold, size_int_wide, size_binop
   and force_fit_type.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.

   force_fit_type takes a constant, an overflowable flag and prior
   overflow indicators.  It forces the value to fit the type and sets
   TREE_OVERFLOW and TREE_CONSTANT_OVERFLOW as appropriate.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "flags.h"
#include "tree.h"
#include "real.h"
#include "rtl.h"
#include "expr.h"
#include "tm_p.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "langhooks.h"
#include "md5.h"
/* The following constants represent a bit based encoding of GCC's
   comparison operators.  This encoding simplifies transformations
   on relational comparison operators, such as AND and OR.  */
enum comparison_code {
  COMPCODE_FALSE = 0,
  COMPCODE_LT = 1,
  COMPCODE_EQ = 2,
  COMPCODE_LE = 3,
  COMPCODE_GT = 4,
  COMPCODE_LTGT = 5,
  COMPCODE_GE = 6,
  COMPCODE_ORD = 7,
  COMPCODE_UNORD = 8,
  COMPCODE_UNLT = 9,
  COMPCODE_UNEQ = 10,
  COMPCODE_UNLE = 11,
  COMPCODE_UNGT = 12,
  COMPCODE_NE = 13,
  COMPCODE_UNGE = 14,
  COMPCODE_TRUE = 15
};
static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
static bool negate_mathfn_p (enum built_in_function);
static bool negate_expr_p (tree);
static tree negate_expr (tree);
static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int);
static tree associate_trees (tree, tree, enum tree_code, tree);
static tree const_binop (enum tree_code, tree, tree, int);
static tree build_zero_vector (tree);
static tree fold_convert_const (enum tree_code, tree, tree);
static enum tree_code invert_tree_comparison (enum tree_code, bool);
static enum comparison_code comparison_to_compcode (enum tree_code);
static enum tree_code compcode_to_comparison (enum comparison_code);
static tree combine_comparisons (enum tree_code, enum tree_code,
                                 enum tree_code, tree, tree, tree);
static int truth_value_p (enum tree_code);
static int operand_equal_for_comparison_p (tree, tree, tree);
static int twoval_comparison_p (tree, tree *, tree *, int *);
static tree eval_subst (tree, tree, tree, tree, tree);
static tree pedantic_omit_one_operand (tree, tree, tree);
static tree distribute_bit_expr (enum tree_code, tree, tree, tree);
static tree make_bit_field_ref (tree, tree, int, int, int);
static tree optimize_bit_field_compare (enum tree_code, tree, tree, tree);
static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
                                    enum machine_mode *, int *, int *,
                                    tree *, tree *);
static int all_ones_mask_p (tree, int);
static tree sign_bit_p (tree, tree);
static int simple_operand_p (tree);
static tree range_binop (enum tree_code, tree, tree, int, tree, int);
static tree make_range (tree, int *, tree *, tree *);
static tree build_range_check (tree, tree, int, tree, tree);
static int merge_ranges (int *, tree *, tree *, int, tree, tree, int, tree,
                         tree);
static tree fold_range_test (tree);
static tree fold_cond_expr_with_comparison (tree, tree, tree, tree);
static tree unextend (tree, int, int, tree);
static tree fold_truthop (enum tree_code, tree, tree, tree);
static tree optimize_minmax_comparison (tree);
static tree extract_muldiv (tree, tree, enum tree_code, tree);
static tree extract_muldiv_1 (tree, tree, enum tree_code, tree);
static int multiple_of_p (tree, tree, tree);
static tree fold_binary_op_with_conditional_arg (enum tree_code, tree, tree,
                                                 tree, int);
static bool fold_real_zero_addition_p (tree, tree, int);
static tree fold_mathfn_compare (enum built_in_function, enum tree_code,
                                 tree, tree, tree);
static tree fold_inf_compare (enum tree_code, tree, tree, tree);
static tree fold_div_compare (enum tree_code, tree, tree, tree);
static bool reorder_operands_p (tree, tree);
static tree fold_negate_const (tree, tree);
static tree fold_not_const (tree, tree);
static tree fold_relational_const (enum tree_code, tree, tree, tree);
static tree fold_relational_hi_lo (enum tree_code *, const tree,
                                   tree *, tree *);
static bool tree_expr_nonzero_p (tree);
/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
   overflow.  Suppose A, B and SUM have the same respective signs as A1, B1,
   and SUM1.  Then this yields nonzero if overflow occurred during the
   addition.

   Overflow occurs if A and B have the same sign, but A and SUM differ in
   sign.  Use `^' to test whether signs differ, and `< 0' to isolate the
   sign.  */
#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
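
/* Illustrative sketch, not part of the original file: a standalone check
   of the OVERFLOW_SUM_SIGN logic.  It assumes a two's complement host;
   the addition is done in unsigned arithmetic to avoid undefined
   behavior, as the arithmetic routines below also do.  */
#if 0
static void
example_overflow_sum_sign (void)
{
  HOST_WIDE_INT a = (HOST_WIDE_INT) (~(unsigned HOST_WIDE_INT) 0 >> 1); /* max */
  HOST_WIDE_INT b = 1;
  HOST_WIDE_INT sum = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) a + b);

  /* a and b agree in sign but sum wraps negative: overflow reported.  */
  gcc_assert (OVERFLOW_SUM_SIGN (a, b, sum));

  /* Operands of opposite sign can never overflow.  */
  gcc_assert (!OVERFLOW_SUM_SIGN (a, -1, a - 1));
}
#endif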
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
   number.  The value of the word is LOWPART + HIGHPART * BASE.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
/* Unpack a two-word integer into 4 words.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   WORDS points to the array of HOST_WIDE_INTs.  */

static void
encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}

/* Pack an array of 4 words into a two-word integer.
   WORDS points to the array of words.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
        HOST_WIDE_INT *hi)
{
  *low = words[0] + words[1] * BASE;
  *hi = words[2] + words[3] * BASE;
}
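
/* Illustrative sketch, not part of the original file: encode and decode
   are inverses, so a round-trip through the 4-word representation
   preserves any two-word value.  Each of the four words holds one
   base-2^(HOST_BITS_PER_WIDE_INT/2) "digit" of the two-word integer.  */
#if 0
static void
example_encode_decode (void)
{
  HOST_WIDE_INT words[4];
  unsigned HOST_WIDE_INT low;
  HOST_WIDE_INT hi;

  encode (words, 123456789, -42);
  decode (words, &low, &hi);
  gcc_assert (low == 123456789 && hi == -42);
}
#endif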
/* T is an INT_CST node.  OVERFLOWABLE indicates if we are interested
   in overflow of the value, when >0 we are only interested in signed
   overflow, for <0 we are interested in any overflow.  OVERFLOWED
   indicates whether overflow has already occurred.  CONST_OVERFLOWED
   indicates whether constant overflow has already occurred.  We force
   T's value to be within range of T's type (by setting to 0 or 1 all
   the bits outside the type's range).  We set TREE_OVERFLOW if,
        OVERFLOWED is nonzero,
        or OVERFLOWABLE is >0 and signed overflow occurs
        or OVERFLOWABLE is <0 and any overflow occurs
   We set TREE_CONSTANT_OVERFLOW if,
        CONST_OVERFLOWED is nonzero
        or we set TREE_OVERFLOW.
   We return either the original T, or a copy.  */

tree
force_fit_type (tree t, int overflowable,
                bool overflowed, bool overflowed_const)
{
  unsigned HOST_WIDE_INT low;
  HOST_WIDE_INT high;
  unsigned int prec;
  int sign_extended_type;

  gcc_assert (TREE_CODE (t) == INTEGER_CST);

  low = TREE_INT_CST_LOW (t);
  high = TREE_INT_CST_HIGH (t);

  if (POINTER_TYPE_P (TREE_TYPE (t))
      || TREE_CODE (TREE_TYPE (t)) == OFFSET_TYPE)
    prec = POINTER_SIZE;
  else
    prec = TYPE_PRECISION (TREE_TYPE (t));
  /* Size types *are* sign extended.  */
  sign_extended_type = (!TYPE_UNSIGNED (TREE_TYPE (t))
                        || (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE
                            && TYPE_IS_SIZETYPE (TREE_TYPE (t))));

  /* First clear all bits that are beyond the type's precision.  */

  if (prec == 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
  else
    {
      high = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
        low &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  if (!sign_extended_type)
    /* No sign extension */;
  else if (prec == 2 * HOST_BITS_PER_WIDE_INT)
    /* Correct width already.  */;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      /* Sign extend top half? */
      if (high & ((unsigned HOST_WIDE_INT)1
                  << (prec - HOST_BITS_PER_WIDE_INT - 1)))
        high |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else if (prec == HOST_BITS_PER_WIDE_INT)
    {
      if ((HOST_WIDE_INT)low < 0)
        high = -1;
    }
  else
    {
      /* Sign extend bottom half? */
      if (low & ((unsigned HOST_WIDE_INT)1 << (prec - 1)))
        {
          high = -1;
          low |= (HOST_WIDE_INT)(-1) << prec;
        }
    }

  /* If the value changed, return a new node.  */
  if (overflowed || overflowed_const
      || low != TREE_INT_CST_LOW (t) || high != TREE_INT_CST_HIGH (t))
    {
      t = build_int_cst_wide (TREE_TYPE (t), low, high);

      if (overflowed
          || overflowable < 0
          || (overflowable > 0 && sign_extended_type))
        {
          t = copy_node (t);
          TREE_OVERFLOW (t) = 1;
          TREE_CONSTANT_OVERFLOW (t) = 1;
        }
      else if (overflowed_const)
        {
          t = copy_node (t);
          TREE_CONSTANT_OVERFLOW (t) = 1;
        }
    }

  return t;
}
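
/* Illustrative sketch, not part of the original file: how a caller
   might exercise force_fit_type.  The parameter name is hypothetical;
   any 8-bit signed integer type node would do.  Fitting 0x1ff into it
   masks off the bits above bit 7 and sign extends, yielding -1.  */
#if 0
static tree
example_force_fit (tree signed_8bit_type)
{
  tree c = build_int_cst_wide (signed_8bit_type, 0x1ff, 0);

  /* overflowable == 1: flag TREE_OVERFLOW only for signed overflow,
     which the truncation here does trigger.  */
  return force_fit_type (c, 1, false, false);  /* value is now -1 */
}
#endif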
/* Add two doubleword integers with doubleword result.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
add_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
            unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
            unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT l;
  HOST_WIDE_INT h;

  l = l1 + l2;
  h = h1 + h2 + (l < l1);

  *lv = l;
  *hv = h;
  return OVERFLOW_SUM_SIGN (h1, h2, h);
}
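
/* Illustrative sketch, not part of the original file: adding 1 to a
   doubleword whose low word is all ones carries into the high word.  */
#if 0
static void
example_add_double (void)
{
  unsigned HOST_WIDE_INT lv;
  HOST_WIDE_INT hv;
  int ovf = add_double (~(unsigned HOST_WIDE_INT) 0, 0, 1, 0, &lv, &hv);

  /* {~0, 0} + {1, 0} == {0, 1}; both operands are non-negative and the
     result is non-negative, so no signed overflow is reported.  */
  gcc_assert (lv == 0 && hv == 1 && !ovf);
}
#endif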
/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
            unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = -l1;
      *hv = ~h1;
      return 0;
    }
}
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
mul_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
            unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
            unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  unsigned HOST_WIDE_INT carry;
  int i, j, k;
  unsigned HOST_WIDE_INT toplow, neglow;
  HOST_WIDE_INT tophigh, neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  memset (prod, 0, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
        {
          k = i + j;
          /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
          carry += arg1[i] * arg2[j];
          /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
          carry += prod[k];
          prod[k] = LOWPART (carry);
          carry = HIGHPART (carry);
        }
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);        /* This ignores prod[4] through prod[4*2-1] */

  /* Check for overflow by calculating the top half of the answer in full;
     it should agree with the low half's sign bit.  */
  decode (prod + 4, &toplow, &tophigh);
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
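
/* Illustrative sketch, not part of the original file (assumes a 64-bit
   HOST_WIDE_INT): squaring 2^32 gives 2^64, exactly one unit of the
   high word, and the signed doubleword result does not overflow.  */
#if 0
static void
example_mul_double (void)
{
  unsigned HOST_WIDE_INT lv;
  HOST_WIDE_INT hv;
  int ovf = mul_double ((unsigned HOST_WIDE_INT) 1 << 32, 0,
                        (unsigned HOST_WIDE_INT) 1 << 32, 0, &lv, &hv);

  gcc_assert (lv == 0 && hv == 1 && !ovf);
}
#endif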
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
               HOST_WIDE_INT count, unsigned int prec,
               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith)
{
  unsigned HOST_WIDE_INT signmask;

  if (count < 0)
    {
      rshift_double (l1, h1, -count, prec, lv, hv, arith);
      return;
    }

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
             | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
      *lv = l1 << count;
    }

  /* Sign extend all bits that are beyond the precision.  */

  signmask = -((prec > HOST_BITS_PER_WIDE_INT
                ? ((unsigned HOST_WIDE_INT) *hv
                   >> (prec - HOST_BITS_PER_WIDE_INT - 1))
                : (*lv >> (prec - 1))) & 1);

  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
      *lv |= signmask << prec;
    }
}
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
               HOST_WIDE_INT count, unsigned int prec,
               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
               int arith)
{
  unsigned HOST_WIDE_INT signmask;

  signmask = (arith
              ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
              : 0);

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = 0;
      *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = (unsigned HOST_WIDE_INT) h1 >> count;
      *lv = ((l1 >> count)
             | ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
    }

  /* Zero / sign extend all bits that are beyond the precision.  */

  if (count >= (HOST_WIDE_INT)prec)
    {
      *hv = signmask;
      *lv = signmask;
    }
  else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
      *lv |= signmask << (prec - count);
    }
}
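
/* Illustrative sketch, not part of the original file (assumes a 64-bit
   HOST_WIDE_INT): an arithmetic right shift replicates the sign bit.
   {0, -1} is -2^64 at 128-bit precision; shifting it right by 64
   yields -1, i.e. {~0, -1}.  */
#if 0
static void
example_rshift_double (void)
{
  unsigned HOST_WIDE_INT lv;
  HOST_WIDE_INT hv;

  rshift_double (0, -1, 64, 128, &lv, &hv, 1);
  gcc_assert (lv == ~(unsigned HOST_WIDE_INT) 0 && hv == -1);
}
#endif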
/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                HOST_WIDE_INT count, unsigned int prec,
                unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}

/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                HOST_WIDE_INT count, unsigned int prec,
                unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */

int
div_and_round_double (enum tree_code code, int uns,
                      unsigned HOST_WIDE_INT lnum_orig, /* num == numerator == dividend */
                      HOST_WIDE_INT hnum_orig,
                      unsigned HOST_WIDE_INT lden_orig, /* den == denominator == divisor */
                      HOST_WIDE_INT hden_orig,
                      unsigned HOST_WIDE_INT *lquo,
                      HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
                      HOST_WIDE_INT *hrem)
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];     /* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  int i, j;
  unsigned HOST_WIDE_INT work;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  unsigned HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  if (hden == 0 && lden == 0)
    overflow = 1, lden = 1;

  /* Calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
        {
          quo_neg = ~ quo_neg;
          /* (minimum integer) / (-1) is the only overflow case.  */
          if (neg_double (lnum, hnum, &lnum, &hnum)
              && ((HOST_WIDE_INT) lden & hden) == -1)
            overflow = 1;
        }
      if (hden < 0)
        {
          quo_neg = ~ quo_neg;
          neg_double (lden, hden, &lden, &hden);
        }
    }

  if (hnum == 0 && hden == 0)
    {                           /* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / lden;
      goto finish_up;
    }

  if (hnum == 0)
    {                           /* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  memset (quo, 0, sizeof quo);

  memset (num, 0, sizeof num);  /* to zero 9th element */
  memset (den, 0, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
    {
      /* hnum != 0 already checked.  */
      for (i = 4 - 1; i >= 0; i--)
        {
          work = num[i] + carry * BASE;
          quo[i] = work / lden;
          carry = work % lden;
        }
    }
  else
    {
      /* Full double precision division,
         with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int num_hi_sig, den_hi_sig;
      unsigned HOST_WIDE_INT quo_est, scale;

      /* Find the highest nonzero divisor digit.  */
      for (i = 4 - 1;; i--)
        if (den[i] != 0)
          {
            den_hi_sig = i;
            break;
          }

      /* Ensure that the first digit of the divisor is at least BASE/2.
         This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1)
        {                       /* scale divisor and dividend */
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (num[i] * scale) + carry;
              num[i] = LOWPART (work);
              carry = HIGHPART (work);
            }

          num[4] = carry;
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (den[i] * scale) + carry;
              den[i] = LOWPART (work);
              carry = HIGHPART (work);
              if (den[i] != 0) den_hi_sig = i;
            }
        }

      num_hi_sig = 4;

      /* Main loop */
      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
        {
          /* Guess the next quotient digit, quo_est, by dividing the first
             two remaining dividend digits by the high order quotient digit.
             quo_est is never low and is at most 2 high.  */
          unsigned HOST_WIDE_INT tmp;

          num_hi_sig = i + den_hi_sig + 1;
          work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
          if (num[num_hi_sig] != den[den_hi_sig])
            quo_est = work / den[den_hi_sig];
          else
            quo_est = BASE - 1;

          /* Refine quo_est so it's usually correct, and at most one high.  */
          tmp = work - quo_est * den[den_hi_sig];
          if (tmp < BASE
              && (den[den_hi_sig - 1] * quo_est
                  > (tmp * BASE + num[num_hi_sig - 2])))
            quo_est--;

          /* Try QUO_EST as the quotient digit, by multiplying the
             divisor by QUO_EST and subtracting from the remaining dividend.
             Keep in mind that QUO_EST is the I - 1st digit.  */

          carry = 0;
          for (j = 0; j <= den_hi_sig; j++)
            {
              work = quo_est * den[j] + carry;
              carry = HIGHPART (work);
              work = num[i + j] - LOWPART (work);
              num[i + j] = LOWPART (work);
              carry += HIGHPART (work) != 0;
            }

          /* If quo_est was high by one, then num[i] went negative and
             we need to correct things.  */
          if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
            {
              quo_est--;
              carry = 0;        /* add divisor back in */
              for (j = 0; j <= den_hi_sig; j++)
                {
                  work = num[i + j] + den[j] + carry;
                  carry = HIGHPART (work);
                  num[i + j] = LOWPART (work);
                }

              num [num_hi_sig] += carry;
            }

          /* Store the quotient digit.  */
          quo[i] = quo_est;
        }
    }

  decode (quo, lquo, hquo);

 finish_up:
  /* If result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* Compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:        /* round toward zero */
    case EXACT_DIV_EXPR:        /* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:        /* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
        {
          /* quo = quo - 1;  */
          add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:         /* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
        {
          add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:        /* round to closest integer */
      {
        unsigned HOST_WIDE_INT labs_rem = *lrem;
        HOST_WIDE_INT habs_rem = *hrem;
        unsigned HOST_WIDE_INT labs_den = lden, ltwice;
        HOST_WIDE_INT habs_den = hden, htwice;

        /* Get absolute values.  */
        if (*hrem < 0)
          neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
        if (hden < 0)
          neg_double (lden, hden, &labs_den, &habs_den);

        /* If (2 * abs (lrem) >= abs (lden)) */
        mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
                    labs_rem, habs_rem, &ltwice, &htwice);

        if (((unsigned HOST_WIDE_INT) habs_den
             < (unsigned HOST_WIDE_INT) htwice)
            || (((unsigned HOST_WIDE_INT) habs_den
                 == (unsigned HOST_WIDE_INT) htwice)
                && (labs_den < ltwice)))
          {
            if (*hquo < 0)
              /* quo = quo - 1; */
              add_double (*lquo, *hquo,
                          (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
            else
              /* quo = quo + 1; */
              add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                          lquo, hquo);
          }
        else
          return overflow;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
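
/* Illustrative sketch, not part of the original file: the rounding
   modes only differ when the remainder is nonzero.  For -7 / 2 signed,
   TRUNC gives quotient -3 and remainder -1, while FLOOR rounds the
   quotient down to -4, leaving remainder 1.  */
#if 0
static void
example_div_and_round (void)
{
  unsigned HOST_WIDE_INT lquo, lrem;
  HOST_WIDE_INT hquo, hrem;

  div_and_round_double (FLOOR_DIV_EXPR, 0,
                        (unsigned HOST_WIDE_INT) -7, -1,  /* -7 */
                        2, 0,                             /* 2 */
                        &lquo, &hquo, &lrem, &hrem);
  /* Quotient is -4 == {(unsigned HOST_WIDE_INT) -4, -1}; remainder 1.  */
  gcc_assert (hquo == -1 && lquo == (unsigned HOST_WIDE_INT) -4);
  gcc_assert (hrem == 0 && lrem == 1);
}
#endif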
/* Return true if the built-in mathematical function specified by CODE
   preserves the sign of its argument, i.e. -f(x) == f(-x).  */

static bool
negate_mathfn_p (enum built_in_function code)
{
  switch (code)
    {
    case BUILT_IN_ASIN:
    case BUILT_IN_ASINF:
    case BUILT_IN_ASINL:
    case BUILT_IN_ATAN:
    case BUILT_IN_ATANF:
    case BUILT_IN_ATANL:
    case BUILT_IN_SIN:
    case BUILT_IN_SINF:
    case BUILT_IN_SINL:
    case BUILT_IN_TAN:
    case BUILT_IN_TANF:
    case BUILT_IN_TANL:
      return true;

    default:
      break;
    }
  return false;
}
/* Check whether we may negate an integer constant T without causing
   overflow.  */

static bool
may_negate_without_overflow_p (tree t)
{
  unsigned HOST_WIDE_INT val;
  unsigned int prec;
  tree type;

  gcc_assert (TREE_CODE (t) == INTEGER_CST);

  type = TREE_TYPE (t);
  if (TYPE_UNSIGNED (type))
    return false;

  prec = TYPE_PRECISION (type);
  if (prec > HOST_BITS_PER_WIDE_INT)
    {
      if (TREE_INT_CST_LOW (t) != 0)
        return true;
      prec -= HOST_BITS_PER_WIDE_INT;
      val = TREE_INT_CST_HIGH (t);
    }
  else
    val = TREE_INT_CST_LOW (t);
  if (prec < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
  return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1));
}
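
/* Illustrative sketch, not part of the original file: for a 32-bit
   signed type, the single value that cannot be negated is INT_MIN,
   whose bit pattern is exactly the sign-bit-only value the final
   comparison above tests for.  The type parameter is hypothetical.  */
#if 0
static void
example_may_negate (tree int32_type)
{
  gcc_assert (!may_negate_without_overflow_p
              (build_int_cst (int32_type, -2147483647 - 1)));  /* INT_MIN */
  gcc_assert (may_negate_without_overflow_p
              (build_int_cst (int32_type, 2147483647)));       /* INT_MAX */
}
#endif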
/* Determine whether an expression T can be cheaply negated using
   the function negate_expr.  */

static bool
negate_expr_p (tree t)
{
  tree type;

  if (t == 0)
    return false;

  type = TREE_TYPE (t);

  STRIP_SIGN_NOPS (t);
  switch (TREE_CODE (t))
    {
    case INTEGER_CST:
      if (TYPE_UNSIGNED (type) || ! flag_trapv)
        return true;

      /* Check that -CST will not overflow type.  */
      return may_negate_without_overflow_p (t);

    case REAL_CST:
    case NEGATE_EXPR:
      return true;

    case COMPLEX_CST:
      return negate_expr_p (TREE_REALPART (t))
             && negate_expr_p (TREE_IMAGPART (t));

    case PLUS_EXPR:
      if (FLOAT_TYPE_P (type) && !flag_unsafe_math_optimizations)
        return false;
      /* -(A + B) -> (-B) - A.  */
      if (negate_expr_p (TREE_OPERAND (t, 1))
          && reorder_operands_p (TREE_OPERAND (t, 0),
                                 TREE_OPERAND (t, 1)))
        return true;
      /* -(A + B) -> (-A) - B.  */
      return negate_expr_p (TREE_OPERAND (t, 0));

    case MINUS_EXPR:
      /* We can't turn -(A-B) into B-A when we honor signed zeros.  */
      return (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
             && reorder_operands_p (TREE_OPERAND (t, 0),
                                    TREE_OPERAND (t, 1));

    case MULT_EXPR:
      if (TYPE_UNSIGNED (TREE_TYPE (t)))
        break;

      /* Fall through.  */

    case RDIV_EXPR:
      if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
        return negate_expr_p (TREE_OPERAND (t, 1))
               || negate_expr_p (TREE_OPERAND (t, 0));
      break;

    case NOP_EXPR:
      /* Negate -((double)float) as (double)(-float).  */
      if (TREE_CODE (type) == REAL_TYPE)
        {
          tree tem = strip_float_extensions (t);
          if (tem != t)
            return negate_expr_p (tem);
        }
      break;

    case CALL_EXPR:
      /* Negate -f(x) as f(-x).  */
      if (negate_mathfn_p (builtin_mathfn_code (t)))
        return negate_expr_p (TREE_VALUE (TREE_OPERAND (t, 1)));
      break;

    case RSHIFT_EXPR:
      /* Optimize -((int)x >> 31) into (unsigned)x >> 31.  */
      if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
        {
          tree op1 = TREE_OPERAND (t, 1);
          if (TREE_INT_CST_HIGH (op1) == 0
              && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
                 == TREE_INT_CST_LOW (op1))
            return true;
        }
      break;

    default:
      break;
    }
  return false;
}
/* Given T, an expression, return the negation of T.  Allow for T to be
   null, in which case return null.  */

static tree
negate_expr (tree t)
{
  tree type;
  tree tem;

  if (t == 0)
    return 0;

  type = TREE_TYPE (t);
  STRIP_SIGN_NOPS (t);

  switch (TREE_CODE (t))
    {
    case INTEGER_CST:
      tem = fold_negate_const (t, type);
      if (! TREE_OVERFLOW (tem)
          || TYPE_UNSIGNED (type)
          || ! flag_trapv)
        return tem;
      break;

    case REAL_CST:
      tem = fold_negate_const (t, type);
      /* Two's complement FP formats, such as c4x, may overflow.  */
      if (! TREE_OVERFLOW (tem) || ! flag_trapping_math)
        return fold_convert (type, tem);
      break;

    case COMPLEX_CST:
      {
        tree rpart = negate_expr (TREE_REALPART (t));
        tree ipart = negate_expr (TREE_IMAGPART (t));

        if ((TREE_CODE (rpart) == REAL_CST
             && TREE_CODE (ipart) == REAL_CST)
            || (TREE_CODE (rpart) == INTEGER_CST
                && TREE_CODE (ipart) == INTEGER_CST))
          return build_complex (type, rpart, ipart);
      }
      break;

    case NEGATE_EXPR:
      return fold_convert (type, TREE_OPERAND (t, 0));

    case PLUS_EXPR:
      if (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
        {
          /* -(A + B) -> (-B) - A.  */
          if (negate_expr_p (TREE_OPERAND (t, 1))
              && reorder_operands_p (TREE_OPERAND (t, 0),
                                     TREE_OPERAND (t, 1)))
            {
              tem = negate_expr (TREE_OPERAND (t, 1));
              tem = fold (build2 (MINUS_EXPR, TREE_TYPE (t),
                                  tem, TREE_OPERAND (t, 0)));
              return fold_convert (type, tem);
            }

          /* -(A + B) -> (-A) - B.  */
          if (negate_expr_p (TREE_OPERAND (t, 0)))
            {
              tem = negate_expr (TREE_OPERAND (t, 0));
              tem = fold (build2 (MINUS_EXPR, TREE_TYPE (t),
                                  tem, TREE_OPERAND (t, 1)));
              return fold_convert (type, tem);
            }
        }
      break;

    case MINUS_EXPR:
      /* - (A - B) -> B - A  */
      if ((! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
          && reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1)))
        return fold_convert (type,
                             fold (build2 (MINUS_EXPR, TREE_TYPE (t),
                                           TREE_OPERAND (t, 1),
                                           TREE_OPERAND (t, 0))));
      break;

    case MULT_EXPR:
      if (TYPE_UNSIGNED (TREE_TYPE (t)))
        break;

      /* Fall through.  */

    case RDIV_EXPR:
      if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
        {
          tem = TREE_OPERAND (t, 1);
          if (negate_expr_p (tem))
            return fold_convert (type,
                                 fold (build2 (TREE_CODE (t), TREE_TYPE (t),
                                               TREE_OPERAND (t, 0),
                                               negate_expr (tem))));
          tem = TREE_OPERAND (t, 0);
          if (negate_expr_p (tem))
            return fold_convert (type,
                                 fold (build2 (TREE_CODE (t), TREE_TYPE (t),
                                               negate_expr (tem),
                                               TREE_OPERAND (t, 1))));
        }
      break;

    case NOP_EXPR:
      /* Convert -((double)float) into (double)(-float).  */
      if (TREE_CODE (type) == REAL_TYPE)
        {
          tem = strip_float_extensions (t);
          if (tem != t && negate_expr_p (tem))
            return fold_convert (type, negate_expr (tem));
        }
      break;

    case CALL_EXPR:
      /* Negate -f(x) as f(-x).  */
      if (negate_mathfn_p (builtin_mathfn_code (t))
          && negate_expr_p (TREE_VALUE (TREE_OPERAND (t, 1))))
        {
          tree fndecl, arg, arglist;

          fndecl = get_callee_fndecl (t);
          arg = negate_expr (TREE_VALUE (TREE_OPERAND (t, 1)));
          arglist = build_tree_list (NULL_TREE, arg);
          return build_function_call_expr (fndecl, arglist);
        }
      break;

    case RSHIFT_EXPR:
      /* Optimize -((int)x >> 31) into (unsigned)x >> 31.  */
      if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
        {
          tree op1 = TREE_OPERAND (t, 1);
          if (TREE_INT_CST_HIGH (op1) == 0
              && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
                 == TREE_INT_CST_LOW (op1))
            {
              tree ntype = TYPE_UNSIGNED (type)
                           ? lang_hooks.types.signed_type (type)
                           : lang_hooks.types.unsigned_type (type);
              tree temp = fold_convert (ntype, TREE_OPERAND (t, 0));
              temp = fold (build2 (RSHIFT_EXPR, ntype, temp, op1));
              return fold_convert (type, temp);
            }
        }
      break;

    default:
      break;
    }

  tem = fold (build1 (NEGATE_EXPR, TREE_TYPE (t), t));
  return fold_convert (type, tem);
}
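
/* Illustrative sketch, not part of the original file: the RSHIFT_EXPR
   case above removes the negation from a sign-bit extraction.  For a
   32-bit int x, (x >> 31) is 0 or -1, so -(x >> 31) is 0 or 1, which
   is exactly the logical shift of the sign bit.  */
#if 0
static int
example_negate_rshift (int x)
{
  return (int) ((unsigned int) x >> 31);  /* same as -(x >> 31) */
}
#endif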
/* Split a tree IN into a constant, literal and variable parts that could be
   combined with CODE to make IN.  "constant" means an expression with
   TREE_CONSTANT but that isn't an actual constant.  CODE must be a
   commutative arithmetic operation.  Store the constant part into *CONP,
   the literal in *LITP and return the variable part.  If a part isn't
   present, set it to null.  If the tree does not decompose in this way,
   return the entire tree as the variable part and the other parts as null.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.  In that
   case, we negate an operand that was subtracted.  Except if it is a
   literal for which we use *MINUS_LITP instead.

   If NEGATE_P is true, we are negating all of IN, again except a literal
   for which we use *MINUS_LITP instead.

   If IN is itself a literal or constant, return it as appropriate.

   Note that we do not guarantee that any of the three values will be the
   same type as IN, but they will have the same signedness and mode.  */

static tree
split_tree (tree in, enum tree_code code, tree *conp, tree *litp,
            tree *minus_litp, int negate_p)
{
  tree var = 0;

  *conp = 0;
  *litp = 0;
  *minus_litp = 0;

  /* Strip any conversions that don't change the machine mode or signedness.  */
  STRIP_SIGN_NOPS (in);

  if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST)
    *litp = in;
  else if (TREE_CODE (in) == code
           || (! FLOAT_TYPE_P (TREE_TYPE (in))
               /* We can associate addition and subtraction together (even
                  though the C standard doesn't say so) for integers because
                  the value is not affected.  For reals, the value might be
                  affected, so we can't.  */
               && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
                   || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
    {
      tree op0 = TREE_OPERAND (in, 0);
      tree op1 = TREE_OPERAND (in, 1);
      int neg1_p = TREE_CODE (in) == MINUS_EXPR;
      int neg_litp_p = 0, neg_conp_p = 0, neg_var_p = 0;

      /* First see if either of the operands is a literal, then a constant.  */
      if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST)
        *litp = op0, op0 = 0;
      else if (TREE_CODE (op1) == INTEGER_CST || TREE_CODE (op1) == REAL_CST)
        *litp = op1, neg_litp_p = neg1_p, op1 = 0;

      if (op0 != 0 && TREE_CONSTANT (op0))
        *conp = op0, op0 = 0;
      else if (op1 != 0 && TREE_CONSTANT (op1))
        *conp = op1, neg_conp_p = neg1_p, op1 = 0;

      /* If we haven't dealt with either operand, this is not a case we can
         decompose.  Otherwise, VAR is either of the ones remaining, if any.  */
      if (op0 != 0 && op1 != 0)
        var = in;
      else if (op0 != 0)
        var = op0;
      else
        var = op1, neg_var_p = neg1_p;

      /* Now do any needed negations.  */
      if (neg_litp_p)
        *minus_litp = *litp, *litp = 0;
      if (neg_conp_p)
        *conp = negate_expr (*conp);
      if (neg_var_p)
        var = negate_expr (var);
    }
  else if (TREE_CONSTANT (in))
    *conp = in;
  else
    var = in;

  if (negate_p)
    {
      if (*litp)
        *minus_litp = *litp, *litp = 0;
      else if (*minus_litp)
        *litp = *minus_litp, *minus_litp = 0;
      *conp = negate_expr (*conp);
      var = negate_expr (var);
    }

  return var;
}
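
/* Illustrative sketch, not part of the original file: splitting the
   tree for "x - 4" with CODE == PLUS_EXPR.  The variable part is x,
   and the literal 4 is returned through *MINUS_LITP to record that it
   was subtracted.  X is a hypothetical variable operand.  */
#if 0
static void
example_split_tree (tree x)
{
  tree con, lit, minus_lit, var;
  tree in = build2 (MINUS_EXPR, integer_type_node, x,
                    build_int_cst (integer_type_node, 4));

  var = split_tree (in, PLUS_EXPR, &con, &lit, &minus_lit, 0);
  gcc_assert (var == x && lit == 0 && con == 0 && minus_lit != 0);
}
#endif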
/* Re-associate trees split by the above function.  T1 and T2 are either
   expressions to associate or null.  Return the new expression, if any.  If
   we build an operation, do it in TYPE and with CODE.  */

static tree
associate_trees (tree t1, tree t2, enum tree_code code, tree type)
{
  if (t1 == 0)
    return t2;
  else if (t2 == 0)
    return t1;

  /* If either input is CODE, a PLUS_EXPR, or a MINUS_EXPR, don't
     try to fold this since we will have infinite recursion.  But do
     deal with any NEGATE_EXPRs.  */
  if (TREE_CODE (t1) == code || TREE_CODE (t2) == code
      || TREE_CODE (t1) == MINUS_EXPR || TREE_CODE (t2) == MINUS_EXPR)
    {
      if (code == PLUS_EXPR)
        {
          if (TREE_CODE (t1) == NEGATE_EXPR)
            return build2 (MINUS_EXPR, type, fold_convert (type, t2),
                           fold_convert (type, TREE_OPERAND (t1, 0)));
          else if (TREE_CODE (t2) == NEGATE_EXPR)
            return build2 (MINUS_EXPR, type, fold_convert (type, t1),
                           fold_convert (type, TREE_OPERAND (t2, 0)));
        }
      return build2 (code, type, fold_convert (type, t1),
                     fold_convert (type, t2));
    }

  return fold (build2 (code, type, fold_convert (type, t1),
                       fold_convert (type, t2)));
}
/* Combine two integer constants ARG1 and ARG2 under operation CODE
   to produce a new constant.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */

tree
int_const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
{
  unsigned HOST_WIDE_INT int1l, int2l;
  HOST_WIDE_INT int1h, int2h;
  unsigned HOST_WIDE_INT low;
  HOST_WIDE_INT hi;
  unsigned HOST_WIDE_INT garbagel;
  HOST_WIDE_INT garbageh;
  tree t;
  tree type = TREE_TYPE (arg1);
  int uns = TYPE_UNSIGNED (type);
  int is_sizetype
    = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
  int overflow = 0;
  int no_overflow = 0;

  int1l = TREE_INT_CST_LOW (arg1);
  int1h = TREE_INT_CST_HIGH (arg1);
  int2l = TREE_INT_CST_LOW (arg2);
  int2h = TREE_INT_CST_HIGH (arg2);

  switch (code)
    {
    case BIT_IOR_EXPR:
      low = int1l | int2l, hi = int1h | int2h;
      break;

    case BIT_XOR_EXPR:
      low = int1l ^ int2l, hi = int1h ^ int2h;
      break;

    case BIT_AND_EXPR:
      low = int1l & int2l, hi = int1h & int2h;
      break;

    case RSHIFT_EXPR:
      int2l = -int2l;
    case LSHIFT_EXPR:
      /* It's unclear from the C standard whether shifts can overflow.
         The following code ignores overflow; perhaps a C standard
         interpretation ruling is needed.  */
      lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
                     &low, &hi, !uns);
      no_overflow = 1;
      break;

    case RROTATE_EXPR:
      int2l = - int2l;
    case LROTATE_EXPR:
      lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
                      &low, &hi);
      break;

    case PLUS_EXPR:
      overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
      break;

    case MINUS_EXPR:
      neg_double (int2l, int2h, &low, &hi);
      add_double (int1l, int1h, low, hi, &low, &hi);
      overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h);
      break;

    case MULT_EXPR:
      overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
      break;

    case TRUNC_DIV_EXPR:
    case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
    case EXACT_DIV_EXPR:
      /* This is a shortcut for a common special case.  */
      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
          && ! TREE_CONSTANT_OVERFLOW (arg1)
          && ! TREE_CONSTANT_OVERFLOW (arg2)
          && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
        {
          if (code == CEIL_DIV_EXPR)
            int1l += int2l - 1;

          low = int1l / int2l, hi = 0;
          break;
        }

      /* ... fall through ...  */

    case ROUND_DIV_EXPR:
      if (int2h == 0 && int2l == 1)
        {
          low = int1l, hi = int1h;
          break;
        }
      if (int1l == int2l && int1h == int2h
          && ! (int1l == 0 && int1h == 0))
        {
          low = 1, hi = 0;
          break;
        }
      overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
                                       &low, &hi, &garbagel, &garbageh);
      break;

    case TRUNC_MOD_EXPR:
    case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
      /* This is a shortcut for a common special case.  */
      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
          && ! TREE_CONSTANT_OVERFLOW (arg1)
          && ! TREE_CONSTANT_OVERFLOW (arg2)
          && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
        {
          if (code == CEIL_MOD_EXPR)
            int1l += int2l - 1;
          low = int1l % int2l, hi = 0;
          break;
        }

      /* ... fall through ...  */

    case ROUND_MOD_EXPR:
      overflow = div_and_round_double (code, uns,
                                       int1l, int1h, int2l, int2h,
                                       &garbagel, &garbageh, &low, &hi);
      break;

    case MIN_EXPR:
    case MAX_EXPR:
      if (uns)
        low = (((unsigned HOST_WIDE_INT) int1h
                < (unsigned HOST_WIDE_INT) int2h)
               || (((unsigned HOST_WIDE_INT) int1h
                    == (unsigned HOST_WIDE_INT) int2h)
                   && int1l < int2l));
      else
        low = (int1h < int2h
               || (int1h == int2h && int1l < int2l));

      if (low == (code == MIN_EXPR))
        low = int1l, hi = int1h;
      else
        low = int2l, hi = int2h;
      break;

    default:
      gcc_unreachable ();
    }

  t = build_int_cst_wide (TREE_TYPE (arg1), low, hi);

  if (notrunc)
    {
      /* Propagate overflow flags ourselves.  */
      if (((!uns || is_sizetype) && overflow)
          | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
        {
          t = copy_node (t);
          TREE_OVERFLOW (t) = 1;
          TREE_CONSTANT_OVERFLOW (t) = 1;
        }
      else if (TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2))
        {
          t = copy_node (t);
          TREE_CONSTANT_OVERFLOW (t) = 1;
        }
    }
  else
    t = force_fit_type (t, 1,
                        ((!uns || is_sizetype) && overflow)
                        | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2),
                        TREE_CONSTANT_OVERFLOW (arg1)
                        | TREE_CONSTANT_OVERFLOW (arg2));

  return t;
}
/* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
   constant.  We assume ARG1 and ARG2 have the same data type, or at least
   are the same kind of constant and the same machine mode.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */

static tree
const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
{
  tree t = NULL_TREE;

  if (TREE_CODE (arg1) == INTEGER_CST)
    return int_const_binop (code, arg1, arg2, notrunc);

  if (TREE_CODE (arg1) == REAL_CST)
    {
      enum machine_mode mode;
      REAL_VALUE_TYPE d1;
      REAL_VALUE_TYPE d2;
      REAL_VALUE_TYPE value;
      tree t, type;

      d1 = TREE_REAL_CST (arg1);
      d2 = TREE_REAL_CST (arg2);

      type = TREE_TYPE (arg1);
      mode = TYPE_MODE (type);

      /* Don't perform operation if we honor signaling NaNs and
         either operand is a NaN.  */
      if (HONOR_SNANS (mode)
          && (REAL_VALUE_ISNAN (d1) || REAL_VALUE_ISNAN (d2)))
        return NULL_TREE;

      /* Don't perform operation if it would raise a division
         by zero exception.  */
      if (code == RDIV_EXPR
          && REAL_VALUES_EQUAL (d2, dconst0)
          && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
        return NULL_TREE;

      /* If either operand is a NaN, just return it.  Otherwise, set up
         for floating-point trap; we return an overflow.  */
      if (REAL_VALUE_ISNAN (d1))
        return arg1;
      else if (REAL_VALUE_ISNAN (d2))
        return arg2;

      REAL_ARITHMETIC (value, code, d1, d2);

      t = build_real (type, real_value_truncate (mode, value));

      TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
      TREE_CONSTANT_OVERFLOW (t)
        = TREE_OVERFLOW (t)
          | TREE_CONSTANT_OVERFLOW (arg1)
          | TREE_CONSTANT_OVERFLOW (arg2);
      return t;
    }
  if (TREE_CODE (arg1) == COMPLEX_CST)
    {
      tree type = TREE_TYPE (arg1);
      tree r1 = TREE_REALPART (arg1);
      tree i1 = TREE_IMAGPART (arg1);
      tree r2 = TREE_REALPART (arg2);
      tree i2 = TREE_IMAGPART (arg2);

      switch (code)
        {
        case PLUS_EXPR:
          t = build_complex (type,
                             const_binop (PLUS_EXPR, r1, r2, notrunc),
                             const_binop (PLUS_EXPR, i1, i2, notrunc));
          break;

        case MINUS_EXPR:
          t = build_complex (type,
                             const_binop (MINUS_EXPR, r1, r2, notrunc),
                             const_binop (MINUS_EXPR, i1, i2, notrunc));
          break;

        case MULT_EXPR:
          t = build_complex (type,
                             const_binop (MINUS_EXPR,
                                          const_binop (MULT_EXPR,
                                                       r1, r2, notrunc),
                                          const_binop (MULT_EXPR,
                                                       i1, i2, notrunc),
                                          notrunc),
                             const_binop (PLUS_EXPR,
                                          const_binop (MULT_EXPR,
                                                       r1, i2, notrunc),
                                          const_binop (MULT_EXPR,
                                                       i1, r2, notrunc),
                                          notrunc));
          break;

        case RDIV_EXPR:
          {
            tree magsquared
              = const_binop (PLUS_EXPR,
                             const_binop (MULT_EXPR, r2, r2, notrunc),
                             const_binop (MULT_EXPR, i2, i2, notrunc),
                             notrunc);

            t = build_complex (type,
                               const_binop
                               (INTEGRAL_TYPE_P (TREE_TYPE (r1))
                                ? TRUNC_DIV_EXPR : RDIV_EXPR,
                                const_binop (PLUS_EXPR,
                                             const_binop (MULT_EXPR, r1, r2,
                                                          notrunc),
                                             const_binop (MULT_EXPR, i1, i2,
                                                          notrunc),
                                             notrunc),
                                magsquared, notrunc),
                               const_binop
                               (INTEGRAL_TYPE_P (TREE_TYPE (r1))
                                ? TRUNC_DIV_EXPR : RDIV_EXPR,
                                const_binop (MINUS_EXPR,
                                             const_binop (MULT_EXPR, i1, r2,
                                                          notrunc),
                                             const_binop (MULT_EXPR, r1, i2,
                                                          notrunc),
                                             notrunc),
                                magsquared, notrunc));
          }
          break;

        default:
          gcc_unreachable ();
        }
      return t;
    }
  return 0;
}
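
/* Illustrative note, not part of the original file: the MULT_EXPR and
   RDIV_EXPR cases above are the textbook complex identities
     (a + bi) * (c + di) = (ac - bd) + (ad + bc)i
     (a + bi) / (c + di) = ((ac + bd) + (bc - ad)i) / (c*c + d*d)
   with each sub-product computed by recursive calls to const_binop.  */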
/* Create a size type INT_CST node with NUMBER sign extended.  KIND
   indicates which particular sizetype to create.  */

tree
size_int_kind (HOST_WIDE_INT number, enum size_type_kind kind)
{
  return build_int_cst (sizetype_tab[(int) kind], number);
}

/* Combine operands OP1 and OP2 with arithmetic operation CODE.  CODE
   is a tree code.  The type of the result is taken from the operands.
   Both must be the same integer type and it must be a size type.
   If the operands are constant, so is the result.  */

tree
size_binop (enum tree_code code, tree arg0, tree arg1)
{
  tree type = TREE_TYPE (arg0);

  gcc_assert (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
              && type == TREE_TYPE (arg1));

  /* Handle the special case of two integer constants faster.  */
  if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
    {
      /* And some specific cases even faster than that.  */
      if (code == PLUS_EXPR && integer_zerop (arg0))
        return arg1;
      else if ((code == MINUS_EXPR || code == PLUS_EXPR)
               && integer_zerop (arg1))
        return arg0;
      else if (code == MULT_EXPR && integer_onep (arg0))
        return arg1;

      /* Handle general case of two integer constants.  */
      return int_const_binop (code, arg0, arg1, 0);
    }

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return error_mark_node;

  return fold (build2 (code, type, arg0, arg1));
}

/* Given two values, either both of sizetype or both of bitsizetype,
   compute the difference between the two values.  Return the value
   in signed type corresponding to the type of the operands.  */

tree
size_diffop (tree arg0, tree arg1)
{
  tree type = TREE_TYPE (arg0);
  tree ctype;

  gcc_assert (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
              && type == TREE_TYPE (arg1));

  /* If the type is already signed, just do the simple thing.  */
  if (!TYPE_UNSIGNED (type))
    return size_binop (MINUS_EXPR, arg0, arg1);

  ctype = type == bitsizetype ? sbitsizetype : ssizetype;

  /* If either operand is not a constant, do the conversions to the signed
     type and subtract.  The hardware will do the right thing with any
     overflow in the subtraction.  */
  if (TREE_CODE (arg0) != INTEGER_CST || TREE_CODE (arg1) != INTEGER_CST)
    return size_binop (MINUS_EXPR, fold_convert (ctype, arg0),
                       fold_convert (ctype, arg1));

  /* If ARG0 is larger than ARG1, subtract and return the result in CTYPE.
     Otherwise, subtract the other way, convert to CTYPE (we know that can't
     overflow) and negate (which can't either).  Special-case a result
     of zero while we're here.  */
  if (tree_int_cst_equal (arg0, arg1))
    return fold_convert (ctype, integer_zero_node);
  else if (tree_int_cst_lt (arg1, arg0))
    return fold_convert (ctype, size_binop (MINUS_EXPR, arg0, arg1));
  else
    return size_binop (MINUS_EXPR, fold_convert (ctype, integer_zero_node),
                       fold_convert (ctype, size_binop (MINUS_EXPR,
                                                        arg1, arg0)));
}
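
/* Illustrative sketch, not part of the original file: size_diffop
   returns a signed result, so subtracting a larger sizetype constant
   from a smaller one yields a proper negative value instead of a
   wrapped-around unsigned one.  */
#if 0
static tree
example_size_diffop (void)
{
  return size_diffop (size_int (2), size_int (5));  /* ssizetype -3 */
}
#endif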
/* Construct a vector of zero elements of vector type TYPE.  */

static tree
build_zero_vector (tree type)
{
  tree elem, list;
  int i, units;

  elem = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
  units = TYPE_VECTOR_SUBPARTS (type);

  list = NULL_TREE;
  for (i = 0; i < units; i++)
    list = tree_cons (NULL_TREE, elem, list);
  return build_vector (type, list);
}
/* Attempt to fold type conversion operation CODE of expression ARG1 to
   type TYPE.  If no simplification can be done return NULL_TREE.  */

static tree
fold_convert_const (enum tree_code code, tree type, tree arg1)
{
  int overflow = 0;
  tree t;

  if (TREE_TYPE (arg1) == type)
    return arg1;

  if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
        {
          /* If we would build a constant wider than GCC supports,
             leave the conversion unfolded.  */
          if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
            return NULL_TREE;

          /* Given an integer constant, make new constant with new type,
             appropriately sign-extended or truncated.  */
          t = build_int_cst_wide (type, TREE_INT_CST_LOW (arg1),
                                  TREE_INT_CST_HIGH (arg1));

          t = force_fit_type (t,
                              /* Don't set the overflow when
                                 converting a pointer  */
                              !POINTER_TYPE_P (TREE_TYPE (arg1)),
                              (TREE_INT_CST_HIGH (arg1) < 0
                               && (TYPE_UNSIGNED (type)
                                   < TYPE_UNSIGNED (TREE_TYPE (arg1))))
                              | TREE_OVERFLOW (arg1),
                              TREE_CONSTANT_OVERFLOW (arg1));

          return t;
        }
      else if (TREE_CODE (arg1) == REAL_CST)
        {
          /* The following code implements the floating point to integer
             conversion rules required by the Java Language Specification,
             that IEEE NaNs are mapped to zero and values that overflow
             the target precision saturate, i.e. values greater than
             INT_MAX are mapped to INT_MAX, and values less than INT_MIN
             are mapped to INT_MIN.  These semantics are allowed by the
             C and C++ standards that simply state that the behavior of
             FP-to-integer conversion is unspecified upon overflow.  */

          HOST_WIDE_INT high, low;
          REAL_VALUE_TYPE r;
          REAL_VALUE_TYPE x = TREE_REAL_CST (arg1);

          switch (code)
            {
            case FIX_TRUNC_EXPR:
              real_trunc (&r, VOIDmode, &x);
              break;

            case FIX_CEIL_EXPR:
              real_ceil (&r, VOIDmode, &x);
              break;

            case FIX_FLOOR_EXPR:
              real_floor (&r, VOIDmode, &x);
              break;

            case FIX_ROUND_EXPR:
              real_round (&r, VOIDmode, &x);
              break;

            default:
              gcc_unreachable ();
            }

          /* If R is NaN, return zero and show we have an overflow.  */
          if (REAL_VALUE_ISNAN (r))
            {
              overflow = 1;
              high = 0;
              low = 0;
            }

          /* See if R is less than the lower bound or greater than the
             upper bound.  */

          if (! overflow)
            {
              tree lt = TYPE_MIN_VALUE (type);
              REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt);
              if (REAL_VALUES_LESS (r, l))
                {
                  overflow = 1;
                  high = TREE_INT_CST_HIGH (lt);
                  low = TREE_INT_CST_LOW (lt);
                }
            }

          if (! overflow)
            {
              tree ut = TYPE_MAX_VALUE (type);
              if (ut)
                {
                  REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut);
                  if (REAL_VALUES_LESS (u, r))
                    {
                      overflow = 1;
                      high = TREE_INT_CST_HIGH (ut);
                      low = TREE_INT_CST_LOW (ut);
                    }
                }
            }

          if (! overflow)
            REAL_VALUE_TO_INT (&low, &high, r);

          t = build_int_cst_wide (type, low, high);

          t = force_fit_type (t, -1, overflow | TREE_OVERFLOW (arg1),
                              TREE_CONSTANT_OVERFLOW (arg1));
          return t;
        }
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
        return build_real_from_int_cst (type, arg1);
      if (TREE_CODE (arg1) == REAL_CST)
        {
          if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
            {
              /* We make a copy of ARG1 so that we don't modify an
                 existing constant tree.  */
              t = copy_node (arg1);
              TREE_TYPE (t) = type;
              return t;
            }

          t = build_real (type,
                          real_value_truncate (TYPE_MODE (type),
                                               TREE_REAL_CST (arg1)));

          TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
          TREE_CONSTANT_OVERFLOW (t)
            = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
          return t;
        }
    }
  return NULL_TREE;
}
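
/* Illustrative note, not part of the original file: under the rules
   implemented above, folding (int) 1.0e30 for a 32-bit int yields
   INT_MAX, (int) -1.0e30 yields INT_MIN, and (int) NaN yields 0, in
   each case with TREE_OVERFLOW set on the result.  */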
/* Convert expression ARG to type TYPE.  Used by the middle-end for
   simple conversions in preference to calling the front-end's convert.  */

tree
fold_convert (tree type, tree arg)
{
  tree orig = TREE_TYPE (arg);
  tree tem;

  if (type == orig)
    return arg;

  if (TREE_CODE (arg) == ERROR_MARK
      || TREE_CODE (type) == ERROR_MARK
      || TREE_CODE (orig) == ERROR_MARK)
    return error_mark_node;

  if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig)
      || lang_hooks.types_compatible_p (TYPE_MAIN_VARIANT (type),
                                        TYPE_MAIN_VARIANT (orig)))
    return fold (build1 (NOP_EXPR, type, arg));

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE: case CHAR_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
    case POINTER_TYPE: case REFERENCE_TYPE:
    case OFFSET_TYPE:
      if (TREE_CODE (arg) == INTEGER_CST)
        {
          tem = fold_convert_const (NOP_EXPR, type, arg);
          if (tem != NULL_TREE)
            return tem;
        }
      if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
          || TREE_CODE (orig) == OFFSET_TYPE)
        return fold (build1 (NOP_EXPR, type, arg));
      if (TREE_CODE (orig) == COMPLEX_TYPE)
        {
          tem = fold (build1 (REALPART_EXPR, TREE_TYPE (orig), arg));
          return fold_convert (type, tem);
        }
      gcc_assert (TREE_CODE (orig) == VECTOR_TYPE
                  && tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
      return fold (build1 (NOP_EXPR, type, arg));

    case REAL_TYPE:
      if (TREE_CODE (arg) == INTEGER_CST)
        {
          tem = fold_convert_const (FLOAT_EXPR, type, arg);
          if (tem != NULL_TREE)
            return tem;
        }
      else if (TREE_CODE (arg) == REAL_CST)
        {
          tem = fold_convert_const (NOP_EXPR, type, arg);
          if (tem != NULL_TREE)
            return tem;
        }

      switch (TREE_CODE (orig))
        {
        case INTEGER_TYPE: case CHAR_TYPE:
        case BOOLEAN_TYPE: case ENUMERAL_TYPE:
        case POINTER_TYPE: case REFERENCE_TYPE:
          return fold (build1 (FLOAT_EXPR, type, arg));

        case REAL_TYPE:
          return fold (build1 (flag_float_store ? CONVERT_EXPR : NOP_EXPR,
                               type, arg));

        case COMPLEX_TYPE:
          tem = fold (build1 (REALPART_EXPR, TREE_TYPE (orig), arg));
          return fold_convert (type, tem);

        default:
          gcc_unreachable ();
        }

    case COMPLEX_TYPE:
      switch (TREE_CODE (orig))
        {
        case INTEGER_TYPE: case CHAR_TYPE:
        case BOOLEAN_TYPE: case ENUMERAL_TYPE:
        case POINTER_TYPE: case REFERENCE_TYPE:
        case REAL_TYPE:
          return build2 (COMPLEX_EXPR, type,
                         fold_convert (TREE_TYPE (type), arg),
                         fold_convert (TREE_TYPE (type), integer_zero_node));

        case COMPLEX_TYPE:
          {
            tree rpart, ipart;

            if (TREE_CODE (arg) == COMPLEX_EXPR)
              {
                rpart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 0));
                ipart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 1));
                return fold (build2 (COMPLEX_EXPR, type, rpart, ipart));
              }

            arg = save_expr (arg);
            rpart = fold (build1 (REALPART_EXPR, TREE_TYPE (orig), arg));
            ipart = fold (build1 (IMAGPART_EXPR, TREE_TYPE (orig), arg));
            rpart = fold_convert (TREE_TYPE (type), rpart);
            ipart = fold_convert (TREE_TYPE (type), ipart);
            return fold (build2 (COMPLEX_EXPR, type, rpart, ipart));
          }

        default:
          gcc_unreachable ();
        }

    case VECTOR_TYPE:
      if (integer_zerop (arg))
        return build_zero_vector (type);
      gcc_assert (tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
      gcc_assert (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
                  || TREE_CODE (orig) == VECTOR_TYPE);
      return fold (build1 (NOP_EXPR, type, arg));

    case VOID_TYPE:
      return fold (build1 (CONVERT_EXPR, type, fold_ignored_result (arg)));

    default:
      gcc_unreachable ();
    }
}
/* Return an expr equal to X but certainly not valid as an lvalue.  */

tree
non_lvalue (tree x)
{
  /* We only need to wrap lvalue tree codes.  */
  switch (TREE_CODE (x))
  {
  case VAR_DECL:
  case PARM_DECL:
  case RESULT_DECL:
  case LABEL_DECL:
  case FUNCTION_DECL:
  case SSA_NAME:

  case COMPONENT_REF:
  case INDIRECT_REF:
  case ARRAY_REF:
  case ARRAY_RANGE_REF:
  case BIT_FIELD_REF:
  case OBJ_TYPE_REF:

  case REALPART_EXPR:
  case IMAGPART_EXPR:
  case PREINCREMENT_EXPR:
  case PREDECREMENT_EXPR:
  case SAVE_EXPR:
  case TRY_CATCH_EXPR:
  case WITH_CLEANUP_EXPR:
  case COMPOUND_EXPR:
  case MODIFY_EXPR:
  case TARGET_EXPR:
  case COND_EXPR:
  case BIND_EXPR:
  case MIN_EXPR:
  case MAX_EXPR:
    break;

  default:
    /* Assume the worst for front-end tree codes.  */
    if ((int)TREE_CODE (x) >= NUM_TREE_CODES)
      break;
    return x;
  }
  return build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
}
/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
   Zero means allow extended lvalues.  */

int pedantic_lvalues;

/* When pedantic, return an expr equal to X but certainly not valid as a
   pedantic lvalue.  Otherwise, return X.  */

static tree
pedantic_non_lvalue (tree x)
{
  if (pedantic_lvalues)
    return non_lvalue (x);
  else
    return x;
}
/* Given a tree comparison code, return the code that is the logical inverse
   of the given code.  It is not safe to do this for floating-point
   comparisons, except for NE_EXPR and EQ_EXPR, so we receive a machine mode
   as well: if reversing the comparison is unsafe, return ERROR_MARK.  */

static enum tree_code
invert_tree_comparison (enum tree_code code, bool honor_nans)
{
  if (honor_nans && flag_trapping_math)
    return ERROR_MARK;

  switch (code)
    {
    case EQ_EXPR:
      return NE_EXPR;
    case NE_EXPR:
      return EQ_EXPR;
    case GT_EXPR:
      return honor_nans ? UNLE_EXPR : LE_EXPR;
    case GE_EXPR:
      return honor_nans ? UNLT_EXPR : LT_EXPR;
    case LT_EXPR:
      return honor_nans ? UNGE_EXPR : GE_EXPR;
    case LE_EXPR:
      return honor_nans ? UNGT_EXPR : GT_EXPR;
    case LTGT_EXPR:
      return UNEQ_EXPR;
    case UNEQ_EXPR:
      return LTGT_EXPR;
    case UNGT_EXPR:
      return LE_EXPR;
    case UNGE_EXPR:
      return LT_EXPR;
    case UNLT_EXPR:
      return GE_EXPR;
    case UNLE_EXPR:
      return GT_EXPR;
    case ORDERED_EXPR:
      return UNORDERED_EXPR;
    case UNORDERED_EXPR:
      return ORDERED_EXPR;
    default:
      gcc_unreachable ();
    }
}
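
/* Illustrative note, not part of the original file: when NaNs are
   honored, !(x > y) is not x <= y, because both comparisons are false
   if either operand is a NaN; it is x UNLE y, which is why GT_EXPR
   inverts to UNLE_EXPR above.  */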
/* Similar, but return the comparison that results if the operands are
   swapped.  This is safe for floating-point.  */

enum tree_code
swap_tree_comparison (enum tree_code code)
{
  switch (code)
    {
    case EQ_EXPR:
    case NE_EXPR:
      return code;
    case GT_EXPR:
      return LT_EXPR;
    case GE_EXPR:
      return LE_EXPR;
    case LT_EXPR:
      return GT_EXPR;
    case LE_EXPR:
      return GE_EXPR;
    default:
      gcc_unreachable ();
    }
}
/* Convert a comparison tree code from an enum tree_code representation
   into a compcode bit-based encoding.  This function is the inverse of
   compcode_to_comparison.  */

static enum comparison_code
comparison_to_compcode (enum tree_code code)
{
  switch (code)
    {
    case LT_EXPR:
      return COMPCODE_LT;
    case EQ_EXPR:
      return COMPCODE_EQ;
    case LE_EXPR:
      return COMPCODE_LE;
    case GT_EXPR:
      return COMPCODE_GT;
    case NE_EXPR:
      return COMPCODE_NE;
    case GE_EXPR:
      return COMPCODE_GE;
    case ORDERED_EXPR:
      return COMPCODE_ORD;
    case UNORDERED_EXPR:
      return COMPCODE_UNORD;
    case UNLT_EXPR:
      return COMPCODE_UNLT;
    case UNEQ_EXPR:
      return COMPCODE_UNEQ;
    case UNLE_EXPR:
      return COMPCODE_UNLE;
    case UNGT_EXPR:
      return COMPCODE_UNGT;
    case LTGT_EXPR:
      return COMPCODE_LTGT;
    case UNGE_EXPR:
      return COMPCODE_UNGE;
    default:
      gcc_unreachable ();
    }
}
/* Convert a compcode bit-based encoding of a comparison operator back
   to GCC's enum tree_code representation.  This function is the
   inverse of comparison_to_compcode.  */

static enum tree_code
compcode_to_comparison (enum comparison_code code)
{
  switch (code)
    {
    case COMPCODE_LT:
      return LT_EXPR;
    case COMPCODE_EQ:
      return EQ_EXPR;
    case COMPCODE_LE:
      return LE_EXPR;
    case COMPCODE_GT:
      return GT_EXPR;
    case COMPCODE_NE:
      return NE_EXPR;
    case COMPCODE_GE:
      return GE_EXPR;
    case COMPCODE_ORD:
      return ORDERED_EXPR;
    case COMPCODE_UNORD:
      return UNORDERED_EXPR;
    case COMPCODE_UNLT:
      return UNLT_EXPR;
    case COMPCODE_UNEQ:
      return UNEQ_EXPR;
    case COMPCODE_UNLE:
      return UNLE_EXPR;
    case COMPCODE_UNGT:
      return UNGT_EXPR;
    case COMPCODE_LTGT:
      return LTGT_EXPR;
    case COMPCODE_UNGE:
      return UNGE_EXPR;
    default:
      gcc_unreachable ();
    }
}
/* Return a tree for the comparison which is the combination of
   doing the AND or OR (depending on CODE) of the two operations LCODE
   and RCODE on the identical operands LL_ARG and LR_ARG.  Take into account
   the possibility of trapping if the mode has NaNs, and return NULL_TREE
   if this makes the transformation invalid.  */

static tree
combine_comparisons (enum tree_code code, enum tree_code lcode,
                     enum tree_code rcode, tree truth_type,
                     tree ll_arg, tree lr_arg)
{
  bool honor_nans = HONOR_NANS (TYPE_MODE (TREE_TYPE (ll_arg)));
  enum comparison_code lcompcode = comparison_to_compcode (lcode);
  enum comparison_code rcompcode = comparison_to_compcode (rcode);
  enum comparison_code compcode;

  switch (code)
    {
    case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR:
      compcode = lcompcode & rcompcode;
      break;

    case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR:
      compcode = lcompcode | rcompcode;
      break;

    default:
      return NULL_TREE;
    }

  if (!honor_nans)
    {
      /* Eliminate unordered comparisons, as well as LTGT and ORD
         which are not used unless the mode has NaNs.  */
      compcode &= ~COMPCODE_UNORD;
      if (compcode == COMPCODE_LTGT)
        compcode = COMPCODE_NE;
      else if (compcode == COMPCODE_ORD)
        compcode = COMPCODE_TRUE;
    }
  else if (flag_trapping_math)
    {
      /* Check that the original operation and the optimized ones will trap
         under the same condition.  */
      bool ltrap = (lcompcode & COMPCODE_UNORD) == 0
                   && (lcompcode != COMPCODE_EQ)
                   && (lcompcode != COMPCODE_ORD);
      bool rtrap = (rcompcode & COMPCODE_UNORD) == 0
                   && (rcompcode != COMPCODE_EQ)
                   && (rcompcode != COMPCODE_ORD);
      bool trap = (compcode & COMPCODE_UNORD) == 0
                  && (compcode != COMPCODE_EQ)
                  && (compcode != COMPCODE_ORD);

      /* In a short-circuited boolean expression the LHS might be
         such that the RHS, if evaluated, will never trap.  For
         example, in ORD (x, y) && (x < y), we evaluate the RHS only
         if neither x nor y is NaN.  (This is a mixed blessing: for
         example, the expression above will never trap, hence
         optimizing it to x < y would be invalid).  */
      if ((code == TRUTH_ORIF_EXPR && (lcompcode & COMPCODE_UNORD))
          || (code == TRUTH_ANDIF_EXPR && !(lcompcode & COMPCODE_UNORD)))
        rtrap = false;

      /* If the comparison was short-circuited, and only the RHS
         trapped, we may now generate a spurious trap.  */
      if (rtrap && !ltrap
          && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
        return NULL_TREE;

      /* If we changed the conditions that cause a trap, we lose.  */
      if ((ltrap || rtrap) != trap)
        return NULL_TREE;
    }

  if (compcode == COMPCODE_TRUE)
    return constant_boolean_node (true, truth_type);
  else if (compcode == COMPCODE_FALSE)
    return constant_boolean_node (false, truth_type);
  else
    return fold (build2 (compcode_to_comparison (compcode),
                         truth_type, ll_arg, lr_arg));
}
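
/* Illustrative note, not part of the original file: the encoding turns
   logic on comparisons into bit arithmetic.  With LT = 1, EQ = 2,
   GT = 4 and UNORD = 8, (x < y) || (x == y) combines to
   COMPCODE_LT | COMPCODE_EQ == COMPCODE_LE and folds to x <= y, while
   (x < y) && (x == y) combines to COMPCODE_FALSE and folds to false.  */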
/* Return nonzero if CODE is a tree code that represents a truth value.  */

static int
truth_value_p (enum tree_code code)
{
  return (TREE_CODE_CLASS (code) == '<'
          || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
          || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
          || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
}
2290 /* Return nonzero if two operands (typically of the same tree node)
2291 are necessarily equal. If either argument has side-effects this
2292 function returns zero. FLAGS modifies behavior as follows:
2294 If OEP_ONLY_CONST is set, only return nonzero for constants.
2295 This function tests whether the operands are indistinguishable;
2296 it does not test whether they are equal using C's == operation.
2297 The distinction is important for IEEE floating point, because
2298 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
2299 (2) two NaNs may be indistinguishable, but NaN!=NaN.
2301 If OEP_ONLY_CONST is unset, a VAR_DECL is considered equal to itself
2302 even though it may hold multiple values during a function.
2303 This is because a GCC tree node guarantees that nothing else is
2304 executed between the evaluation of its "operands" (which may often
2305 be evaluated in arbitrary order). Hence if the operands themselves
2306 don't side-effect, the VAR_DECLs, PARM_DECLs etc... must hold the
2307 same value in each operand/subexpression. Hence leaving OEP_ONLY_CONST
2308 unset means assuming isochronic (or instantaneous) tree equivalence.
2309 Unless comparing arbitrary expression trees, such as from different
2310 statements, this flag can usually be left unset.
2312 If OEP_PURE_SAME is set, then pure functions with identical arguments
2313 are considered the same. It is used when the caller has other ways
2314 to ensure that global memory is unchanged in between. */
2317 operand_equal_p (tree arg0, tree arg1, unsigned int flags)
2319 /* If one is specified and the other isn't, they aren't equal and if
2320 neither is specified, they are.
2322 ??? This is temporary and is meant only to handle the cases of the
2323 optional operands for COMPONENT_REF and ARRAY_REF. */
2324 if ((arg0 && !arg1) || (!arg0 && arg1))
2326 else if (!arg0 && !arg1)
2328 /* If either is ERROR_MARK, they aren't equal. */
2329 else if (TREE_CODE (arg0) == ERROR_MARK || TREE_CODE (arg1) == ERROR_MARK)
2332 /* If both types don't have the same signedness, then we can't consider
2333 them equal. We must check this before the STRIP_NOPS calls
2334 because they may change the signedness of the arguments. */
2335 if (TYPE_UNSIGNED (TREE_TYPE (arg0)) != TYPE_UNSIGNED (TREE_TYPE (arg1)))
2341 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2342 /* This is needed for conversions and for COMPONENT_REF.
2343 Might as well play it safe and always test this. */
2344 || TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK
2345 || TREE_CODE (TREE_TYPE (arg1)) == ERROR_MARK
2346 || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
2349 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
2350 We don't care about side effects in that case because the SAVE_EXPR
2351 takes care of that for us. In all other cases, two expressions are
2352 equal if they have no side effects. If we have two identical
2353 expressions with side effects that should be treated the same due
2354 to the only side effects being identical SAVE_EXPR's, that will
2355 be detected in the recursive calls below. */
2356 if (arg0 == arg1 && ! (flags & OEP_ONLY_CONST)
2357 && (TREE_CODE (arg0) == SAVE_EXPR
2358 || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
2361 /* Next handle constant cases, those for which we can return 1 even
2362 if ONLY_CONST is set. */
2363 if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1))
2364 switch (TREE_CODE (arg0))
2367 return (! TREE_CONSTANT_OVERFLOW (arg0)
2368 && ! TREE_CONSTANT_OVERFLOW (arg1)
2369 && tree_int_cst_equal (arg0, arg1));
2372 return (! TREE_CONSTANT_OVERFLOW (arg0)
2373 && ! TREE_CONSTANT_OVERFLOW (arg1)
2374 && REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0),
2375 TREE_REAL_CST (arg1)));
2381 if (TREE_CONSTANT_OVERFLOW (arg0)
2382 || TREE_CONSTANT_OVERFLOW (arg1))
2385 v1 = TREE_VECTOR_CST_ELTS (arg0);
2386 v2 = TREE_VECTOR_CST_ELTS (arg1);
2389 if (!operand_equal_p (TREE_VALUE (v1), TREE_VALUE (v2),
2392 v1 = TREE_CHAIN (v1);
2393 v2 = TREE_CHAIN (v2);
2400 return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1),
2402 && operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1),
2406 return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1)
2407 && ! memcmp (TREE_STRING_POINTER (arg0),
2408 TREE_STRING_POINTER (arg1),
2409 TREE_STRING_LENGTH (arg0)));
2412 return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0),
2418 if (flags & OEP_ONLY_CONST)
2421 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
2424 /* Two conversions are equal only if signedness and modes match. */
2425 switch (TREE_CODE (arg0))
2430 case FIX_TRUNC_EXPR:
2431 case FIX_FLOOR_EXPR:
2432 case FIX_ROUND_EXPR:
2433 if (TYPE_UNSIGNED (TREE_TYPE (arg0))
2434 != TYPE_UNSIGNED (TREE_TYPE (arg1)))
2441 return operand_equal_p (TREE_OPERAND (arg0, 0),
2442 TREE_OPERAND (arg1, 0), flags);
2446 if (operand_equal_p (TREE_OPERAND (arg0, 0),
2447 TREE_OPERAND (arg1, 0), flags)
2448 && operand_equal_p (TREE_OPERAND (arg0, 1),
2449 TREE_OPERAND (arg1, 1), flags))
2452 /* For commutative ops, allow the other order. */
2453 return (commutative_tree_code (TREE_CODE (arg0))
2454 && operand_equal_p (TREE_OPERAND (arg0, 0),
2455 TREE_OPERAND (arg1, 1), flags)
2456 && operand_equal_p (TREE_OPERAND (arg0, 1),
2457 TREE_OPERAND (arg1, 0), flags));
2460 /* If either of the pointer (or reference) expressions we are
2461 dereferencing contains a side effect, these cannot be equal. */
2462 if (TREE_SIDE_EFFECTS (arg0)
2463 || TREE_SIDE_EFFECTS (arg1))
2466 switch (TREE_CODE (arg0))
2471 return operand_equal_p (TREE_OPERAND (arg0, 0),
2472 TREE_OPERAND (arg1, 0), flags);
2475 case ARRAY_RANGE_REF:
2476 return (operand_equal_p (TREE_OPERAND (arg0, 0),
2477 TREE_OPERAND (arg1, 0), flags)
2478 && operand_equal_p (TREE_OPERAND (arg0, 1),
2479 TREE_OPERAND (arg1, 1), flags)
2480 && operand_equal_p (TREE_OPERAND (arg0, 2),
2481 TREE_OPERAND (arg1, 2), flags)
2482 && operand_equal_p (TREE_OPERAND (arg0, 3),
2483 TREE_OPERAND (arg1, 3), flags));
2487 return (operand_equal_p (TREE_OPERAND (arg0, 0),
2488 TREE_OPERAND (arg1, 0), flags)
2489 && operand_equal_p (TREE_OPERAND (arg0, 1),
2490 TREE_OPERAND (arg1, 1), flags)
2491 && operand_equal_p (TREE_OPERAND (arg0, 2),
2492 TREE_OPERAND (arg1, 2), flags));
2496 return (operand_equal_p (TREE_OPERAND (arg0, 0),
2497 TREE_OPERAND (arg1, 0), flags)
2498 && operand_equal_p (TREE_OPERAND (arg0, 1),
2499 TREE_OPERAND (arg1, 1), flags)
2500 && operand_equal_p (TREE_OPERAND (arg0, 2),
2501 TREE_OPERAND (arg1, 2), flags));
2507 switch (TREE_CODE (arg0))
2510 case TRUTH_NOT_EXPR:
2511 return operand_equal_p (TREE_OPERAND (arg0, 0),
2512 TREE_OPERAND (arg1, 0), flags);
2514 case TRUTH_ANDIF_EXPR:
2515 case TRUTH_ORIF_EXPR:
2516 return operand_equal_p (TREE_OPERAND (arg0, 0),
2517 TREE_OPERAND (arg1, 0), flags)
2518 && operand_equal_p (TREE_OPERAND (arg0, 1),
2519 TREE_OPERAND (arg1, 1), flags);
2521 case TRUTH_AND_EXPR:
2523 case TRUTH_XOR_EXPR:
2524 return (operand_equal_p (TREE_OPERAND (arg0, 0),
2525 TREE_OPERAND (arg1, 0), flags)
2526 && operand_equal_p (TREE_OPERAND (arg0, 1),
2527 TREE_OPERAND (arg1, 1), flags))
2528 || (operand_equal_p (TREE_OPERAND (arg0, 0),
2529 TREE_OPERAND (arg1, 1), flags)
2530 && operand_equal_p (TREE_OPERAND (arg0, 1),
2531 TREE_OPERAND (arg1, 0), flags));
2534 /* If the CALL_EXPRs call different functions, then they
2535 clearly cannot be equal. */
2536 if (! operand_equal_p (TREE_OPERAND (arg0, 0),
2537 TREE_OPERAND (arg1, 0), flags))
2541 unsigned int cef = call_expr_flags (arg0);
2542 if (flags & OEP_PURE_SAME)
2543 cef &= ECF_CONST | ECF_PURE;
2550 /* Now see if all the arguments are the same. operand_equal_p
2551 does not handle TREE_LIST, so we walk the operands here
2552 feeding them to operand_equal_p. */
2553 arg0 = TREE_OPERAND (arg0, 1);
2554 arg1 = TREE_OPERAND (arg1, 1);
2555 while (arg0 && arg1)
2557 if (! operand_equal_p (TREE_VALUE (arg0), TREE_VALUE (arg1),
2561 arg0 = TREE_CHAIN (arg0);
2562 arg1 = TREE_CHAIN (arg1);
2565 /* If we get here and both argument lists are exhausted
2566 then the CALL_EXPRs are equal. */
2567 return ! (arg0 || arg1);
2574 /* Consider __builtin_sqrt equal to sqrt. */
2575 return (TREE_CODE (arg0) == FUNCTION_DECL
2576 && DECL_BUILT_IN (arg0) && DECL_BUILT_IN (arg1)
2577 && DECL_BUILT_IN_CLASS (arg0) == DECL_BUILT_IN_CLASS (arg1)
2578 && DECL_FUNCTION_CODE (arg0) == DECL_FUNCTION_CODE (arg1));
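/* A minimal illustrative sketch (not part of the original file, kept
   under "#if 0"): how a caller might use operand_equal_p and its
   flags.  A and B are hypothetical trees supplied by the caller.  */
#if 0
static int
operands_match_sketch (tree a, tree b)
{
  /* Structural equality; a VAR_DECL is considered equal to itself
     even though it may hold different values over the function.  */
  if (operand_equal_p (a, b, 0))
    return 1;

  /* Only accept constants, e.g. when A and B come from different
     statements.  Note that this tests indistinguishability, not C
     equality: -0.0 and 0.0 are not equal here although -0.0 == 0.0,
     and two identical NaNs are equal here although NaN != NaN.  */
  return operand_equal_p (a, b, OEP_ONLY_CONST);
}
#endif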
2585 /* Similar to operand_equal_p, but see if ARG0 might have been made by
2586 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
2588 When in doubt, return 0. */
2591 operand_equal_for_comparison_p (tree arg0, tree arg1, tree other)
2593 int unsignedp1, unsignedpo;
2594 tree primarg0, primarg1, primother;
2595 unsigned int correct_width;
2597 if (operand_equal_p (arg0, arg1, 0))
2600 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
2601 || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
2604 /* Discard any conversions that don't change the modes of ARG0 and ARG1
2605 and see if the inner values are the same. This removes any
2606 signedness comparison, which doesn't matter here. */
2607 primarg0 = arg0, primarg1 = arg1;
2608 STRIP_NOPS (primarg0);
2609 STRIP_NOPS (primarg1);
2610 if (operand_equal_p (primarg0, primarg1, 0))
2613 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
2614 actual comparison operand, ARG0.
2616 First throw away any conversions to wider types
2617 already present in the operands. */
2619 primarg1 = get_narrower (arg1, &unsignedp1);
2620 primother = get_narrower (other, &unsignedpo);
2622 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
2623 if (unsignedp1 == unsignedpo
2624 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
2625 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
2627 tree type = TREE_TYPE (arg0);
2629 /* Make sure the shorter operand is extended the right way
2630 to match the longer operand. */
2631 primarg1 = fold_convert (lang_hooks.types.signed_or_unsigned_type
2632 (unsignedp1, TREE_TYPE (primarg1)), primarg1);
2634 if (operand_equal_p (arg0, fold_convert (type, primarg1), 0))
2641 /* See if ARG is an expression that is either a comparison or is performing
2642 arithmetic on comparisons. The comparisons must only be comparing
2643 two different values, which will be stored in *CVAL1 and *CVAL2; if
2644 they are nonzero it means that some operands have already been found.
2645 No variables may be used anywhere else in the expression except in the
2646 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
2647 the expression and save_expr needs to be called with CVAL1 and CVAL2.
2649 If this is true, return 1. Otherwise, return zero. */
2652 twoval_comparison_p (tree arg, tree *cval1, tree *cval2, int *save_p)
2654 enum tree_code code = TREE_CODE (arg);
2655 char class = TREE_CODE_CLASS (code);
2657 /* We can handle some of the 'e' cases here. */
2658 if (class == 'e' && code == TRUTH_NOT_EXPR)
2660 else if (class == 'e'
2661 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
2662 || code == COMPOUND_EXPR))
2665 else if (class == 'e' && code == SAVE_EXPR
2666 && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg, 0)))
2668 /* If we've already found a CVAL1 or CVAL2, this expression is
2669 too complex to handle. */
2670 if (*cval1 || *cval2)
2680 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
2683 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
2684 && twoval_comparison_p (TREE_OPERAND (arg, 1),
2685 cval1, cval2, save_p));
2691 if (code == COND_EXPR)
2692 return (twoval_comparison_p (TREE_OPERAND (arg, 0),
2693 cval1, cval2, save_p)
2694 && twoval_comparison_p (TREE_OPERAND (arg, 1),
2695 cval1, cval2, save_p)
2696 && twoval_comparison_p (TREE_OPERAND (arg, 2),
2697 cval1, cval2, save_p));
2701 /* First see if we can handle the first operand, then the second. For
2702 the second operand, we know *CVAL1 can't be zero. It must be that
2703 one side of the comparison is each of the values; test for the
2704 case where this isn't true by failing if the two operands are the same. */
2707 if (operand_equal_p (TREE_OPERAND (arg, 0),
2708 TREE_OPERAND (arg, 1), 0))
2712 *cval1 = TREE_OPERAND (arg, 0);
2713 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
2715 else if (*cval2 == 0)
2716 *cval2 = TREE_OPERAND (arg, 0);
2717 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
2722 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
2724 else if (*cval2 == 0)
2725 *cval2 = TREE_OPERAND (arg, 1);
2726 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
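/* A minimal illustrative sketch (not part of the original file, kept
   under "#if 0"): detecting that ARG, e.g. (a < b) || (a == b),
   compares only two values; CVAL1 and CVAL2 come back as a and b.  */
#if 0
static void
twoval_comparison_sketch (tree arg)
{
  tree cval1 = 0, cval2 = 0;
  int save_p = 0;

  if (twoval_comparison_p (arg, &cval1, &cval2, &save_p))
    {
      /* CVAL1 and CVAL2 now hold the two values being compared;
	 a caller such as fold can substitute constants for them
	 with eval_subst and see if the result simplifies.  */
    }
}
#endif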
2738 /* ARG is a tree that is known to contain just arithmetic operations and
2739 comparisons. Evaluate the operations in the tree substituting NEW0 for
2740 any occurrence of OLD0 as an operand of a comparison, and likewise for NEW1 and OLD1. */
2744 eval_subst (tree arg, tree old0, tree new0, tree old1, tree new1)
2746 tree type = TREE_TYPE (arg);
2747 enum tree_code code = TREE_CODE (arg);
2748 char class = TREE_CODE_CLASS (code);
2750 /* We can handle some of the 'e' cases here. */
2751 if (class == 'e' && code == TRUTH_NOT_EXPR)
2753 else if (class == 'e'
2754 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
2760 return fold (build1 (code, type,
2761 eval_subst (TREE_OPERAND (arg, 0),
2762 old0, new0, old1, new1)));
2765 return fold (build2 (code, type,
2766 eval_subst (TREE_OPERAND (arg, 0),
2767 old0, new0, old1, new1),
2768 eval_subst (TREE_OPERAND (arg, 1),
2769 old0, new0, old1, new1)));
2775 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
2778 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
2781 return fold (build3 (code, type,
2782 eval_subst (TREE_OPERAND (arg, 0),
2783 old0, new0, old1, new1),
2784 eval_subst (TREE_OPERAND (arg, 1),
2785 old0, new0, old1, new1),
2786 eval_subst (TREE_OPERAND (arg, 2),
2787 old0, new0, old1, new1)));
2791 /* Fall through - ??? */
2795 tree arg0 = TREE_OPERAND (arg, 0);
2796 tree arg1 = TREE_OPERAND (arg, 1);
2798 /* We need to check both for exact equality and tree equality. The
2799 former will be true if the operand has a side-effect. In that
2800 case, we know the operand occurred exactly once. */
2802 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
2804 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
2807 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
2809 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
2812 return fold (build2 (code, type, arg0, arg1));
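/* A minimal illustrative sketch (not part of the original file, kept
   under "#if 0"): substituting into the comparisons of a two-value
   expression.  For (a < b) && (b < c), replacing a with NEW0 and b
   with NEW1 yields the folded form of (new0 < new1) && (new1 < c);
   operands that are neither OLD0 nor OLD1, such as c, pass through.  */
#if 0
static tree
eval_subst_sketch (tree boolean_type, tree a, tree b, tree c,
		   tree new0, tree new1)
{
  tree cmp1 = build2 (LT_EXPR, boolean_type, a, b);
  tree cmp2 = build2 (LT_EXPR, boolean_type, b, c);
  tree arg = build2 (TRUTH_ANDIF_EXPR, boolean_type, cmp1, cmp2);

  return eval_subst (arg, a, new0, b, new1);
}
#endif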
2820 /* Return a tree for the case when the result of an expression is RESULT
2821 converted to TYPE and OMITTED was previously an operand of the expression
2822 but is now not needed (e.g., we folded OMITTED * 0).
2824 If OMITTED has side effects, we must evaluate it. Otherwise, just do
2825 the conversion of RESULT to TYPE. */
2828 omit_one_operand (tree type, tree result, tree omitted)
2830 tree t = fold_convert (type, result);
2832 if (TREE_SIDE_EFFECTS (omitted))
2833 return build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);
2835 return non_lvalue (t);
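/* A minimal illustrative sketch (not part of the original file, kept
   under "#if 0"): folding X * 0 to 0.  If X has side effects (say,
   a call), the result is a COMPOUND_EXPR that still evaluates X but
   yields 0; otherwise it is just the converted constant.  */
#if 0
static tree
fold_mult_zero_sketch (tree type, tree x)
{
  return omit_one_operand (type, build_int_cst (type, 0), x);
}
#endif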
2838 /* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
2841 pedantic_omit_one_operand (tree type, tree result, tree omitted)
2843 tree t = fold_convert (type, result);
2845 if (TREE_SIDE_EFFECTS (omitted))
2846 return build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);
2848 return pedantic_non_lvalue (t);
2851 /* Return a tree for the case when the result of an expression is RESULT
2852 converted to TYPE and OMITTED1 and OMITTED2 were previously operands
2853 of the expression but are now not needed.
2855 If OMITTED1 or OMITTED2 has side effects, they must be evaluated.
2856 If both OMITTED1 and OMITTED2 have side effects, OMITTED1 is
2857 evaluated before OMITTED2. Otherwise, if neither has side effects,
2858 just do the conversion of RESULT to TYPE. */
2861 omit_two_operands (tree type, tree result, tree omitted1, tree omitted2)
2863 tree t = fold_convert (type, result);
2865 if (TREE_SIDE_EFFECTS (omitted2))
2866 t = build2 (COMPOUND_EXPR, type, omitted2, t);
2867 if (TREE_SIDE_EFFECTS (omitted1))
2868 t = build2 (COMPOUND_EXPR, type, omitted1, t);
2870 return TREE_CODE (t) != COMPOUND_EXPR ? non_lvalue (t) : t;
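/* A minimal illustrative sketch (not part of the original file, kept
   under "#if 0"): a comparison whose result is known to be false at
   compile time.  Both operands are retained for their side effects,
   OP0 being evaluated before OP1 as documented above.  */
#if 0
static tree
fold_known_false_sketch (tree type, tree op0, tree op1)
{
  return omit_two_operands (type, constant_boolean_node (false, type),
			    op0, op1);
}
#endif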
2874 /* Return a simplified tree node for the truth-negation of ARG. This
2875 never alters ARG itself. We assume that ARG is an operation that
2876 returns a truth value (0 or 1).
2878 FIXME: one would think we would fold the result, but it causes
2879 problems with the dominator optimizer. */
2881 invert_truthvalue (tree arg)
2883 tree type = TREE_TYPE (arg);
2884 enum tree_code code = TREE_CODE (arg);
2886 if (code == ERROR_MARK)
2889 /* If this is a comparison, we can simply invert it, except for
2890 floating-point non-equality comparisons, in which case we just
2891 enclose a TRUTH_NOT_EXPR around what we have. */
2893 if (TREE_CODE_CLASS (code) == '<')
2895 tree op_type = TREE_TYPE (TREE_OPERAND (arg, 0));
2896 if (FLOAT_TYPE_P (op_type)
2897 && flag_trapping_math
2898 && code != ORDERED_EXPR && code != UNORDERED_EXPR
2899 && code != NE_EXPR && code != EQ_EXPR)
2900 return build1 (TRUTH_NOT_EXPR, type, arg);
2903 code = invert_tree_comparison (code,
2904 HONOR_NANS (TYPE_MODE (op_type)));
2905 if (code == ERROR_MARK)
2906 return build1 (TRUTH_NOT_EXPR, type, arg);
2908 return build2 (code, type,
2909 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
2916 return fold_convert (type,
2917 build_int_cst (NULL_TREE, integer_zerop (arg)));
2919 case TRUTH_AND_EXPR:
2920 return build2 (TRUTH_OR_EXPR, type,
2921 invert_truthvalue (TREE_OPERAND (arg, 0)),
2922 invert_truthvalue (TREE_OPERAND (arg, 1)));
2925 return build2 (TRUTH_AND_EXPR, type,
2926 invert_truthvalue (TREE_OPERAND (arg, 0)),
2927 invert_truthvalue (TREE_OPERAND (arg, 1)));
2929 case TRUTH_XOR_EXPR:
2930 /* Here we can invert either operand. We invert the first operand
2931 unless the second operand is a TRUTH_NOT_EXPR in which case our
2932 result is the XOR of the first operand with the inside of the
2933 negation of the second operand. */
2935 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
2936 return build2 (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
2937 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
2939 return build2 (TRUTH_XOR_EXPR, type,
2940 invert_truthvalue (TREE_OPERAND (arg, 0)),
2941 TREE_OPERAND (arg, 1));
2943 case TRUTH_ANDIF_EXPR:
2944 return build2 (TRUTH_ORIF_EXPR, type,
2945 invert_truthvalue (TREE_OPERAND (arg, 0)),
2946 invert_truthvalue (TREE_OPERAND (arg, 1)));
2948 case TRUTH_ORIF_EXPR:
2949 return build2 (TRUTH_ANDIF_EXPR, type,
2950 invert_truthvalue (TREE_OPERAND (arg, 0)),
2951 invert_truthvalue (TREE_OPERAND (arg, 1)));
2953 case TRUTH_NOT_EXPR:
2954 return TREE_OPERAND (arg, 0);
2957 return build3 (COND_EXPR, type, TREE_OPERAND (arg, 0),
2958 invert_truthvalue (TREE_OPERAND (arg, 1)),
2959 invert_truthvalue (TREE_OPERAND (arg, 2)));
2962 return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
2963 invert_truthvalue (TREE_OPERAND (arg, 1)));
2965 case NON_LVALUE_EXPR:
2966 return invert_truthvalue (TREE_OPERAND (arg, 0));
2969 if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE)
2974 return build1 (TREE_CODE (arg), type,
2975 invert_truthvalue (TREE_OPERAND (arg, 0)));
2978 if (!integer_onep (TREE_OPERAND (arg, 1)))
2980 return build2 (EQ_EXPR, type, arg,
2981 fold_convert (type, integer_zero_node));
2984 return build1 (TRUTH_NOT_EXPR, type, arg);
2986 case CLEANUP_POINT_EXPR:
2987 return build1 (CLEANUP_POINT_EXPR, type,
2988 invert_truthvalue (TREE_OPERAND (arg, 0)));
2993 gcc_assert (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE);
2994 return build1 (TRUTH_NOT_EXPR, type, arg);
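/* A minimal illustrative sketch (not part of the original file, kept
   under "#if 0"): the De Morgan transformation performed above.
   For boolean operands A and B, !(a && b) becomes !a || !b, with
   each bare variable negated via TRUTH_NOT_EXPR.  */
#if 0
static tree
invert_conjunction_sketch (tree boolean_type, tree a, tree b)
{
  tree conj = build2 (TRUTH_ANDIF_EXPR, boolean_type, a, b);

  /* Yields a TRUTH_ORIF_EXPR of the inverted operands.  */
  return invert_truthvalue (conj);
}
#endif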
2997 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2998 operands are another bit-wise operation with a common input. If so,
2999 distribute the bit operations to save an operation and possibly two if
3000 constants are involved. For example, convert
3001 (A | B) & (A | C) into A | (B & C)
3002 Further simplification will occur if B and C are constants.
3004 If this optimization cannot be done, 0 will be returned. */
3007 distribute_bit_expr (enum tree_code code, tree type, tree arg0, tree arg1)
3012 if (TREE_CODE (arg0) != TREE_CODE (arg1)
3013 || TREE_CODE (arg0) == code
3014 || (TREE_CODE (arg0) != BIT_AND_EXPR
3015 && TREE_CODE (arg0) != BIT_IOR_EXPR))
3018 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
3020 common = TREE_OPERAND (arg0, 0);
3021 left = TREE_OPERAND (arg0, 1);
3022 right = TREE_OPERAND (arg1, 1);
3024 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
3026 common = TREE_OPERAND (arg0, 0);
3027 left = TREE_OPERAND (arg0, 1);
3028 right = TREE_OPERAND (arg1, 0);
3030 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
3032 common = TREE_OPERAND (arg0, 1);
3033 left = TREE_OPERAND (arg0, 0);
3034 right = TREE_OPERAND (arg1, 1);
3036 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
3038 common = TREE_OPERAND (arg0, 1);
3039 left = TREE_OPERAND (arg0, 0);
3040 right = TREE_OPERAND (arg1, 0);
3045 return fold (build2 (TREE_CODE (arg0), type, common,
3046 fold (build2 (code, type, left, right))));
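/* A minimal illustrative sketch (not part of the original file, kept
   under "#if 0"): distribution with constants.  (X | 1) & (X | 2)
   distributes to X | (1 & 2), which folds to X | 0 and then to X.  */
#if 0
static tree
distribute_sketch (tree type, tree x)
{
  tree a = build2 (BIT_IOR_EXPR, type, x, build_int_cst (type, 1));
  tree b = build2 (BIT_IOR_EXPR, type, x, build_int_cst (type, 2));

  return distribute_bit_expr (BIT_AND_EXPR, type, a, b);
}
#endif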
3049 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
3050 starting at BITPOS. The field is unsigned if UNSIGNEDP is nonzero. */
3053 make_bit_field_ref (tree inner, tree type, int bitsize, int bitpos,
3056 tree result = build3 (BIT_FIELD_REF, type, inner,
3057 size_int (bitsize), bitsize_int (bitpos));
3059 BIT_FIELD_REF_UNSIGNED (result) = unsignedp;
3064 /* Optimize a bit-field compare.
3066 There are two cases: First is a compare against a constant and the
3067 second is a comparison of two items where the fields are at the same
3068 bit position relative to the start of a chunk (byte, halfword, word)
3069 large enough to contain it. In these cases we can avoid the shift
3070 implicit in bitfield extractions.
3072 For constants, we emit a compare of the shifted constant with the
3073 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
3074 compared. For two fields at the same position, we do the ANDs with the
3075 similar mask and compare the result of the ANDs.
3077 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
3078 COMPARE_TYPE is the type of the comparison, and LHS and RHS
3079 are the left and right operands of the comparison, respectively.
3081 If the optimization described above can be done, we return the resulting
3082 tree. Otherwise we return zero. */
3085 optimize_bit_field_compare (enum tree_code code, tree compare_type,
3088 HOST_WIDE_INT lbitpos, lbitsize, rbitpos, rbitsize, nbitpos, nbitsize;
3089 tree type = TREE_TYPE (lhs);
3090 tree signed_type, unsigned_type;
3091 int const_p = TREE_CODE (rhs) == INTEGER_CST;
3092 enum machine_mode lmode, rmode, nmode;
3093 int lunsignedp, runsignedp;
3094 int lvolatilep = 0, rvolatilep = 0;
3095 tree linner, rinner = NULL_TREE;
3099 /* Get all the information about the extractions being done. If the bit size
3100 is the same as the size of the underlying object, we aren't doing an
3101 extraction at all and so can do nothing. We also don't want to
3102 do anything if the inner expression is a PLACEHOLDER_EXPR since we
3103 then will no longer be able to replace it. */
3104 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
3105 &lunsignedp, &lvolatilep);
3106 if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
3107 || offset != 0 || TREE_CODE (linner) == PLACEHOLDER_EXPR)
3112 /* If this is not a constant, we can only do something if bit positions,
3113 sizes, and signedness are the same. */
3114 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode,
3115 &runsignedp, &rvolatilep);
3117 if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
3118 || lunsignedp != runsignedp || offset != 0
3119 || TREE_CODE (rinner) == PLACEHOLDER_EXPR)
3123 /* See if we can find a mode to refer to this field. We should be able to,
3124 but fail if we can't. */
3125 nmode = get_best_mode (lbitsize, lbitpos,
3126 const_p ? TYPE_ALIGN (TREE_TYPE (linner))
3127 : MIN (TYPE_ALIGN (TREE_TYPE (linner)),
3128 TYPE_ALIGN (TREE_TYPE (rinner))),
3129 word_mode, lvolatilep || rvolatilep);
3130 if (nmode == VOIDmode)
3133 /* Set signed and unsigned types of the precision of this mode for the shifts below. */
3135 signed_type = lang_hooks.types.type_for_mode (nmode, 0);
3136 unsigned_type = lang_hooks.types.type_for_mode (nmode, 1);
3138 /* Compute the bit position and size for the new reference and our offset
3139 within it. If the new reference is the same size as the original, we
3140 won't optimize anything, so return zero. */
3141 nbitsize = GET_MODE_BITSIZE (nmode);
3142 nbitpos = lbitpos & ~ (nbitsize - 1);
3144 if (nbitsize == lbitsize)
3147 if (BYTES_BIG_ENDIAN)
3148 lbitpos = nbitsize - lbitsize - lbitpos;
3150 /* Make the mask to be used against the extracted field. */
3151 mask = build_int_cst (unsigned_type, -1);
3152 mask = force_fit_type (mask, 0, false, false);
3153 mask = fold_convert (unsigned_type, mask);
3154 mask = const_binop (LSHIFT_EXPR, mask, size_int (nbitsize - lbitsize), 0);
3155 mask = const_binop (RSHIFT_EXPR, mask,
3156 size_int (nbitsize - lbitsize - lbitpos), 0);
3159 /* If not comparing with a constant, just rework the comparison and return. */
3161 return build2 (code, compare_type,
3162 build2 (BIT_AND_EXPR, unsigned_type,
3163 make_bit_field_ref (linner, unsigned_type,
3164 nbitsize, nbitpos, 1),
3166 build2 (BIT_AND_EXPR, unsigned_type,
3167 make_bit_field_ref (rinner, unsigned_type,
3168 nbitsize, nbitpos, 1),
3171 /* Otherwise, we are handling the constant case. See if the constant is too
3172 big for the field. Warn and return a tree for 0 (false) if so. We do
3173 this not only for its own sake, but to avoid having to test for this
3174 error case below. If we didn't, we might generate wrong code.
3176 For unsigned fields, the constant shifted right by the field length should
3177 be all zero. For signed fields, the high-order bits should agree with the sign bit. */
3182 if (! integer_zerop (const_binop (RSHIFT_EXPR,
3183 fold_convert (unsigned_type, rhs),
3184 size_int (lbitsize), 0)))
3186 warning ("comparison is always %d due to width of bit-field",
3188 return constant_boolean_node (code == NE_EXPR, compare_type);
3193 tree tem = const_binop (RSHIFT_EXPR, fold_convert (signed_type, rhs),
3194 size_int (lbitsize - 1), 0);
3195 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
3197 warning ("comparison is always %d due to width of bit-field",
3199 return constant_boolean_node (code == NE_EXPR, compare_type);
3203 /* Single-bit compares should always be against zero. */
3204 if (lbitsize == 1 && ! integer_zerop (rhs))
3206 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
3207 rhs = fold_convert (type, integer_zero_node);
3210 /* Make a new bitfield reference, shift the constant over the
3211 appropriate number of bits and mask it with the computed mask
3212 (in case this was a signed field). If we changed it, make a new one. */
3213 lhs = make_bit_field_ref (linner, unsigned_type, nbitsize, nbitpos, 1);
3216 TREE_SIDE_EFFECTS (lhs) = 1;
3217 TREE_THIS_VOLATILE (lhs) = 1;
3220 rhs = fold (const_binop (BIT_AND_EXPR,
3221 const_binop (LSHIFT_EXPR,
3222 fold_convert (unsigned_type, rhs),
3223 size_int (lbitpos), 0),
3226 return build2 (code, compare_type,
3227 build2 (BIT_AND_EXPR, unsigned_type, lhs, mask),
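/* A worked instance (not part of the original file, kept under
   "#if 0") of the mask construction used above, with hypothetical
   values: a 3-bit field at bit position 4 within a 32-bit chunk,
   computed in plain host arithmetic.  The constant RHS is shifted
   left by the same bit position and masked likewise before the
   compare.  */
#if 0
static void
bit_field_mask_sketch (void)
{
  unsigned int nbitsize = 32, lbitsize = 3, lbitpos = 4;
  unsigned int mask = ~0U;

  mask <<= nbitsize - lbitsize;            /* 0xe0000000: top 3 bits.  */
  mask >>= nbitsize - lbitsize - lbitpos;  /* 0x00000070: bits 4..6.  */
  gcc_assert (mask == 0x70);
}
#endif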
3231 /* Subroutine for fold_truthop: decode a field reference.
3233 If EXP is a comparison reference, we return the innermost reference.
3235 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
3236 set to the starting bit number.
3238 If the innermost field can be completely contained in a mode-sized
3239 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
3241 *PVOLATILEP is set to 1 if any expression encountered is volatile;
3242 otherwise it is not changed.
3244 *PUNSIGNEDP is set to the signedness of the field.
3246 *PMASK is set to the mask used. This is either contained in a
3247 BIT_AND_EXPR or derived from the width of the field.
3249 *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.
3251 Return 0 if this is not a component reference or is one that we can't
3252 do anything with. */
3255 decode_field_reference (tree exp, HOST_WIDE_INT *pbitsize,
3256 HOST_WIDE_INT *pbitpos, enum machine_mode *pmode,
3257 int *punsignedp, int *pvolatilep,
3258 tree *pmask, tree *pand_mask)
3260 tree outer_type = 0;
3262 tree mask, inner, offset;
3264 unsigned int precision;
3266 /* All the optimizations using this function assume integer fields.
3267 There are problems with FP fields since the type_for_size call
3268 below can fail for, e.g., XFmode. */
3269 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
3272 /* We are interested in the bare arrangement of bits, so strip everything
3273 that doesn't affect the machine mode. However, record the type of the
3274 outermost expression if it may matter below. */
3275 if (TREE_CODE (exp) == NOP_EXPR
3276 || TREE_CODE (exp) == CONVERT_EXPR
3277 || TREE_CODE (exp) == NON_LVALUE_EXPR)
3278 outer_type = TREE_TYPE (exp);
3281 if (TREE_CODE (exp) == BIT_AND_EXPR)
3283 and_mask = TREE_OPERAND (exp, 1);
3284 exp = TREE_OPERAND (exp, 0);
3285 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
3286 if (TREE_CODE (and_mask) != INTEGER_CST)
3290 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
3291 punsignedp, pvolatilep);
3292 if ((inner == exp && and_mask == 0)
3293 || *pbitsize < 0 || offset != 0
3294 || TREE_CODE (inner) == PLACEHOLDER_EXPR)
3297 /* If the number of bits in the reference is the same as the bitsize of
3298 the outer type, then the outer type gives the signedness. Otherwise
3299 (in case of a small bitfield) the signedness is unchanged. */
3300 if (outer_type && *pbitsize == TYPE_PRECISION (outer_type))
3301 *punsignedp = TYPE_UNSIGNED (outer_type);
3303 /* Compute the mask to access the bitfield. */
3304 unsigned_type = lang_hooks.types.type_for_size (*pbitsize, 1);
3305 precision = TYPE_PRECISION (unsigned_type);
3307 mask = build_int_cst (unsigned_type, -1);
3308 mask = force_fit_type (mask, 0, false, false);
3310 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
3311 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
3313 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
3315 mask = fold (build2 (BIT_AND_EXPR, unsigned_type,
3316 fold_convert (unsigned_type, and_mask), mask));
3319 *pand_mask = and_mask;
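/* A minimal illustrative sketch (not part of the original file, kept
   under "#if 0"): pulling apart a field reference such as "s.f & 3"
   into its raw parameters plus the combined mask.  */
#if 0
static tree
decode_field_sketch (tree exp)
{
  HOST_WIDE_INT bitsize, bitpos;
  enum machine_mode mode;
  int unsignedp, volatilep = 0;
  tree mask, and_mask;

  /* Returns the innermost reference, or 0 if EXP is not usable.  */
  return decode_field_reference (exp, &bitsize, &bitpos, &mode,
				 &unsignedp, &volatilep, &mask, &and_mask);
}
#endif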
3323 /* Return nonzero if MASK represents a mask of SIZE ones in the low-order bits. */
3327 all_ones_mask_p (tree mask, int size)
3329 tree type = TREE_TYPE (mask);
3330 unsigned int precision = TYPE_PRECISION (type);
3333 tmask = build_int_cst (lang_hooks.types.signed_type (type), -1);
3334 tmask = force_fit_type (tmask, 0, false, false);
3337 tree_int_cst_equal (mask,
3338 const_binop (RSHIFT_EXPR,
3339 const_binop (LSHIFT_EXPR, tmask,
3340 size_int (precision - size),
3342 size_int (precision - size), 0));
3345 /* Subroutine for fold: determine if VAL is the INTEGER_CST that
3346 represents the sign bit of EXP's type. If EXP represents a sign
3347 or zero extension, also test VAL against the unextended type.
3348 The return value is the (sub)expression whose sign bit is VAL,
3349 or NULL_TREE otherwise. */
3352 sign_bit_p (tree exp, tree val)
3354 unsigned HOST_WIDE_INT mask_lo, lo;
3355 HOST_WIDE_INT mask_hi, hi;
3359 /* Tree EXP must have an integral type. */
3360 t = TREE_TYPE (exp);
3361 if (! INTEGRAL_TYPE_P (t))
3364 /* Tree VAL must be an integer constant. */
3365 if (TREE_CODE (val) != INTEGER_CST
3366 || TREE_CONSTANT_OVERFLOW (val))
3369 width = TYPE_PRECISION (t);
3370 if (width > HOST_BITS_PER_WIDE_INT)
3372 hi = (unsigned HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT - 1);
3375 mask_hi = ((unsigned HOST_WIDE_INT) -1
3376 >> (2 * HOST_BITS_PER_WIDE_INT - width));
3382 lo = (unsigned HOST_WIDE_INT) 1 << (width - 1);
3385 mask_lo = ((unsigned HOST_WIDE_INT) -1
3386 >> (HOST_BITS_PER_WIDE_INT - width));
3389 /* We mask off those bits beyond TREE_TYPE (exp) so that we can
3390 treat VAL as if it were unsigned. */
3391 if ((TREE_INT_CST_HIGH (val) & mask_hi) == hi
3392 && (TREE_INT_CST_LOW (val) & mask_lo) == lo)
3395 /* Handle extension from a narrower type. */
3396 if (TREE_CODE (exp) == NOP_EXPR
3397 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0))) < width)
3398 return sign_bit_p (TREE_OPERAND (exp, 0), val);
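/* A minimal illustrative sketch (not part of the original file, kept
   under "#if 0"): for an 8-bit signed type the sign bit is 0x80, so
   only that constant is recognized.  EXP is assumed to be a plain
   variable of the hypothetical type INT8_TYPE.  */
#if 0
static void
sign_bit_sketch (tree int8_type, tree exp)
{
  tree sign = build_int_cst (int8_type, -128);     /* bit pattern 0x80.  */
  tree not_sign = build_int_cst (int8_type, 0x40);

  gcc_assert (sign_bit_p (exp, sign) != NULL_TREE);
  gcc_assert (sign_bit_p (exp, not_sign) == NULL_TREE);
}
#endif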
3403 /* Subroutine for fold_truthop: determine if an operand is simple enough
3404 to be evaluated unconditionally. */
3407 simple_operand_p (tree exp)
3409 /* Strip any conversions that don't change the machine mode. */
3410 while ((TREE_CODE (exp) == NOP_EXPR
3411 || TREE_CODE (exp) == CONVERT_EXPR)
3412 && (TYPE_MODE (TREE_TYPE (exp))
3413 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
3414 exp = TREE_OPERAND (exp, 0);
3416 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
3418 && ! TREE_ADDRESSABLE (exp)
3419 && ! TREE_THIS_VOLATILE (exp)
3420 && ! DECL_NONLOCAL (exp)
3421 /* Don't regard global variables as simple. They may be
3422 allocated in ways unknown to the compiler (shared memory,
3423 #pragma weak, etc). */
3424 && ! TREE_PUBLIC (exp)
3425 && ! DECL_EXTERNAL (exp)
3426 /* Loading a static variable is unduly expensive, but global
3427 registers aren't expensive. */
3428 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
3431 /* The following functions are subroutines to fold_range_test and allow it to
3432 try to change a logical combination of comparisons into a range test.
3435 For example, X == 2 || X == 3 || X == 4 || X == 5
3439 may be replaced with (unsigned) (X - 2) <= 3.
3441 We describe each set of comparisons as being either inside or outside
3442 a range, using a variable named like IN_P, and then describe the
3443 range with a lower and upper bound. If one of the bounds is omitted,