/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */
/* The entry points in this file are fold, size_int_wide, size_binop
   and force_fit_type_double.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.

   force_fit_type_double takes a constant, an overflowable flag and a
   prior overflow indicator.  It forces the value to fit the type and
   sets TREE_OVERFLOW.

   Note: Since the folders get called on non-gimple code as well as
   gimple code, we need to handle GIMPLE tuples as well as their
   corresponding tree equivalents.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "flags.h"
#include "tree.h"
#include "real.h"
#include "fixed-value.h"
#include "rtl.h"
#include "expr.h"
#include "tm_p.h"
#include "target.h"
#include "toplev.h"
#include "intl.h"
#include "ggc.h"
#include "hashtab.h"
#include "langhooks.h"
#include "md5.h"
/* Nonzero if we are folding constants inside an initializer; zero
   otherwise.  */
int folding_initializer = 0;

/* The following constants represent a bit based encoding of GCC's
   comparison operators.  This encoding simplifies transformations
   on relational comparison operators, such as AND and OR.  */
enum comparison_code {
  COMPCODE_FALSE = 0,
  COMPCODE_LT = 1,
  COMPCODE_EQ = 2,
  COMPCODE_LE = 3,
  COMPCODE_GT = 4,
  COMPCODE_LTGT = 5,
  COMPCODE_GE = 6,
  COMPCODE_ORD = 7,
  COMPCODE_UNORD = 8,
  COMPCODE_UNLT = 9,
  COMPCODE_UNEQ = 10,
  COMPCODE_UNLE = 11,
  COMPCODE_UNGT = 12,
  COMPCODE_NE = 13,
  COMPCODE_UNGE = 14,
  COMPCODE_TRUE = 15
};
static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
static bool negate_mathfn_p (enum built_in_function);
static bool negate_expr_p (tree);
static tree negate_expr (tree);
static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int);
static tree associate_trees (tree, tree, enum tree_code, tree);
static tree const_binop (enum tree_code, tree, tree, int);
static enum comparison_code comparison_to_compcode (enum tree_code);
static enum tree_code compcode_to_comparison (enum comparison_code);
static tree combine_comparisons (enum tree_code, enum tree_code,
                                 enum tree_code, tree, tree, tree);
static int truth_value_p (enum tree_code);
static int operand_equal_for_comparison_p (tree, tree, tree);
static int twoval_comparison_p (tree, tree *, tree *, int *);
static tree eval_subst (tree, tree, tree, tree, tree);
static tree pedantic_omit_one_operand (tree, tree, tree);
static tree distribute_bit_expr (enum tree_code, tree, tree, tree);
static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
                                    enum machine_mode *, int *, int *,
                                    tree *, tree *);
static tree sign_bit_p (tree, const_tree);
static int simple_operand_p (const_tree);
static tree range_binop (enum tree_code, tree, tree, int, tree, int);
static tree range_predecessor (tree);
static tree range_successor (tree);
static tree make_range (tree, int *, tree *, tree *, bool *);
static tree build_range_check (tree, tree, int, tree, tree);
static int merge_ranges (int *, tree *, tree *, int, tree, tree, int, tree,
                         tree);
static tree fold_range_test (enum tree_code, tree, tree, tree);
static tree fold_cond_expr_with_comparison (tree, tree, tree, tree);
static tree unextend (tree, int, int, tree);
static tree fold_truthop (enum tree_code, tree, tree, tree);
static tree optimize_minmax_comparison (enum tree_code, tree, tree, tree);
static tree extract_muldiv (tree, tree, enum tree_code, tree, bool *);
static tree extract_muldiv_1 (tree, tree, enum tree_code, tree, bool *);
static tree fold_binary_op_with_conditional_arg (enum tree_code, tree,
                                                 tree, tree,
                                                 tree, tree, int);
static tree fold_mathfn_compare (enum built_in_function, enum tree_code,
                                 tree, tree, tree);
static tree fold_inf_compare (enum tree_code, tree, tree, tree);
static tree fold_div_compare (enum tree_code, tree, tree, tree);
static bool reorder_operands_p (const_tree, const_tree);
static tree fold_negate_const (tree, tree);
static tree fold_not_const (tree, tree);
static tree fold_relational_const (enum tree_code, tree, tree, tree);
/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
   overflow.  Suppose A, B and SUM have the same respective signs as A1, B1,
   and SUM1.  Then this yields nonzero if overflow occurred during the
   addition.

   Overflow occurs if A and B have the same sign, but A and SUM differ in
   sign.  Use `^' to test whether signs differ, and `< 0' to isolate the
   sign.  */
#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
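
/* Illustrative sketch (not part of GCC): exercising the macro above with
   plain ints.  Note that signed overflow is undefined in ISO C; this
   example assumes the usual two's complement wraparound, which is also
   what the double-word routines below rely on.  */
#if 0
#include <limits.h>
#include <stdio.h>

int
main (void)
{
  int a = INT_MAX, b = 1;
  int sum = a + b;   /* wraps to INT_MIN on two's complement hardware */
  /* A and B share a sign but differ from SUM, so overflow is reported.  */
  printf ("%d\n", OVERFLOW_SUM_SIGN (a, b, sum) != 0);  /* prints 1 */
  printf ("%d\n", OVERFLOW_SUM_SIGN (1, 2, 3) != 0);    /* prints 0 */
  return 0;
}
#endif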
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
   number.  The value of the word is LOWPART + HIGHPART * BASE.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)

/* Unpack a two-word integer into 4 words.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   WORDS points to the array of HOST_WIDE_INTs.  */

static void
encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}

/* Pack an array of 4 words into a two-word integer.
   WORDS points to the array of words.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
        HOST_WIDE_INT *hi)
{
  *low = words[0] + words[1] * BASE;
  *hi = words[2] + words[3] * BASE;
}
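
/* Illustrative sketch (not part of GCC): the same half-word split with
   fixed 64-bit types instead of HOST_WIDE_INT, showing that decode is the
   exact inverse of encode.  The SK_* names are local to this example.  */
#if 0
#include <stdint.h>
#include <assert.h>

#define SK_BASE ((uint64_t) 1 << 32)
#define SK_LOWPART(x) ((x) & (SK_BASE - 1))
#define SK_HIGHPART(x) ((uint64_t) (x) >> 32)

int
main (void)
{
  uint64_t low = 0x123456789abcdef0ULL;
  int64_t hi = -42;
  /* encode: four 32-bit digits, least significant first.  */
  uint64_t w[4] = { SK_LOWPART (low), SK_HIGHPART (low),
                    SK_LOWPART ((uint64_t) hi), SK_HIGHPART ((uint64_t) hi) };
  /* decode: LOWPART + HIGHPART * BASE recovers each original word.  */
  assert (w[0] + w[1] * SK_BASE == low);
  assert ((int64_t) (w[2] + w[3] * SK_BASE) == hi);
  return 0;
}
#endif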
/* Force the double-word integer L1, H1 to be within the range of the
   integer type TYPE.  Stores the properly truncated and sign-extended
   double-word integer in *LV, *HV.  Returns true if the operation
   overflows, that is, argument and result are different.  */

int
fit_double_type (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, const_tree type)
{
  unsigned HOST_WIDE_INT low0 = l1;
  HOST_WIDE_INT high0 = h1;
  unsigned int prec;
  int sign_extended_type;

  if (POINTER_TYPE_P (type)
      || TREE_CODE (type) == OFFSET_TYPE)
    prec = POINTER_SIZE;
  else
    prec = TYPE_PRECISION (type);

  /* Size types *are* sign extended.  */
  sign_extended_type = (!TYPE_UNSIGNED (type)
                        || (TREE_CODE (type) == INTEGER_TYPE
                            && TYPE_IS_SIZETYPE (type)));

  /* First clear all bits that are beyond the type's precision.  */
  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    h1 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
  else
    {
      h1 = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
        l1 &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  /* Then do sign extension if necessary.  */
  if (!sign_extended_type)
    /* No sign extension */;
  else if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    /* Correct width already.  */;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      /* Sign extend top half? */
      if (h1 & ((unsigned HOST_WIDE_INT)1
                << (prec - HOST_BITS_PER_WIDE_INT - 1)))
        h1 |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else if (prec == HOST_BITS_PER_WIDE_INT)
    {
      if ((HOST_WIDE_INT)l1 < 0)
        h1 = -1;
    }
  else
    {
      /* Sign extend bottom half? */
      if (l1 & ((unsigned HOST_WIDE_INT)1 << (prec - 1)))
        {
          h1 = -1;
          l1 |= (HOST_WIDE_INT)(-1) << prec;
        }
    }

  *lv = l1;
  *hv = h1;

  /* If the value didn't fit, signal overflow.  */
  return l1 != low0 || h1 != high0;
}
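
/* Illustrative sketch (not part of GCC): truncating and sign-extending a
   64-bit value to a PREC-bit signed type, the single-word analogue of
   fit_double_type above.  sk_fit is local to this example.  */
#if 0
#include <stdint.h>
#include <stdio.h>

static int
sk_fit (uint64_t v, unsigned prec, uint64_t *out)
{
  uint64_t mask = (prec >= 64) ? ~0ULL : (1ULL << prec) - 1;
  uint64_t t = v & mask;                        /* truncate */
  if (prec < 64 && (t & (1ULL << (prec - 1))))
    t |= ~mask;                                 /* sign extend */
  *out = t;
  return t != v;                                /* overflowed?  */
}

int
main (void)
{
  uint64_t r;
  printf ("%d\n", sk_fit (200, 8, &r));  /* 1: 200 becomes -56 in 8 bits */
  printf ("%d\n", sk_fit (100, 8, &r));  /* 0: fits */
  return 0;
}
#endif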
/* We force the double-int HIGH:LOW to the range of the type TYPE by
   sign or zero extending it.
   OVERFLOWABLE indicates if we are interested
   in overflow of the value, when >0 we are only interested in signed
   overflow, for <0 we are interested in any overflow.  OVERFLOWED
   indicates whether overflow has already occurred.  We force
   T's value to be within range of T's type (by setting to 0 or 1 all
   the bits outside the type's range).  We set TREE_OVERFLOW if
     OVERFLOWED is nonzero,
     or OVERFLOWABLE is >0 and signed overflow occurs,
     or OVERFLOWABLE is <0 and any overflow occurs.
   We return a new tree node for the extended double-int.  The node
   is shared if no overflow flags are set.  */

tree
force_fit_type_double (tree type, unsigned HOST_WIDE_INT low,
                       HOST_WIDE_INT high, int overflowable,
                       bool overflowed)
{
  int sign_extended_type;
  bool overflow;

  /* Size types *are* sign extended.  */
  sign_extended_type = (!TYPE_UNSIGNED (type)
                        || (TREE_CODE (type) == INTEGER_TYPE
                            && TYPE_IS_SIZETYPE (type)));

  overflow = fit_double_type (low, high, &low, &high, type);

  /* If we need to set overflow flags, return a new unshared node.  */
  if (overflowed || overflow)
    {
      if (overflowed
          || overflowable < 0
          || (overflowable > 0 && sign_extended_type))
        {
          tree t = make_node (INTEGER_CST);
          TREE_INT_CST_LOW (t) = low;
          TREE_INT_CST_HIGH (t) = high;
          TREE_TYPE (t) = type;
          TREE_OVERFLOW (t) = 1;
          return t;
        }
    }

  /* Else build a shared node.  */
  return build_int_cst_wide (type, low, high);
}
/* Add two doubleword integers with doubleword result.
   Return nonzero if the operation overflows according to UNSIGNED_P.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
                      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
                      bool unsigned_p)
{
  unsigned HOST_WIDE_INT l;
  HOST_WIDE_INT h;

  l = l1 + l2;
  h = h1 + h2 + (l < l1);

  *lv = l;
  *hv = h;

  if (unsigned_p)
    return (unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1;
  else
    return OVERFLOW_SUM_SIGN (h1, h2, h);
}
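
/* Illustrative sketch (not part of GCC): 128-bit addition from two 64-bit
   halves.  The carry out of the low half is exactly (l < l1), because
   unsigned addition wrapped iff the sum is smaller than an operand.
   sk_add128 is local to this example.  */
#if 0
#include <stdint.h>
#include <assert.h>

static void
sk_add128 (uint64_t l1, int64_t h1, uint64_t l2, int64_t h2,
           uint64_t *lv, int64_t *hv)
{
  uint64_t l = l1 + l2;
  *lv = l;
  *hv = h1 + h2 + (l < l1);  /* propagate the low-half carry */
}

int
main (void)
{
  uint64_t l;
  int64_t h;
  sk_add128 (~0ULL, 0, 1, 0, &l, &h);  /* (2^64 - 1) + 1 = 2^64 */
  assert (l == 0 && h == 1);
  return 0;
}
#endif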
/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
            unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = -l1;
      *hv = ~h1;
      return 0;
    }
}
/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows according to UNSIGNED_P.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
                      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
                      bool unsigned_p)
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  unsigned HOST_WIDE_INT carry;
  int i, j, k;
  unsigned HOST_WIDE_INT toplow, neglow;
  HOST_WIDE_INT tophigh, neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  memset (prod, 0, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
        {
          k = i + j;
          /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
          carry += arg1[i] * arg2[j];
          /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
          carry += prod[k];
          prod[k] = LOWPART (carry);
          carry = HIGHPART (carry);
        }
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);
  decode (prod + 4, &toplow, &tophigh);

  /* Unsigned overflow is immediate.  */
  if (unsigned_p)
    return (toplow | tophigh) != 0;

  /* Check for signed overflow by calculating the signed representation of the
     top half of the result; it should agree with the low half's sign bit.  */
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
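
/* Illustrative sketch (not part of GCC): the same schoolbook multiply on
   16-bit digits, producing the 64-bit product of two 32-bit inputs so
   the digit arithmetic can be checked against native arithmetic.  */
#if 0
#include <stdint.h>
#include <assert.h>

int
main (void)
{
  uint32_t a = 0xDEADBEEF, b = 0xCAFEBABE;
  uint16_t da[2] = { a & 0xFFFF, a >> 16 };
  uint16_t db[2] = { b & 0xFFFF, b >> 16 };
  uint16_t prod[4] = { 0 };
  uint64_t got;
  int i, j;

  for (i = 0; i < 2; i++)
    {
      uint32_t carry = 0;
      for (j = 0; j < 2; j++)
        {
          /* Digit product plus previous partial sum plus carry always
             fits in 32 bits, mirroring the comments above.  */
          carry += (uint32_t) da[i] * db[j] + prod[i + j];
          prod[i + j] = carry & 0xFFFF;
          carry >>= 16;
        }
      prod[i + 2] = carry;
    }

  got = ((uint64_t) prod[3] << 48) | ((uint64_t) prod[2] << 32)
        | ((uint64_t) prod[1] << 16) | prod[0];
  assert (got == (uint64_t) a * b);
  return 0;
}
#endif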
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
               HOST_WIDE_INT count, unsigned int prec,
               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith)
{
  unsigned HOST_WIDE_INT signmask;

  if (count < 0)
    {
      rshift_double (l1, h1, -count, prec, lv, hv, arith);
      return;
    }

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
             | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
      *lv = l1 << count;
    }

  /* Sign extend all bits that are beyond the precision.  */

  signmask = -((prec > HOST_BITS_PER_WIDE_INT
                ? ((unsigned HOST_WIDE_INT) *hv
                   >> (prec - HOST_BITS_PER_WIDE_INT - 1))
                : (*lv >> (prec - 1))) & 1);

  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
      *lv |= signmask << prec;
    }
}
/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
               HOST_WIDE_INT count, unsigned int prec,
               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
               int arith)
{
  unsigned HOST_WIDE_INT signmask;

  signmask = (arith
              ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
              : 0);

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = 0;
      *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = (unsigned HOST_WIDE_INT) h1 >> count;
      *lv = ((l1 >> count)
             | ((unsigned HOST_WIDE_INT) h1
                << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
    }

  /* Zero / sign extend all bits that are beyond the precision.  */

  if (count >= (HOST_WIDE_INT)prec)
    {
      *hv = signmask;
      *lv = signmask;
    }
  else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
      *lv |= signmask << (prec - count);
    }
}
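
/* Illustrative sketch (not part of GCC): a 128-bit logical left shift
   built from two 64-bit words.  The (x >> (64 - count - 1)) >> 1 idiom
   above avoids the undefined shift-by-64 when count == 0; this example
   uses the same trick.  sk_lshift128 is local to this example.  */
#if 0
#include <stdint.h>
#include <assert.h>

static void
sk_lshift128 (uint64_t l, uint64_t h, unsigned count,
              uint64_t *lv, uint64_t *hv)
{
  if (count >= 128)
    *lv = *hv = 0;
  else if (count >= 64)
    {
      *hv = l << (count - 64);
      *lv = 0;
    }
  else
    {
      *hv = (h << count) | (l >> (64 - count - 1) >> 1);
      *lv = l << count;
    }
}

int
main (void)
{
  uint64_t l, h;
  sk_lshift128 (1, 0, 100, &l, &h);
  assert (l == 0 && h == 1ULL << 36);  /* bit 100 = bit 36 of high word */
  sk_lshift128 (0x8000000000000000ULL, 0, 1, &l, &h);
  assert (l == 0 && h == 1);           /* carry across the word boundary */
  return 0;
}
#endif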
/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                HOST_WIDE_INT count, unsigned int prec,
                unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                HOST_WIDE_INT count, unsigned int prec,
                unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */

int
div_and_round_double (enum tree_code code, int uns,
                      unsigned HOST_WIDE_INT lnum_orig, /* num == numerator == dividend */
                      HOST_WIDE_INT hnum_orig,
                      unsigned HOST_WIDE_INT lden_orig, /* den == denominator == divisor */
                      HOST_WIDE_INT hden_orig,
                      unsigned HOST_WIDE_INT *lquo,
                      HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
                      HOST_WIDE_INT *hrem)
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];	/* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  int i, j;
  unsigned HOST_WIDE_INT work;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  unsigned HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  if (hden == 0 && lden == 0)
    overflow = 1, lden = 1;

  /* Calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
        {
          quo_neg = ~ quo_neg;
          /* (minimum integer) / (-1) is the only overflow case.  */
          if (neg_double (lnum, hnum, &lnum, &hnum)
              && ((HOST_WIDE_INT) lden & hden) == -1)
            overflow = 1;
        }
      if (hden < 0)
        {
          quo_neg = ~ quo_neg;
          neg_double (lden, hden, &lden, &hden);
        }
    }

  if (hnum == 0 && hden == 0)
    {				/* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / lden;
      goto finish_up;
    }

  if (hnum == 0)
    {				/* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  memset (quo, 0, sizeof quo);

  memset (num, 0, sizeof num);	/* to zero 9th element */
  memset (den, 0, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
    {
      /* hnum != 0 already checked.  */
      for (i = 4 - 1; i >= 0; i--)
        {
          work = num[i] + carry * BASE;
          quo[i] = work / lden;
          carry = work % lden;
        }
    }
  else
    {
      /* Full double precision division,
         with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int num_hi_sig, den_hi_sig;
      unsigned HOST_WIDE_INT quo_est, scale;

      /* Find the highest nonzero divisor digit.  */
      for (i = 4 - 1;; i--)
        if (den[i] != 0)
          {
            den_hi_sig = i;
            break;
          }

      /* Ensure that the first digit of the divisor is at least BASE/2.
         This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1)
        {		/* scale divisor and dividend */
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (num[i] * scale) + carry;
              num[i] = LOWPART (work);
              carry = HIGHPART (work);
            }

          num[4] = carry;
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (den[i] * scale) + carry;
              den[i] = LOWPART (work);
              carry = HIGHPART (work);
              if (den[i] != 0) den_hi_sig = i;
            }
        }

      num_hi_sig = 4;

      /* Main loop */
      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
        {
          /* Guess the next quotient digit, quo_est, by dividing the first
             two remaining dividend digits by the high order quotient digit.
             quo_est is never low and is at most 2 high.  */
          unsigned HOST_WIDE_INT tmp;

          num_hi_sig = i + den_hi_sig + 1;
          work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
          if (num[num_hi_sig] != den[den_hi_sig])
            quo_est = work / den[den_hi_sig];
          else
            quo_est = BASE - 1;

          /* Refine quo_est so it's usually correct, and at most one high.  */
          tmp = work - quo_est * den[den_hi_sig];
          if (tmp < BASE
              && (den[den_hi_sig - 1] * quo_est
                  > (tmp * BASE + num[num_hi_sig - 2])))
            quo_est--;

          /* Try QUO_EST as the quotient digit, by multiplying the
             divisor by QUO_EST and subtracting from the remaining dividend.
             Keep in mind that QUO_EST is the (I - 1)st digit.  */

          carry = 0;
          for (j = 0; j <= den_hi_sig; j++)
            {
              work = quo_est * den[j] + carry;
              carry = HIGHPART (work);
              work = num[i + j] - LOWPART (work);
              num[i + j] = LOWPART (work);
              carry += HIGHPART (work) != 0;
            }

          /* If quo_est was high by one, then num[i] went negative and
             we need to correct things.  */
          if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
            {
              quo_est--;
              carry = 0;		/* add divisor back in */
              for (j = 0; j <= den_hi_sig; j++)
                {
                  work = num[i + j] + den[j] + carry;
                  carry = HIGHPART (work);
                  num[i + j] = LOWPART (work);
                }

              num [num_hi_sig] += carry;
            }

          /* Store the quotient digit.  */
          quo[i] = quo_est;
        }
    }

  decode (quo, lquo, hquo);

 finish_up:
  /* If result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* Compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:	/* round toward zero */
    case EXACT_DIV_EXPR:	/* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:	/* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
        {
          /* quo = quo - 1;  */
          add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:		/* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
        {
          add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:	/* round to closest integer */
      {
        unsigned HOST_WIDE_INT labs_rem = *lrem;
        HOST_WIDE_INT habs_rem = *hrem;
        unsigned HOST_WIDE_INT labs_den = lden, ltwice;
        HOST_WIDE_INT habs_den = hden, htwice;

        /* Get absolute values.  */
        if (*hrem < 0)
          neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
        if (hden < 0)
          neg_double (lden, hden, &labs_den, &habs_den);

        /* If (2 * abs (lrem) >= abs (lden)) */
        mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
                    labs_rem, habs_rem, &ltwice, &htwice);

        if (((unsigned HOST_WIDE_INT) habs_den
             < (unsigned HOST_WIDE_INT) htwice)
            || (((unsigned HOST_WIDE_INT) habs_den
                 == (unsigned HOST_WIDE_INT) htwice)
                && (labs_den < ltwice)))
          {
            if (*hquo < 0)
              /* quo = quo - 1;  */
              add_double (*lquo, *hquo,
                          (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
            else
              /* quo = quo + 1;  */
              add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                          lquo, hquo);
          }
        else
          return overflow;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
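
/* Illustrative sketch (not part of GCC): deriving FLOOR, CEIL and ROUND
   division from C's native truncating division by adjusting the quotient
   when the remainder is nonzero, mirroring the switch above.  sk_div is
   local to this example.  */
#if 0
#include <stdio.h>

static long
sk_div (long n, long d, int mode)  /* 0=trunc, 1=floor, 2=ceil, 3=round */
{
  long q = n / d, r = n % d;       /* C99: rounds toward zero */
  if (r != 0)
    {
      int qneg = (n < 0) != (d < 0);
      long ar = r < 0 ? -r : r, ad = d < 0 ? -d : d;
      if (mode == 1 && qneg)
        q--;                       /* round toward -inf */
      else if (mode == 2 && !qneg)
        q++;                       /* round toward +inf */
      else if (mode == 3 && 2 * ar >= ad)
        q += qneg ? -1 : 1;        /* round to closest integer */
    }
  return q;
}

int
main (void)
{
  printf ("%ld %ld %ld %ld\n", sk_div (-7, 2, 0), sk_div (-7, 2, 1),
          sk_div (-7, 2, 2), sk_div (-7, 2, 3));  /* -3 -4 -3 -4 */
  return 0;
}
#endif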
/* If ARG2 divides ARG1 with zero remainder, carries out the division
   of type CODE and returns the quotient.
   Otherwise returns NULL_TREE.  */

tree
div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2)
{
  unsigned HOST_WIDE_INT int1l, int2l;
  HOST_WIDE_INT int1h, int2h;
  unsigned HOST_WIDE_INT quol, reml;
  HOST_WIDE_INT quoh, remh;
  tree type = TREE_TYPE (arg1);
  int uns = TYPE_UNSIGNED (type);

  int1l = TREE_INT_CST_LOW (arg1);
  int1h = TREE_INT_CST_HIGH (arg1);
  /* &obj[0] + -128 really should be compiled as &obj[-8] rather than
     &obj[some_exotic_number].  */
  if (POINTER_TYPE_P (type))
    {
      uns = false;
      type = signed_type_for (type);
      fit_double_type (int1l, int1h, &int1l, &int1h,
                       type);
    }
  else
    fit_double_type (int1l, int1h, &int1l, &int1h, type);
  int2l = TREE_INT_CST_LOW (arg2);
  int2h = TREE_INT_CST_HIGH (arg2);

  div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
                        &quol, &quoh, &reml, &remh);
  if (remh != 0 || reml != 0)
    return NULL_TREE;

  return build_int_cst_wide (type, quol, quoh);
}
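
/* Illustrative sketch (not part of GCC): the same "divide only if exact"
   contract with plain integers, returning success through an out
   parameter instead of NULL_TREE.  sk_div_if_exact is local to this
   example.  */
#if 0
#include <stdio.h>

static int
sk_div_if_exact (long a, long b, long *q)
{
  if (b == 0 || a % b != 0)
    return 0;       /* nonzero remainder: caller keeps A/B unfolded */
  *q = a / b;
  return 1;
}

int
main (void)
{
  long q;
  printf ("%d\n", sk_div_if_exact (12, 4, &q));  /* 1, q == 3 */
  printf ("%d\n", sk_div_if_exact (13, 4, &q));  /* 0 */
  return 0;
}
#endif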
/* This is nonzero if we should defer warnings about undefined
   overflow.  This facility exists because these warnings are a
   special case.  The code to estimate loop iterations does not want
   to issue any warnings, since it works with expressions which do not
   occur in user code.  Various bits of cleanup code call fold(), but
   only use the result if it has certain characteristics (e.g., is a
   constant); that code only wants to issue a warning if the result is
   used.  */

static int fold_deferring_overflow_warnings;

/* If a warning about undefined overflow is deferred, this is the
   warning.  Note that this may cause us to turn two warnings into
   one, but that is fine since it is sufficient to only give one
   warning per expression.  */

static const char* fold_deferred_overflow_warning;

/* If a warning about undefined overflow is deferred, this is the
   level at which the warning should be emitted.  */

static enum warn_strict_overflow_code fold_deferred_overflow_code;
/* Start deferring overflow warnings.  We could use a stack here to
   permit nested calls, but at present it is not necessary.  */

void
fold_defer_overflow_warnings (void)
{
  ++fold_deferring_overflow_warnings;
}
/* Stop deferring overflow warnings.  If there is a pending warning,
   and ISSUE is true, then issue the warning if appropriate.  STMT is
   the statement with which the warning should be associated (used for
   location information); STMT may be NULL.  CODE is the level of the
   warning--a warn_strict_overflow_code value.  This function will use
   the smaller of CODE and the deferred code when deciding whether to
   issue the warning.  CODE may be zero to mean to always use the
   deferred code.  */

void
fold_undefer_overflow_warnings (bool issue, const_tree stmt, int code)
{
  const char *warnmsg;
  location_t locus;

  gcc_assert (fold_deferring_overflow_warnings > 0);
  --fold_deferring_overflow_warnings;
  if (fold_deferring_overflow_warnings > 0)
    {
      if (fold_deferred_overflow_warning != NULL
          && code != 0
          && code < (int) fold_deferred_overflow_code)
        fold_deferred_overflow_code = code;
      return;
    }

  warnmsg = fold_deferred_overflow_warning;
  fold_deferred_overflow_warning = NULL;

  if (!issue || warnmsg == NULL)
    return;

  if (stmt != NULL_TREE && TREE_NO_WARNING (stmt))
    return;

  /* Use the smallest code level when deciding to issue the
     warning.  */
  if (code == 0 || code > (int) fold_deferred_overflow_code)
    code = fold_deferred_overflow_code;

  if (!issue_strict_overflow_warning (code))
    return;

  if (stmt == NULL_TREE || !expr_has_location (stmt))
    locus = input_location;
  else
    locus = expr_location (stmt);
  warning (OPT_Wstrict_overflow, "%H%s", &locus, warnmsg);
}
/* Stop deferring overflow warnings, ignoring any deferred
   warnings.  */

void
fold_undefer_and_ignore_overflow_warnings (void)
{
  fold_undefer_overflow_warnings (false, NULL_TREE, 0);
}

/* Whether we are deferring overflow warnings.  */

bool
fold_deferring_overflow_warnings_p (void)
{
  return fold_deferring_overflow_warnings > 0;
}
/* This is called when we fold something based on the fact that signed
   overflow is undefined.  */

static void
fold_overflow_warning (const char* gmsgid, enum warn_strict_overflow_code wc)
{
  if (fold_deferring_overflow_warnings > 0)
    {
      if (fold_deferred_overflow_warning == NULL
          || wc < fold_deferred_overflow_code)
        {
          fold_deferred_overflow_warning = gmsgid;
          fold_deferred_overflow_code = wc;
        }
    }
  else if (issue_strict_overflow_warning (wc))
    warning (OPT_Wstrict_overflow, gmsgid);
}
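
/* Illustrative sketch (not part of GCC): the intended defer/undefer
   pattern.  A caller that may discard fold's result brackets the call so
   a deferred warning is only emitted if the result is actually used.
   `interesting_p' is a hypothetical predicate standing in for the
   caller's own test.  */
#if 0
static tree
sk_try_fold (tree expr, bool (*interesting_p) (tree))
{
  tree folded;

  fold_defer_overflow_warnings ();
  folded = fold (expr);
  /* Issue the deferred warning only when we keep the folded tree.  */
  fold_undefer_overflow_warnings (interesting_p (folded), NULL_TREE, 0);
  return folded;
}
#endif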
/* Return true if the built-in mathematical function specified by CODE
   is odd, i.e. -f(x) == f(-x).  */

static bool
negate_mathfn_p (enum built_in_function code)
{
  switch (code)
    {
    CASE_FLT_FN (BUILT_IN_ASIN):
    CASE_FLT_FN (BUILT_IN_ASINH):
    CASE_FLT_FN (BUILT_IN_ATAN):
    CASE_FLT_FN (BUILT_IN_ATANH):
    CASE_FLT_FN (BUILT_IN_CASIN):
    CASE_FLT_FN (BUILT_IN_CASINH):
    CASE_FLT_FN (BUILT_IN_CATAN):
    CASE_FLT_FN (BUILT_IN_CATANH):
    CASE_FLT_FN (BUILT_IN_CBRT):
    CASE_FLT_FN (BUILT_IN_CPROJ):
    CASE_FLT_FN (BUILT_IN_CSIN):
    CASE_FLT_FN (BUILT_IN_CSINH):
    CASE_FLT_FN (BUILT_IN_CTAN):
    CASE_FLT_FN (BUILT_IN_CTANH):
    CASE_FLT_FN (BUILT_IN_ERF):
    CASE_FLT_FN (BUILT_IN_LLROUND):
    CASE_FLT_FN (BUILT_IN_LROUND):
    CASE_FLT_FN (BUILT_IN_ROUND):
    CASE_FLT_FN (BUILT_IN_SIN):
    CASE_FLT_FN (BUILT_IN_SINH):
    CASE_FLT_FN (BUILT_IN_TAN):
    CASE_FLT_FN (BUILT_IN_TANH):
    CASE_FLT_FN (BUILT_IN_TRUNC):
      return true;

    CASE_FLT_FN (BUILT_IN_LLRINT):
    CASE_FLT_FN (BUILT_IN_LRINT):
    CASE_FLT_FN (BUILT_IN_NEARBYINT):
    CASE_FLT_FN (BUILT_IN_RINT):
      return !flag_rounding_math;

    default:
      break;
    }
  return false;
}
/* Check whether we may negate an integer constant T without causing
   overflow.  */

static bool
may_negate_without_overflow_p (const_tree t)
{
  unsigned HOST_WIDE_INT val;
  unsigned int prec;
  tree type;

  gcc_assert (TREE_CODE (t) == INTEGER_CST);

  type = TREE_TYPE (t);
  if (TYPE_UNSIGNED (type))
    return false;

  prec = TYPE_PRECISION (type);
  if (prec > HOST_BITS_PER_WIDE_INT)
    {
      if (TREE_INT_CST_LOW (t) != 0)
        return true;
      prec -= HOST_BITS_PER_WIDE_INT;
      val = TREE_INT_CST_HIGH (t);
    }
  else
    val = TREE_INT_CST_LOW (t);
  if (prec < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
  return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1));
}
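
/* Illustrative sketch (not part of GCC): in a PREC-bit two's complement
   type only the minimum value (the sign bit alone) has no representable
   negation, which is exactly the value the test above rejects.
   sk_may_negate is local to this example.  */
#if 0
#include <stdio.h>

static int
sk_may_negate (long val, unsigned prec)
{
  unsigned long bits = (unsigned long) val;
  if (prec < sizeof (long) * 8)
    bits &= (1UL << prec) - 1;
  return bits != 1UL << (prec - 1);  /* false only for the minimum */
}

int
main (void)
{
  printf ("%d\n", sk_may_negate (-128, 8));  /* 0: -(-128) overflows */
  printf ("%d\n", sk_may_negate (-127, 8));  /* 1 */
  return 0;
}
#endif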
/* Determine whether an expression T can be cheaply negated using
   the function negate_expr without introducing undefined overflow.  */

static bool
negate_expr_p (tree t)
{
  tree type;

  if (t == 0)
    return false;

  type = TREE_TYPE (t);

  STRIP_SIGN_NOPS (t);
  switch (TREE_CODE (t))
    {
    case INTEGER_CST:
      if (TYPE_OVERFLOW_WRAPS (type))
        return true;

      /* Check that -CST will not overflow type.  */
      return may_negate_without_overflow_p (t);
    case BIT_NOT_EXPR:
      return (INTEGRAL_TYPE_P (type)
              && TYPE_OVERFLOW_WRAPS (type));

    case FIXED_CST:
    case REAL_CST:
    case NEGATE_EXPR:
      return true;

    case COMPLEX_CST:
      return negate_expr_p (TREE_REALPART (t))
             && negate_expr_p (TREE_IMAGPART (t));

    case COMPLEX_EXPR:
      return negate_expr_p (TREE_OPERAND (t, 0))
             && negate_expr_p (TREE_OPERAND (t, 1));

    case CONJ_EXPR:
      return negate_expr_p (TREE_OPERAND (t, 0));

    case PLUS_EXPR:
      if (HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
          || HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
        return false;
      /* -(A + B) -> (-B) - A.  */
      if (negate_expr_p (TREE_OPERAND (t, 1))
          && reorder_operands_p (TREE_OPERAND (t, 0),
                                 TREE_OPERAND (t, 1)))
        return true;
      /* -(A + B) -> (-A) - B.  */
      return negate_expr_p (TREE_OPERAND (t, 0));

    case MINUS_EXPR:
      /* We can't turn -(A-B) into B-A when we honor signed zeros.  */
      return !HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
             && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
             && reorder_operands_p (TREE_OPERAND (t, 0),
                                    TREE_OPERAND (t, 1));

    case MULT_EXPR:
      if (TYPE_UNSIGNED (TREE_TYPE (t)))
        break;

      /* Fall through.  */

    case RDIV_EXPR:
      if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
        return negate_expr_p (TREE_OPERAND (t, 1))
               || negate_expr_p (TREE_OPERAND (t, 0));
      break;

    case TRUNC_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case EXACT_DIV_EXPR:
      /* In general we can't negate A / B, because if A is INT_MIN and
         B is 1, we may turn this into INT_MIN / -1 which is undefined
         and actually traps on some architectures.  But if overflow is
         undefined, we can negate, because - (INT_MIN / 1) is an
         overflow.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (t))
          && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t)))
        break;
      return negate_expr_p (TREE_OPERAND (t, 1))
             || negate_expr_p (TREE_OPERAND (t, 0));

    case NOP_EXPR:
      /* Negate -((double)float) as (double)(-float).  */
      if (TREE_CODE (type) == REAL_TYPE)
        {
          tree tem = strip_float_extensions (t);
          if (tem != t)
            return negate_expr_p (tem);
        }
      break;

    case CALL_EXPR:
      /* Negate -f(x) as f(-x).  */
      if (negate_mathfn_p (builtin_mathfn_code (t)))
        return negate_expr_p (CALL_EXPR_ARG (t, 0));
      break;

    case RSHIFT_EXPR:
      /* Optimize -((int)x >> 31) into (unsigned)x >> 31.  */
      if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
        {
          tree op1 = TREE_OPERAND (t, 1);
          if (TREE_INT_CST_HIGH (op1) == 0
              && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
                 == TREE_INT_CST_LOW (op1))
            return true;
        }
      break;

    default:
      break;
    }
  return false;
}
/* Given T, an expression, return a folded tree for -T or NULL_TREE, if no
   simplification is possible.
   If negate_expr_p would return true for T, NULL_TREE will never be
   returned.  */

static tree
fold_negate_expr (tree t)
{
  tree type = TREE_TYPE (t);
  tree tem;

  switch (TREE_CODE (t))
    {
    /* Convert - (~A) to A + 1.  */
    case BIT_NOT_EXPR:
      if (INTEGRAL_TYPE_P (type))
        return fold_build2 (PLUS_EXPR, type, TREE_OPERAND (t, 0),
                            build_int_cst (type, 1));
      break;

    case INTEGER_CST:
      tem = fold_negate_const (t, type);
      if (TREE_OVERFLOW (tem) == TREE_OVERFLOW (t)
          || !TYPE_OVERFLOW_TRAPS (type))
        return tem;
      break;

    case REAL_CST:
      tem = fold_negate_const (t, type);
      /* Two's complement FP formats, such as c4x, may overflow.  */
      if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
        return tem;
      break;

    case FIXED_CST:
      tem = fold_negate_const (t, type);
      return tem;

    case COMPLEX_CST:
      {
        tree rpart = negate_expr (TREE_REALPART (t));
        tree ipart = negate_expr (TREE_IMAGPART (t));

        if ((TREE_CODE (rpart) == REAL_CST
             && TREE_CODE (ipart) == REAL_CST)
            || (TREE_CODE (rpart) == INTEGER_CST
                && TREE_CODE (ipart) == INTEGER_CST))
          return build_complex (type, rpart, ipart);
      }
      break;

    case COMPLEX_EXPR:
      if (negate_expr_p (t))
        return fold_build2 (COMPLEX_EXPR, type,
                            fold_negate_expr (TREE_OPERAND (t, 0)),
                            fold_negate_expr (TREE_OPERAND (t, 1)));
      break;

    case CONJ_EXPR:
      if (negate_expr_p (t))
        return fold_build1 (CONJ_EXPR, type,
                            fold_negate_expr (TREE_OPERAND (t, 0)));
      break;

    case NEGATE_EXPR:
      return TREE_OPERAND (t, 0);

    case PLUS_EXPR:
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
          && !HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
        {
          /* -(A + B) -> (-B) - A.  */
          if (negate_expr_p (TREE_OPERAND (t, 1))
              && reorder_operands_p (TREE_OPERAND (t, 0),
                                     TREE_OPERAND (t, 1)))
            {
              tem = negate_expr (TREE_OPERAND (t, 1));
              return fold_build2 (MINUS_EXPR, type,
                                  tem, TREE_OPERAND (t, 0));
            }

          /* -(A + B) -> (-A) - B.  */
          if (negate_expr_p (TREE_OPERAND (t, 0)))
            {
              tem = negate_expr (TREE_OPERAND (t, 0));
              return fold_build2 (MINUS_EXPR, type,
                                  tem, TREE_OPERAND (t, 1));
            }
        }
      break;

    case MINUS_EXPR:
      /* - (A - B) -> B - A  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
          && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
          && reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1)))
        return fold_build2 (MINUS_EXPR, type,
                            TREE_OPERAND (t, 1), TREE_OPERAND (t, 0));
      break;

    case MULT_EXPR:
      if (TYPE_UNSIGNED (type))
        break;

      /* Fall through.  */

    case RDIV_EXPR:
      if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type)))
        {
          tem = TREE_OPERAND (t, 1);
          if (negate_expr_p (tem))
            return fold_build2 (TREE_CODE (t), type,
                                TREE_OPERAND (t, 0), negate_expr (tem));
          tem = TREE_OPERAND (t, 0);
          if (negate_expr_p (tem))
            return fold_build2 (TREE_CODE (t), type,
                                negate_expr (tem), TREE_OPERAND (t, 1));
        }
      break;

    case TRUNC_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case EXACT_DIV_EXPR:
      /* In general we can't negate A / B, because if A is INT_MIN and
         B is 1, we may turn this into INT_MIN / -1 which is undefined
         and actually traps on some architectures.  But if overflow is
         undefined, we can negate, because - (INT_MIN / 1) is an
         overflow.  */
      if (!INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_UNDEFINED (type))
        {
          const char * const warnmsg = G_("assuming signed overflow does not "
                                          "occur when negating a division");
          tem = TREE_OPERAND (t, 1);
          if (negate_expr_p (tem))
            {
              if (INTEGRAL_TYPE_P (type)
                  && (TREE_CODE (tem) != INTEGER_CST
                      || integer_onep (tem)))
                fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
              return fold_build2 (TREE_CODE (t), type,
                                  TREE_OPERAND (t, 0), negate_expr (tem));
            }
          tem = TREE_OPERAND (t, 0);
          if (negate_expr_p (tem))
            {
              if (INTEGRAL_TYPE_P (type)
                  && (TREE_CODE (tem) != INTEGER_CST
                      || tree_int_cst_equal (tem, TYPE_MIN_VALUE (type))))
                fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
              return fold_build2 (TREE_CODE (t), type,
                                  negate_expr (tem), TREE_OPERAND (t, 1));
            }
        }
      break;

    case NOP_EXPR:
      /* Convert -((double)float) into (double)(-float).  */
      if (TREE_CODE (type) == REAL_TYPE)
        {
          tem = strip_float_extensions (t);
          if (tem != t && negate_expr_p (tem))
            return fold_convert (type, negate_expr (tem));
        }
      break;

    case CALL_EXPR:
      /* Negate -f(x) as f(-x).  */
      if (negate_mathfn_p (builtin_mathfn_code (t))
          && negate_expr_p (CALL_EXPR_ARG (t, 0)))
        {
          tree fndecl, arg;

          fndecl = get_callee_fndecl (t);
          arg = negate_expr (CALL_EXPR_ARG (t, 0));
          return build_call_expr (fndecl, 1, arg);
        }
      break;

    case RSHIFT_EXPR:
      /* Optimize -((int)x >> 31) into (unsigned)x >> 31.  */
      if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
        {
          tree op1 = TREE_OPERAND (t, 1);
          if (TREE_INT_CST_HIGH (op1) == 0
              && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
                 == TREE_INT_CST_LOW (op1))
            {
              tree ntype = TYPE_UNSIGNED (type)
                           ? signed_type_for (type)
                           : unsigned_type_for (type);
              tree temp = fold_convert (ntype, TREE_OPERAND (t, 0));
              temp = fold_build2 (RSHIFT_EXPR, ntype, temp, op1);
              return fold_convert (type, temp);
            }
        }
      break;

    default:
      break;
    }

  return NULL_TREE;
}
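
/* Illustrative sketch (not part of GCC): the RSHIFT_EXPR identity above.
   For 32-bit int, (int)x >> 31 is 0 or -1 depending on the sign (assuming
   the usual arithmetic right shift for negative values), so its negation
   equals the logical shift (unsigned)x >> 31, which is 0 or 1.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int32_t xs[] = { 5, 0, -7, INT32_MIN };
  unsigned i;

  for (i = 0; i < 4; i++)
    {
      int32_t x = xs[i];
      assert (-(x >> 31) == (int32_t) ((uint32_t) x >> 31));
    }
  return 0;
}
#endif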
/* Like fold_negate_expr, but return a NEGATE_EXPR tree, if T cannot be
   negated in a simpler way.  Also allow for T to be NULL_TREE, in which case
   return NULL_TREE.  */

static tree
negate_expr (tree t)
{
  tree type, tem;

  if (t == NULL_TREE)
    return NULL_TREE;

  type = TREE_TYPE (t);
  STRIP_SIGN_NOPS (t);

  tem = fold_negate_expr (t);
  if (!tem)
    tem = build1 (NEGATE_EXPR, TREE_TYPE (t), t);
  return fold_convert (type, tem);
}
/* Split a tree IN into a constant, literal and variable parts that could be
   combined with CODE to make IN.  "constant" means an expression with
   TREE_CONSTANT but that isn't an actual constant.  CODE must be a
   commutative arithmetic operation.  Store the constant part into *CONP,
   the literal in *LITP and return the variable part.  If a part isn't
   present, set it to null.  If the tree does not decompose in this way,
   return the entire tree as the variable part and the other parts as null.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.  In that
   case, we negate an operand that was subtracted.  Except if it is a
   literal for which we use *MINUS_LITP instead.

   If NEGATE_P is true, we are negating all of IN, again except a literal
   for which we use *MINUS_LITP instead.

   If IN is itself a literal or constant, return it as appropriate.

   Note that we do not guarantee that any of the three values will be the
   same type as IN, but they will have the same signedness and mode.  */

static tree
split_tree (tree in, enum tree_code code, tree *conp, tree *litp,
            tree *minus_litp, int negate_p)
{
  tree var = 0;

  *conp = 0;
  *litp = 0;
  *minus_litp = 0;

  /* Strip any conversions that don't change the machine mode or signedness.  */
  STRIP_SIGN_NOPS (in);

  if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST
      || TREE_CODE (in) == FIXED_CST)
    *litp = in;
  else if (TREE_CODE (in) == code
           || ((! FLOAT_TYPE_P (TREE_TYPE (in)) || flag_associative_math)
               && ! SAT_FIXED_POINT_TYPE_P (TREE_TYPE (in))
               /* We can associate addition and subtraction together (even
                  though the C standard doesn't say so) for integers because
                  the value is not affected.  For reals, the value might be
                  affected, so we can't.  */
               && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
                   || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
    {
      tree op0 = TREE_OPERAND (in, 0);
      tree op1 = TREE_OPERAND (in, 1);
      int neg1_p = TREE_CODE (in) == MINUS_EXPR;
      int neg_litp_p = 0, neg_conp_p = 0, neg_var_p = 0;

      /* First see if either of the operands is a literal, then a constant.  */
      if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST
          || TREE_CODE (op0) == FIXED_CST)
        *litp = op0, op0 = 0;
      else if (TREE_CODE (op1) == INTEGER_CST || TREE_CODE (op1) == REAL_CST
               || TREE_CODE (op1) == FIXED_CST)
        *litp = op1, neg_litp_p = neg1_p, op1 = 0;

      if (op0 != 0 && TREE_CONSTANT (op0))
        *conp = op0, op0 = 0;
      else if (op1 != 0 && TREE_CONSTANT (op1))
        *conp = op1, neg_conp_p = neg1_p, op1 = 0;

      /* If we haven't dealt with either operand, this is not a case we can
         decompose.  Otherwise, VAR is either of the ones remaining, if any.  */
      if (op0 != 0 && op1 != 0)
        var = in;
      else if (op0 != 0)
        var = op0;
      else
        var = op1, neg_var_p = neg1_p;

      /* Now do any needed negations.  */
      if (neg_litp_p)
        *minus_litp = *litp, *litp = 0;
      if (neg_conp_p)
        *conp = negate_expr (*conp);
      if (neg_var_p)
        var = negate_expr (var);
    }
  else if (TREE_CONSTANT (in))
    *conp = in;
  else
    var = in;

  if (negate_p)
    {
      if (*litp)
        *minus_litp = *litp, *litp = 0;
      else if (*minus_litp)
        *litp = *minus_litp, *minus_litp = 0;
      *conp = negate_expr (*conp);
      var = negate_expr (var);
    }

  return var;
}
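
/* Worked example (not from the sources): splitting IN = x - 4 with
   CODE == PLUS_EXPR and NEGATE_P == 0 takes the MINUS_EXPR branch above.
   The literal 4 was subtracted, so it lands in *MINUS_LITP rather than
   *LITP, *CONP stays null, and x is returned as the variable part.
   Re-combining the pieces with associate_trees below rebuilds an
   equivalent tree.  */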
/* Re-associate trees split by the above function.  T1 and T2 are either
   expressions to associate or null.  Return the new expression, if any.  If
   we build an operation, do it in TYPE and with CODE.  */

static tree
associate_trees (tree t1, tree t2, enum tree_code code, tree type)
{
  if (t1 == 0)
    return t2;
  else if (t2 == 0)
    return t1;

  /* If either input is CODE, a PLUS_EXPR, or a MINUS_EXPR, don't
     try to fold this since we will have infinite recursion.  But do
     deal with any NEGATE_EXPRs.  */
  if (TREE_CODE (t1) == code || TREE_CODE (t2) == code
      || TREE_CODE (t1) == MINUS_EXPR || TREE_CODE (t2) == MINUS_EXPR)
    {
      if (code == PLUS_EXPR)
        {
          if (TREE_CODE (t1) == NEGATE_EXPR)
            return build2 (MINUS_EXPR, type, fold_convert (type, t2),
                           fold_convert (type, TREE_OPERAND (t1, 0)));
          else if (TREE_CODE (t2) == NEGATE_EXPR)
            return build2 (MINUS_EXPR, type, fold_convert (type, t1),
                           fold_convert (type, TREE_OPERAND (t2, 0)));
          else if (integer_zerop (t2))
            return fold_convert (type, t1);
        }
      else if (code == MINUS_EXPR)
        {
          if (integer_zerop (t2))
            return fold_convert (type, t1);
        }

      return build2 (code, type, fold_convert (type, t1),
                     fold_convert (type, t2));
    }

  return fold_build2 (code, type, fold_convert (type, t1),
                      fold_convert (type, t2));
}
/* Check whether TYPE1 and TYPE2 are equivalent integer types, suitable
   for use in int_const_binop, size_binop and size_diffop.  */

static bool
int_binop_types_match_p (enum tree_code code, const_tree type1, const_tree type2)
{
  if (TREE_CODE (type1) != INTEGER_TYPE && !POINTER_TYPE_P (type1))
    return false;
  if (TREE_CODE (type2) != INTEGER_TYPE && !POINTER_TYPE_P (type2))
    return false;

  switch (code)
    {
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return true;

    default:
      break;
    }

  return TYPE_UNSIGNED (type1) == TYPE_UNSIGNED (type2)
         && TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
         && TYPE_MODE (type1) == TYPE_MODE (type2);
}
/* Combine two integer constants ARG1 and ARG2 under operation CODE
   to produce a new constant.  Return NULL_TREE if we don't know how
   to evaluate CODE at compile-time.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */

tree
int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2, int notrunc)
{
  unsigned HOST_WIDE_INT int1l, int2l;
  HOST_WIDE_INT int1h, int2h;
  unsigned HOST_WIDE_INT low;
  HOST_WIDE_INT hi;
  unsigned HOST_WIDE_INT garbagel;
  HOST_WIDE_INT garbageh;
  tree t;
  tree type = TREE_TYPE (arg1);
  int uns = TYPE_UNSIGNED (type);
  int is_sizetype
    = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
  int overflow = 0;

  int1l = TREE_INT_CST_LOW (arg1);
  int1h = TREE_INT_CST_HIGH (arg1);
  int2l = TREE_INT_CST_LOW (arg2);
  int2h = TREE_INT_CST_HIGH (arg2);

  switch (code)
    {
    case BIT_IOR_EXPR:
      low = int1l | int2l, hi = int1h | int2h;
      break;

    case BIT_XOR_EXPR:
      low = int1l ^ int2l, hi = int1h ^ int2h;
      break;

    case BIT_AND_EXPR:
      low = int1l & int2l, hi = int1h & int2h;
      break;

    case RSHIFT_EXPR:
      int2l = -int2l;
    case LSHIFT_EXPR:
      /* It's unclear from the C standard whether shifts can overflow.
         The following code ignores overflow; perhaps a C standard
         interpretation ruling is needed.  */
      lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
                     &low, &hi, !uns);
      break;

    case RROTATE_EXPR:
      int2l = - int2l;
    case LROTATE_EXPR:
      lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
                      &low, &hi);
      break;

    case PLUS_EXPR:
      overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
      break;

    case MINUS_EXPR:
      neg_double (int2l, int2h, &low, &hi);
      add_double (int1l, int1h, low, hi, &low, &hi);
      overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h);
      break;

    case MULT_EXPR:
      overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
      break;

    case TRUNC_DIV_EXPR:
    case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
    case EXACT_DIV_EXPR:
      /* This is a shortcut for a common special case.  */
      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
          && !TREE_OVERFLOW (arg1)
          && !TREE_OVERFLOW (arg2)
          && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
        {
          if (code == CEIL_DIV_EXPR)
            int1l += int2l - 1;

          low = int1l / int2l, hi = 0;
          break;
        }

      /* ... fall through ...  */

    case ROUND_DIV_EXPR:
      if (int2h == 0 && int2l == 0)
        return NULL_TREE;
      if (int2h == 0 && int2l == 1)
        {
          low = int1l, hi = int1h;
          break;
        }
      if (int1l == int2l && int1h == int2h
          && ! (int1l == 0 && int1h == 0))
        {
          low = 1, hi = 0;
          break;
        }
      overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
                                       &low, &hi, &garbagel, &garbageh);
      break;

    case TRUNC_MOD_EXPR:
    case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
      /* This is a shortcut for a common special case.  */
      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
          && !TREE_OVERFLOW (arg1)
          && !TREE_OVERFLOW (arg2)
          && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
        {
          if (code == CEIL_MOD_EXPR)
            int1l += int2l - 1;
          low = int1l % int2l, hi = 0;
          break;
        }

      /* ... fall through ...  */

    case ROUND_MOD_EXPR:
      if (int2h == 0 && int2l == 0)
        return NULL_TREE;
      overflow = div_and_round_double (code, uns,
                                       int1l, int1h, int2l, int2h,
                                       &garbagel, &garbageh, &low, &hi);
      break;

    case MIN_EXPR:
    case MAX_EXPR:
      if (uns)
        low = (((unsigned HOST_WIDE_INT) int1h
                < (unsigned HOST_WIDE_INT) int2h)
               || (((unsigned HOST_WIDE_INT) int1h
                    == (unsigned HOST_WIDE_INT) int2h)
                   && int1l < int2l));
      else
        low = (int1h < int2h
               || (int1h == int2h && int1l < int2l));

      if (low == (code == MIN_EXPR))
        low = int1l, hi = int1h;
      else
        low = int2l, hi = int2h;
      break;

    default:
      return NULL_TREE;
    }

  if (notrunc)
    {
      t = build_int_cst_wide (TREE_TYPE (arg1), low, hi);

      /* Propagate overflow flags ourselves.  */
      if (((!uns || is_sizetype) && overflow)
          | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
        {
          t = copy_node (t);
          TREE_OVERFLOW (t) = 1;
        }
    }
  else
    t = force_fit_type_double (TREE_TYPE (arg1), low, hi, 1,
                               ((!uns || is_sizetype) && overflow)
                               | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));

  return t;
}
/* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
   constant.  We assume ARG1 and ARG2 have the same data type, or at least
   are the same kind of constant and the same machine mode.  Return zero if
   combining the constants is not allowed in the current operating mode.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */

static tree
const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
{
  /* Sanity check for the recursive cases.  */
  if (!arg1 || !arg2)
    return NULL_TREE;

  STRIP_NOPS (arg1);
  STRIP_NOPS (arg2);

  if (TREE_CODE (arg1) == INTEGER_CST)
    return int_const_binop (code, arg1, arg2, notrunc);

  if (TREE_CODE (arg1) == REAL_CST)
    {
      enum machine_mode mode;
      REAL_VALUE_TYPE d1;
      REAL_VALUE_TYPE d2;
      REAL_VALUE_TYPE value;
      REAL_VALUE_TYPE result;
      bool inexact;
      tree t, type;

      /* The following codes are handled by real_arithmetic.  */
      switch (code)
        {
        case PLUS_EXPR:
        case MINUS_EXPR:
        case MULT_EXPR:
        case RDIV_EXPR:
        case MIN_EXPR:
        case MAX_EXPR:
          break;

        default:
          return NULL_TREE;
        }

      d1 = TREE_REAL_CST (arg1);
      d2 = TREE_REAL_CST (arg2);

      type = TREE_TYPE (arg1);
      mode = TYPE_MODE (type);

      /* Don't perform operation if we honor signaling NaNs and
         either operand is a NaN.  */
      if (HONOR_SNANS (mode)
          && (REAL_VALUE_ISNAN (d1) || REAL_VALUE_ISNAN (d2)))
        return NULL_TREE;

      /* Don't perform operation if it would raise a division
         by zero exception.  */
      if (code == RDIV_EXPR
          && REAL_VALUES_EQUAL (d2, dconst0)
          && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
        return NULL_TREE;

      /* If either operand is a NaN, just return it.  Otherwise, set up
         for floating-point trap; we return an overflow.  */
      if (REAL_VALUE_ISNAN (d1))
        return arg1;
      else if (REAL_VALUE_ISNAN (d2))
        return arg2;

      inexact = real_arithmetic (&value, code, &d1, &d2);
      real_convert (&result, mode, &value);

      /* Don't constant fold this floating point operation if
         the result has overflowed and flag_trapping_math.  */
      if (flag_trapping_math
          && MODE_HAS_INFINITIES (mode)
          && REAL_VALUE_ISINF (result)
          && !REAL_VALUE_ISINF (d1)
          && !REAL_VALUE_ISINF (d2))
        return NULL_TREE;

      /* Don't constant fold this floating point operation if the
         result may depend upon the run-time rounding mode and
         flag_rounding_math is set, or if GCC's software emulation
         is unable to accurately represent the result.  */
      if ((flag_rounding_math
           || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
               && !flag_unsafe_math_optimizations))
          && (inexact || !real_identical (&result, &value)))
        return NULL_TREE;

      t = build_real (type, result);

      TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
      return t;
    }

  if (TREE_CODE (arg1) == FIXED_CST)
    {
      FIXED_VALUE_TYPE f1;
      FIXED_VALUE_TYPE f2;
      FIXED_VALUE_TYPE result;
      tree t, type;
      int sat_p;
      bool overflow_p;

      /* The following codes are handled by fixed_arithmetic.  */
      switch (code)
        {
        case PLUS_EXPR:
        case MINUS_EXPR:
        case MULT_EXPR:
        case TRUNC_DIV_EXPR:
          f2 = TREE_FIXED_CST (arg2);
          break;

        case LSHIFT_EXPR:
        case RSHIFT_EXPR:
          f2.data.high = TREE_INT_CST_HIGH (arg2);
          f2.data.low = TREE_INT_CST_LOW (arg2);
          f2.mode = SImode;
          break;

        default:
          return NULL_TREE;
        }

      f1 = TREE_FIXED_CST (arg1);
      type = TREE_TYPE (arg1);
      sat_p = TYPE_SATURATING (type);
      overflow_p = fixed_arithmetic (&result, code, &f1, &f2, sat_p);
      t = build_fixed (type, result);
      /* Propagate overflow flags.  */
      if (overflow_p | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
        {
          TREE_OVERFLOW (t) = 1;
          TREE_CONSTANT_OVERFLOW (t) = 1;
        }
      else if (TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2))
        TREE_CONSTANT_OVERFLOW (t) = 1;
      return t;
    }

  if (TREE_CODE (arg1) == COMPLEX_CST)
    {
      tree type = TREE_TYPE (arg1);
      tree r1 = TREE_REALPART (arg1);
      tree i1 = TREE_IMAGPART (arg1);
      tree r2 = TREE_REALPART (arg2);
      tree i2 = TREE_IMAGPART (arg2);
      tree real, imag;

      switch (code)
        {
        case PLUS_EXPR:
        case MINUS_EXPR:
          real = const_binop (code, r1, r2, notrunc);
          imag = const_binop (code, i1, i2, notrunc);
          break;

        case MULT_EXPR:
          real = const_binop (MINUS_EXPR,
                              const_binop (MULT_EXPR, r1, r2, notrunc),
                              const_binop (MULT_EXPR, i1, i2, notrunc),
                              notrunc);
          imag = const_binop (PLUS_EXPR,
                              const_binop (MULT_EXPR, r1, i2, notrunc),
                              const_binop (MULT_EXPR, i1, r2, notrunc),
                              notrunc);
          break;

        case RDIV_EXPR:
          {
            tree magsquared
              = const_binop (PLUS_EXPR,
                             const_binop (MULT_EXPR, r2, r2, notrunc),
                             const_binop (MULT_EXPR, i2, i2, notrunc),
                             notrunc);
            tree t1
              = const_binop (PLUS_EXPR,
                             const_binop (MULT_EXPR, r1, r2, notrunc),
                             const_binop (MULT_EXPR, i1, i2, notrunc),
                             notrunc);
            tree t2
              = const_binop (MINUS_EXPR,
                             const_binop (MULT_EXPR, i1, r2, notrunc),
                             const_binop (MULT_EXPR, r1, i2, notrunc),
                             notrunc);

            if (INTEGRAL_TYPE_P (TREE_TYPE (r1)))
              code = TRUNC_DIV_EXPR;

            real = const_binop (code, t1, magsquared, notrunc);
            imag = const_binop (code, t2, magsquared, notrunc);
          }
          break;

        default:
          return NULL_TREE;
        }

      if (real && imag)
        return build_complex (type, real, imag);
    }

  return NULL_TREE;
}
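
/* Illustrative sketch (not part of GCC): the textbook complex division
   used above, (a+bi)/(c+di) = ((ac+bd) + (bc-ad)i) / (c^2+d^2), checked
   against C99's native complex arithmetic.  */
#if 0
#include <complex.h>
#include <math.h>
#include <assert.h>

int
main (void)
{
  double a = 3, b = 4, c = 1, d = -2;
  double mag = c * c + d * d;
  double re = (a * c + b * d) / mag;
  double im = (b * c - a * d) / mag;
  double complex want = (a + b * I) / (c + d * I);
  assert (fabs (re - creal (want)) < 1e-12);  /* re == -1 */
  assert (fabs (im - cimag (want)) < 1e-12);  /* im ==  2 */
  return 0;
}
#endif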
/* Create a size type INT_CST node with NUMBER sign extended.  KIND
   indicates which particular sizetype to create.  */

tree
size_int_kind (HOST_WIDE_INT number, enum size_type_kind kind)
{
  return build_int_cst (sizetype_tab[(int) kind], number);
}

/* Combine operands OP1 and OP2 with arithmetic operation CODE.  CODE
   is a tree code.  The type of the result is taken from the operands.
   Both must be equivalent integer types, as per int_binop_types_match_p.
   If the operands are constant, so is the result.  */

tree
size_binop (enum tree_code code, tree arg0, tree arg1)
{
  tree type = TREE_TYPE (arg0);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return error_mark_node;

  gcc_assert (int_binop_types_match_p (code, TREE_TYPE (arg0),
                                       TREE_TYPE (arg1)));

  /* Handle the special case of two integer constants faster.  */
  if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
    {
      /* And some specific cases even faster than that.  */
      if (code == PLUS_EXPR)
        {
          if (integer_zerop (arg0) && !TREE_OVERFLOW (arg0))
            return arg1;
          if (integer_zerop (arg1) && !TREE_OVERFLOW (arg1))
            return arg0;
        }
      else if (code == MINUS_EXPR)
        {
          if (integer_zerop (arg1) && !TREE_OVERFLOW (arg1))
            return arg0;
        }
      else if (code == MULT_EXPR)
        {
          if (integer_onep (arg0) && !TREE_OVERFLOW (arg0))
            return arg1;
        }

      /* Handle general case of two integer constants.  */
      return int_const_binop (code, arg0, arg1, 0);
    }

  return fold_build2 (code, type, arg0, arg1);
}
/* Given two values, either both of sizetype or both of bitsizetype,
   compute the difference between the two values.  Return the value
   in signed type corresponding to the type of the operands.  */

tree
size_diffop (tree arg0, tree arg1)
{
  tree type = TREE_TYPE (arg0);
  tree ctype;

  gcc_assert (int_binop_types_match_p (MINUS_EXPR, TREE_TYPE (arg0),
                                       TREE_TYPE (arg1)));

  /* If the type is already signed, just do the simple thing.  */
  if (!TYPE_UNSIGNED (type))
    return size_binop (MINUS_EXPR, arg0, arg1);

  if (type == sizetype)
    ctype = ssizetype;
  else if (type == bitsizetype)
    ctype = sbitsizetype;
  else
    ctype = signed_type_for (type);

  /* If either operand is not a constant, do the conversions to the signed
     type and subtract.  The hardware will do the right thing with any
     overflow in the subtraction.  */
  if (TREE_CODE (arg0) != INTEGER_CST || TREE_CODE (arg1) != INTEGER_CST)
    return size_binop (MINUS_EXPR, fold_convert (ctype, arg0),
                       fold_convert (ctype, arg1));

  /* If ARG0 is larger than ARG1, subtract and return the result in CTYPE.
     Otherwise, subtract the other way, convert to CTYPE (we know that can't
     overflow) and negate (which can't either).  Special-case a result
     of zero while we're here.  */
  if (tree_int_cst_equal (arg0, arg1))
    return build_int_cst (ctype, 0);
  else if (tree_int_cst_lt (arg1, arg0))
    return fold_convert (ctype, size_binop (MINUS_EXPR, arg0, arg1));
  else
    return size_binop (MINUS_EXPR, build_int_cst (ctype, 0),
                       fold_convert (ctype, size_binop (MINUS_EXPR,
                                                        arg1, arg0)));
}
/* A subroutine of fold_convert_const handling conversions of an
   INTEGER_CST to another integer type.  */

static tree
fold_convert_const_int_from_int (tree type, const_tree arg1)
{
  tree t;

  /* Given an integer constant, make new constant with new type,
     appropriately sign-extended or truncated.  */
  t = force_fit_type_double (type, TREE_INT_CST_LOW (arg1),
                             TREE_INT_CST_HIGH (arg1),
                             /* Don't set the overflow when
                                converting from a pointer, */
                             !POINTER_TYPE_P (TREE_TYPE (arg1))
                             /* or to a sizetype with same signedness
                                and the precision is unchanged.
                                ??? sizetype is always sign-extended,
                                but its signedness depends on the
                                frontend.  Thus we see spurious overflows
                                here if we do not check this.  */
                             && !((TYPE_PRECISION (TREE_TYPE (arg1))
                                   == TYPE_PRECISION (type))
                                  && (TYPE_UNSIGNED (TREE_TYPE (arg1))
                                      == TYPE_UNSIGNED (type))
                                  && ((TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE
                                       && TYPE_IS_SIZETYPE (TREE_TYPE (arg1)))
                                      || (TREE_CODE (type) == INTEGER_TYPE
                                          && TYPE_IS_SIZETYPE (type)))),
                             (TREE_INT_CST_HIGH (arg1) < 0
                              && (TYPE_UNSIGNED (type)
                                  < TYPE_UNSIGNED (TREE_TYPE (arg1))))
                             | TREE_OVERFLOW (arg1));

  return t;
}
/* A subroutine of fold_convert_const handling conversions of a REAL_CST
   to an integer type.  */

static tree
fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg1)
{
  int overflow = 0;
  tree t;

  /* The following code implements the floating point to integer
     conversion rules required by the Java Language Specification,
     that IEEE NaNs are mapped to zero and values that overflow
     the target precision saturate, i.e. values greater than
     INT_MAX are mapped to INT_MAX, and values less than INT_MIN
     are mapped to INT_MIN.  These semantics are allowed by the
     C and C++ standards that simply state that the behavior of
     FP-to-integer conversion is unspecified upon overflow.  */

  HOST_WIDE_INT high, low;
  REAL_VALUE_TYPE r;
  REAL_VALUE_TYPE x = TREE_REAL_CST (arg1);

  switch (code)
    {
    case FIX_TRUNC_EXPR:
      real_trunc (&r, VOIDmode, &x);
      break;

    default:
      gcc_unreachable ();
    }

  /* If R is NaN, return zero and show we have an overflow.  */
  if (REAL_VALUE_ISNAN (r))
    {
      overflow = 1;
      high = 0;
      low = 0;
    }

  /* See if R is less than the lower bound or greater than the
     upper bound.  */

  if (! overflow)
    {
      tree lt = TYPE_MIN_VALUE (type);
      REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt);
      if (REAL_VALUES_LESS (r, l))
        {
          overflow = 1;
          high = TREE_INT_CST_HIGH (lt);
          low = TREE_INT_CST_LOW (lt);
        }
    }

  if (! overflow)
    {
      tree ut = TYPE_MAX_VALUE (type);
      if (ut)
        {
          REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut);
          if (REAL_VALUES_LESS (u, r))
            {
              overflow = 1;
              high = TREE_INT_CST_HIGH (ut);
              low = TREE_INT_CST_LOW (ut);
            }
        }
    }

  if (! overflow)
    REAL_VALUE_TO_INT (&low, &high, r);

  t = force_fit_type_double (type, low, high, -1,
                             overflow | TREE_OVERFLOW (arg1));
  return t;
}
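
/* Illustrative sketch (not part of GCC): the saturating conversion
   described above, for a plain 32-bit target type.  NaN maps to zero,
   and out-of-range values clamp to INT32_MIN / INT32_MAX.
   sk_fp_to_i32 is local to this example.  */
#if 0
#include <stdint.h>
#include <math.h>
#include <assert.h>

static int32_t
sk_fp_to_i32 (double x)
{
  if (isnan (x))
    return 0;
  x = trunc (x);                /* FIX_TRUNC_EXPR: round toward zero */
  if (x < (double) INT32_MIN)
    return INT32_MIN;           /* saturate at the lower bound */
  if (x > (double) INT32_MAX)
    return INT32_MAX;           /* saturate at the upper bound */
  return (int32_t) x;
}

int
main (void)
{
  assert (sk_fp_to_i32 (NAN) == 0);
  assert (sk_fp_to_i32 (1e30) == INT32_MAX);
  assert (sk_fp_to_i32 (-1e30) == INT32_MIN);
  assert (sk_fp_to_i32 (-2.9) == -2);  /* truncation toward zero */
  return 0;
}
#endif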
2223 /* A subroutine of fold_convert_const handling conversions of a
2224 FIXED_CST to an integer type. */
2227 fold_convert_const_int_from_fixed (tree type, const_tree arg1)
2230 double_int temp, temp_trunc;
2233 /* Right shift FIXED_CST to temp by fbit. */
2234 temp = TREE_FIXED_CST (arg1).data;
2235 mode = TREE_FIXED_CST (arg1).mode;
2236 if (GET_MODE_FBIT (mode) < 2 * HOST_BITS_PER_WIDE_INT)
2238 lshift_double (temp.low, temp.high,
2239 - GET_MODE_FBIT (mode), 2 * HOST_BITS_PER_WIDE_INT,
2240 &temp.low, &temp.high, SIGNED_FIXED_POINT_MODE_P (mode));
2242 /* Left shift temp to temp_trunc by fbit. */
2243 lshift_double (temp.low, temp.high,
2244 GET_MODE_FBIT (mode), 2 * HOST_BITS_PER_WIDE_INT,
2245 &temp_trunc.low, &temp_trunc.high,
2246 SIGNED_FIXED_POINT_MODE_P (mode));
2253 temp_trunc.high = 0;
/* If FIXED_CST is negative, we need to round the value toward 0:
   if the fractional bits are nonzero, add 1 to temp.  */
2258 if (SIGNED_FIXED_POINT_MODE_P (mode) && temp_trunc.high < 0
2259 && !double_int_equal_p (TREE_FIXED_CST (arg1).data, temp_trunc))
2264 temp = double_int_add (temp, one);
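/* Worked example (hypothetical 8-bit signed fixed type with 4 fractional
   bits): -2.25 is stored as -36.  The arithmetic right shift by 4 gives
   -3, i.e. rounding toward -infinity; since the fractional bits (0.75)
   are nonzero, adding 1 yields -2, the truncation toward zero that the
   conversion requires.  */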
2267 /* Given a fixed-point constant, make new constant with new type,
2268 appropriately sign-extended or truncated. */
2269 t = force_fit_type_double (type, temp.low, temp.high, -1,
2271 && (TYPE_UNSIGNED (type)
2272 < TYPE_UNSIGNED (TREE_TYPE (arg1))))
2273 | TREE_OVERFLOW (arg1));
/* A subroutine of fold_convert_const handling conversions of a REAL_CST
   to another floating-point type.  */
2282 fold_convert_const_real_from_real (tree type, const_tree arg1)
2284 REAL_VALUE_TYPE value;
2287 real_convert (&value, TYPE_MODE (type), &TREE_REAL_CST (arg1));
2288 t = build_real (type, value);
2290 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
/* A subroutine of fold_convert_const handling conversions of a FIXED_CST
   to a floating-point type.  */
2298 fold_convert_const_real_from_fixed (tree type, const_tree arg1)
2300 REAL_VALUE_TYPE value;
2303 real_convert_from_fixed (&value, TYPE_MODE (type), &TREE_FIXED_CST (arg1));
2304 t = build_real (type, value);
2306 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
2307 TREE_CONSTANT_OVERFLOW (t)
2308 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
/* A subroutine of fold_convert_const handling conversions of a FIXED_CST
   to another fixed-point type.  */
2316 fold_convert_const_fixed_from_fixed (tree type, const_tree arg1)
2318 FIXED_VALUE_TYPE value;
2322 overflow_p = fixed_convert (&value, TYPE_MODE (type), &TREE_FIXED_CST (arg1),
2323 TYPE_SATURATING (type));
2324 t = build_fixed (type, value);
2326 /* Propagate overflow flags. */
2327 if (overflow_p | TREE_OVERFLOW (arg1))
2329 TREE_OVERFLOW (t) = 1;
2330 TREE_CONSTANT_OVERFLOW (t) = 1;
2332 else if (TREE_CONSTANT_OVERFLOW (arg1))
2333 TREE_CONSTANT_OVERFLOW (t) = 1;
/* A subroutine of fold_convert_const handling conversions of an INTEGER_CST
   to a fixed-point type.  */
2341 fold_convert_const_fixed_from_int (tree type, const_tree arg1)
2343 FIXED_VALUE_TYPE value;
2347 overflow_p = fixed_convert_from_int (&value, TYPE_MODE (type),
2348 TREE_INT_CST (arg1),
2349 TYPE_UNSIGNED (TREE_TYPE (arg1)),
2350 TYPE_SATURATING (type));
2351 t = build_fixed (type, value);
2353 /* Propagate overflow flags. */
2354 if (overflow_p | TREE_OVERFLOW (arg1))
2356 TREE_OVERFLOW (t) = 1;
2357 TREE_CONSTANT_OVERFLOW (t) = 1;
2359 else if (TREE_CONSTANT_OVERFLOW (arg1))
2360 TREE_CONSTANT_OVERFLOW (t) = 1;
/* A subroutine of fold_convert_const handling conversions of a REAL_CST
   to a fixed-point type.  */
2368 fold_convert_const_fixed_from_real (tree type, const_tree arg1)
2370 FIXED_VALUE_TYPE value;
2374 overflow_p = fixed_convert_from_real (&value, TYPE_MODE (type),
2375 &TREE_REAL_CST (arg1),
2376 TYPE_SATURATING (type));
2377 t = build_fixed (type, value);
2379 /* Propagate overflow flags. */
2380 if (overflow_p | TREE_OVERFLOW (arg1))
2382 TREE_OVERFLOW (t) = 1;
2383 TREE_CONSTANT_OVERFLOW (t) = 1;
2385 else if (TREE_CONSTANT_OVERFLOW (arg1))
2386 TREE_CONSTANT_OVERFLOW (t) = 1;
2390 /* Attempt to fold type conversion operation CODE of expression ARG1 to
2391 type TYPE. If no simplification can be done return NULL_TREE. */
2394 fold_convert_const (enum tree_code code, tree type, tree arg1)
2396 if (TREE_TYPE (arg1) == type)
2399 if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))
2401 if (TREE_CODE (arg1) == INTEGER_CST)
2402 return fold_convert_const_int_from_int (type, arg1);
2403 else if (TREE_CODE (arg1) == REAL_CST)
2404 return fold_convert_const_int_from_real (code, type, arg1);
2405 else if (TREE_CODE (arg1) == FIXED_CST)
2406 return fold_convert_const_int_from_fixed (type, arg1);
2408 else if (TREE_CODE (type) == REAL_TYPE)
2410 if (TREE_CODE (arg1) == INTEGER_CST)
2411 return build_real_from_int_cst (type, arg1);
2412 else if (TREE_CODE (arg1) == REAL_CST)
2413 return fold_convert_const_real_from_real (type, arg1);
2414 else if (TREE_CODE (arg1) == FIXED_CST)
2415 return fold_convert_const_real_from_fixed (type, arg1);
2417 else if (TREE_CODE (type) == FIXED_POINT_TYPE)
2419 if (TREE_CODE (arg1) == FIXED_CST)
2420 return fold_convert_const_fixed_from_fixed (type, arg1);
2421 else if (TREE_CODE (arg1) == INTEGER_CST)
2422 return fold_convert_const_fixed_from_int (type, arg1);
2423 else if (TREE_CODE (arg1) == REAL_CST)
2424 return fold_convert_const_fixed_from_real (type, arg1);
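/* Usage sketch (hypothetical): folding the conversion of the integer
   constant 42 to double at compile time.  */
#if 0
  tree cst = build_int_cst (integer_type_node, 42);
  tree folded = fold_convert_const (FLOAT_EXPR, double_type_node, cst);
  /* FOLDED is now a REAL_CST with value 42.0, or NULL_TREE if the
     conversion could not be folded.  */
#endif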
2429 /* Construct a vector of zero elements of vector type TYPE. */
2432 build_zero_vector (tree type)
2437 elem = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
2438 units = TYPE_VECTOR_SUBPARTS (type);
2441 for (i = 0; i < units; i++)
2442 list = tree_cons (NULL_TREE, elem, list);
2443 return build_vector (type, list);
/* Returns true if ARG is convertible to TYPE using a NOP_EXPR.  */
2449 fold_convertible_p (const_tree type, const_tree arg)
2451 tree orig = TREE_TYPE (arg);
2456 if (TREE_CODE (arg) == ERROR_MARK
2457 || TREE_CODE (type) == ERROR_MARK
2458 || TREE_CODE (orig) == ERROR_MARK)
2461 if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
2464 switch (TREE_CODE (type))
2466 case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
2467 case POINTER_TYPE: case REFERENCE_TYPE:
2469 if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
2470 || TREE_CODE (orig) == OFFSET_TYPE)
2472 return (TREE_CODE (orig) == VECTOR_TYPE
2473 && tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
2476 case FIXED_POINT_TYPE:
2480 return TREE_CODE (type) == TREE_CODE (orig);
2487 /* Convert expression ARG to type TYPE. Used by the middle-end for
2488 simple conversions in preference to calling the front-end's convert. */
2491 fold_convert (tree type, tree arg)
2493 tree orig = TREE_TYPE (arg);
2499 if (TREE_CODE (arg) == ERROR_MARK
2500 || TREE_CODE (type) == ERROR_MARK
2501 || TREE_CODE (orig) == ERROR_MARK)
2502 return error_mark_node;
2504 if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
2505 return fold_build1 (NOP_EXPR, type, arg);
2507 switch (TREE_CODE (type))
2509 case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
2510 case POINTER_TYPE: case REFERENCE_TYPE:
2512 if (TREE_CODE (arg) == INTEGER_CST)
2514 tem = fold_convert_const (NOP_EXPR, type, arg);
2515 if (tem != NULL_TREE)
2518 if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
2519 || TREE_CODE (orig) == OFFSET_TYPE)
2520 return fold_build1 (NOP_EXPR, type, arg);
2521 if (TREE_CODE (orig) == COMPLEX_TYPE)
2523 tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2524 return fold_convert (type, tem);
2526 gcc_assert (TREE_CODE (orig) == VECTOR_TYPE
2527 && tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
2528 return fold_build1 (NOP_EXPR, type, arg);
2531 if (TREE_CODE (arg) == INTEGER_CST)
2533 tem = fold_convert_const (FLOAT_EXPR, type, arg);
2534 if (tem != NULL_TREE)
2537 else if (TREE_CODE (arg) == REAL_CST)
2539 tem = fold_convert_const (NOP_EXPR, type, arg);
2540 if (tem != NULL_TREE)
2543 else if (TREE_CODE (arg) == FIXED_CST)
2545 tem = fold_convert_const (FIXED_CONVERT_EXPR, type, arg);
2546 if (tem != NULL_TREE)
2550 switch (TREE_CODE (orig))
2553 case BOOLEAN_TYPE: case ENUMERAL_TYPE:
2554 case POINTER_TYPE: case REFERENCE_TYPE:
2555 return fold_build1 (FLOAT_EXPR, type, arg);
2558 return fold_build1 (NOP_EXPR, type, arg);
2560 case FIXED_POINT_TYPE:
2561 return fold_build1 (FIXED_CONVERT_EXPR, type, arg);
2564 tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2565 return fold_convert (type, tem);
2571 case FIXED_POINT_TYPE:
2572 if (TREE_CODE (arg) == FIXED_CST || TREE_CODE (arg) == INTEGER_CST
2573 || TREE_CODE (arg) == REAL_CST)
2575 tem = fold_convert_const (FIXED_CONVERT_EXPR, type, arg);
2576 if (tem != NULL_TREE)
2580 switch (TREE_CODE (orig))
2582 case FIXED_POINT_TYPE:
2587 return fold_build1 (FIXED_CONVERT_EXPR, type, arg);
2590 tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2591 return fold_convert (type, tem);
2598 switch (TREE_CODE (orig))
2601 case BOOLEAN_TYPE: case ENUMERAL_TYPE:
2602 case POINTER_TYPE: case REFERENCE_TYPE:
2604 case FIXED_POINT_TYPE:
2605 return build2 (COMPLEX_EXPR, type,
2606 fold_convert (TREE_TYPE (type), arg),
2607 fold_convert (TREE_TYPE (type), integer_zero_node));
2612 if (TREE_CODE (arg) == COMPLEX_EXPR)
2614 rpart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 0));
2615 ipart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 1));
2616 return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
2619 arg = save_expr (arg);
2620 rpart = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2621 ipart = fold_build1 (IMAGPART_EXPR, TREE_TYPE (orig), arg);
2622 rpart = fold_convert (TREE_TYPE (type), rpart);
2623 ipart = fold_convert (TREE_TYPE (type), ipart);
2624 return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
2632 if (integer_zerop (arg))
2633 return build_zero_vector (type);
2634 gcc_assert (tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
2635 gcc_assert (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
2636 || TREE_CODE (orig) == VECTOR_TYPE);
2637 return fold_build1 (VIEW_CONVERT_EXPR, type, arg);
2640 tem = fold_ignored_result (arg);
2641 if (TREE_CODE (tem) == GIMPLE_MODIFY_STMT)
2643 return fold_build1 (NOP_EXPR, type, tem);
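/* Usage sketch: unlike the front ends' convert(), fold_convert only
   performs the simple middle-end conversions -- folding constants
   immediately where possible and otherwise wrapping ARG in the
   appropriate NOP_EXPR, FLOAT_EXPR, FIXED_CONVERT_EXPR, COMPLEX_EXPR
   or VIEW_CONVERT_EXPR.  */
#if 0
  tree widened = fold_convert (long_integer_type_node, arg);
#endif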
/* Return false if expr can be assumed not to be an lvalue, true
   otherwise.  */
2654 maybe_lvalue_p (const_tree x)
2656 /* We only need to wrap lvalue tree codes. */
2657 switch (TREE_CODE (x))
2668 case ALIGN_INDIRECT_REF:
2669 case MISALIGNED_INDIRECT_REF:
2671 case ARRAY_RANGE_REF:
2677 case PREINCREMENT_EXPR:
2678 case PREDECREMENT_EXPR:
2680 case TRY_CATCH_EXPR:
2681 case WITH_CLEANUP_EXPR:
2684 case GIMPLE_MODIFY_STMT:
2693 /* Assume the worst for front-end tree codes. */
2694 if ((int)TREE_CODE (x) >= NUM_TREE_CODES)
2702 /* Return an expr equal to X but certainly not valid as an lvalue. */
/* While we are in GIMPLE, NON_LVALUE_EXPR doesn't mean anything to
   us.  */
2712 if (! maybe_lvalue_p (x))
2714 return build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
2717 /* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
2718 Zero means allow extended lvalues. */
2720 int pedantic_lvalues;
2722 /* When pedantic, return an expr equal to X but certainly not valid as a
2723 pedantic lvalue. Otherwise, return X. */
2726 pedantic_non_lvalue (tree x)
2728 if (pedantic_lvalues)
2729 return non_lvalue (x);
/* Given a tree comparison code, return the code that is the logical inverse
   of the given code.  It is not safe to do this for floating-point
   comparisons, except for NE_EXPR and EQ_EXPR, so we receive a HONOR_NANS
   flag as well: if reversing the comparison is unsafe, return ERROR_MARK.  */
2740 invert_tree_comparison (enum tree_code code, bool honor_nans)
2742 if (honor_nans && flag_trapping_math)
2752 return honor_nans ? UNLE_EXPR : LE_EXPR;
2754 return honor_nans ? UNLT_EXPR : LT_EXPR;
2756 return honor_nans ? UNGE_EXPR : GE_EXPR;
2758 return honor_nans ? UNGT_EXPR : GT_EXPR;
2772 return UNORDERED_EXPR;
2773 case UNORDERED_EXPR:
2774 return ORDERED_EXPR;
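/* Example: with NaNs honored, !(x < y) is not (x >= y) but
   "x >= y or unordered", so invert_tree_comparison (LT_EXPR, true)
   yields UNGE_EXPR while invert_tree_comparison (LT_EXPR, false)
   yields GE_EXPR -- and under -ftrapping-math the NaN-honoring case
   bails out with ERROR_MARK instead.  */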
2780 /* Similar, but return the comparison that results if the operands are
2781 swapped. This is safe for floating-point. */
2784 swap_tree_comparison (enum tree_code code)
2791 case UNORDERED_EXPR:
2817 /* Convert a comparison tree code from an enum tree_code representation
2818 into a compcode bit-based encoding. This function is the inverse of
2819 compcode_to_comparison. */
2821 static enum comparison_code
2822 comparison_to_compcode (enum tree_code code)
2839 return COMPCODE_ORD;
2840 case UNORDERED_EXPR:
2841 return COMPCODE_UNORD;
2843 return COMPCODE_UNLT;
2845 return COMPCODE_UNEQ;
2847 return COMPCODE_UNLE;
2849 return COMPCODE_UNGT;
2851 return COMPCODE_LTGT;
2853 return COMPCODE_UNGE;
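/* The encoding gives each outcome class its own bit, so logical
   operations on predicates become bitwise operations on codes; for
   instance, with the encoding used here,
   COMPCODE_LT | COMPCODE_EQ == COMPCODE_LE and
   COMPCODE_LE & COMPCODE_GE == COMPCODE_EQ.  */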
2859 /* Convert a compcode bit-based encoding of a comparison operator back
2860 to GCC's enum tree_code representation. This function is the
2861 inverse of comparison_to_compcode. */
2863 static enum tree_code
2864 compcode_to_comparison (enum comparison_code code)
2881 return ORDERED_EXPR;
2882 case COMPCODE_UNORD:
2883 return UNORDERED_EXPR;
2901 /* Return a tree for the comparison which is the combination of
2902 doing the AND or OR (depending on CODE) of the two operations LCODE
2903 and RCODE on the identical operands LL_ARG and LR_ARG. Take into account
2904 the possibility of trapping if the mode has NaNs, and return NULL_TREE
2905 if this makes the transformation invalid. */
2908 combine_comparisons (enum tree_code code, enum tree_code lcode,
2909 enum tree_code rcode, tree truth_type,
2910 tree ll_arg, tree lr_arg)
2912 bool honor_nans = HONOR_NANS (TYPE_MODE (TREE_TYPE (ll_arg)));
2913 enum comparison_code lcompcode = comparison_to_compcode (lcode);
2914 enum comparison_code rcompcode = comparison_to_compcode (rcode);
2915 enum comparison_code compcode;
2919 case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR:
2920 compcode = lcompcode & rcompcode;
2923 case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR:
2924 compcode = lcompcode | rcompcode;
2933 /* Eliminate unordered comparisons, as well as LTGT and ORD
2934 which are not used unless the mode has NaNs. */
2935 compcode &= ~COMPCODE_UNORD;
2936 if (compcode == COMPCODE_LTGT)
2937 compcode = COMPCODE_NE;
2938 else if (compcode == COMPCODE_ORD)
2939 compcode = COMPCODE_TRUE;
2941 else if (flag_trapping_math)
2943 /* Check that the original operation and the optimized ones will trap
2944 under the same condition. */
2945 bool ltrap = (lcompcode & COMPCODE_UNORD) == 0
2946 && (lcompcode != COMPCODE_EQ)
2947 && (lcompcode != COMPCODE_ORD);
2948 bool rtrap = (rcompcode & COMPCODE_UNORD) == 0
2949 && (rcompcode != COMPCODE_EQ)
2950 && (rcompcode != COMPCODE_ORD);
2951 bool trap = (compcode & COMPCODE_UNORD) == 0
2952 && (compcode != COMPCODE_EQ)
2953 && (compcode != COMPCODE_ORD);
2955 /* In a short-circuited boolean expression the LHS might be
2956 such that the RHS, if evaluated, will never trap. For
2957 example, in ORD (x, y) && (x < y), we evaluate the RHS only
2958 if neither x nor y is NaN. (This is a mixed blessing: for
2959 example, the expression above will never trap, hence
2960 optimizing it to x < y would be invalid). */
2961 if ((code == TRUTH_ORIF_EXPR && (lcompcode & COMPCODE_UNORD))
2962 || (code == TRUTH_ANDIF_EXPR && !(lcompcode & COMPCODE_UNORD)))
2965 /* If the comparison was short-circuited, and only the RHS
2966 trapped, we may now generate a spurious trap. */
2968 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
2971 /* If we changed the conditions that cause a trap, we lose. */
2972 if ((ltrap || rtrap) != trap)
2976 if (compcode == COMPCODE_TRUE)
2977 return constant_boolean_node (true, truth_type);
2978 else if (compcode == COMPCODE_FALSE)
2979 return constant_boolean_node (false, truth_type);
2981 return fold_build2 (compcode_to_comparison (compcode),
2982 truth_type, ll_arg, lr_arg);
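/* Worked example (sketch): for integer operands, combining
   (x <= y) && (x >= y) intersects compcodes LE & GE == EQ, so the
   pair folds to the single test x == y; dually, (x < y) || (x == y)
   unions LT | EQ == LE and folds to x <= y.  */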
2985 /* Return nonzero if CODE is a tree code that represents a truth value. */
2988 truth_value_p (enum tree_code code)
2990 return (TREE_CODE_CLASS (code) == tcc_comparison
2991 || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
2992 || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
2993 || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
2996 /* Return nonzero if two operands (typically of the same tree node)
2997 are necessarily equal. If either argument has side-effects this
2998 function returns zero. FLAGS modifies behavior as follows:
3000 If OEP_ONLY_CONST is set, only return nonzero for constants.
3001 This function tests whether the operands are indistinguishable;
3002 it does not test whether they are equal using C's == operation.
3003 The distinction is important for IEEE floating point, because
3004 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
3005 (2) two NaNs may be indistinguishable, but NaN!=NaN.
3007 If OEP_ONLY_CONST is unset, a VAR_DECL is considered equal to itself
3008 even though it may hold multiple values during a function.
3009 This is because a GCC tree node guarantees that nothing else is
3010 executed between the evaluation of its "operands" (which may often
3011 be evaluated in arbitrary order). Hence if the operands themselves
3012 don't side-effect, the VAR_DECLs, PARM_DECLs etc... must hold the
3013 same value in each operand/subexpression. Hence leaving OEP_ONLY_CONST
3014 unset means assuming isochronic (or instantaneous) tree equivalence.
3015 Unless comparing arbitrary expression trees, such as from different
3016 statements, this flag can usually be left unset.
3018 If OEP_PURE_SAME is set, then pure functions with identical arguments
3019 are considered the same. It is used when the caller has other ways
3020 to ensure that global memory is unchanged in between. */
3023 operand_equal_p (const_tree arg0, const_tree arg1, unsigned int flags)
3025 /* If either is ERROR_MARK, they aren't equal. */
3026 if (TREE_CODE (arg0) == ERROR_MARK || TREE_CODE (arg1) == ERROR_MARK)
3029 /* Check equality of integer constants before bailing out due to
3030 precision differences. */
3031 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
3032 return tree_int_cst_equal (arg0, arg1);
/* If the two types don't have the same signedness, then we can't consider
   them equal.  We must check this before the STRIP_NOPS calls
   because they may change the signedness of the arguments.  */
3037 if (TYPE_UNSIGNED (TREE_TYPE (arg0)) != TYPE_UNSIGNED (TREE_TYPE (arg1)))
/* If the two types don't have the same precision, then it is not safe
   to strip NOPs.  */
3042 if (TYPE_PRECISION (TREE_TYPE (arg0)) != TYPE_PRECISION (TREE_TYPE (arg1)))
3048 /* In case both args are comparisons but with different comparison
3049 code, try to swap the comparison operands of one arg to produce
3050 a match and compare that variant. */
3051 if (TREE_CODE (arg0) != TREE_CODE (arg1)
3052 && COMPARISON_CLASS_P (arg0)
3053 && COMPARISON_CLASS_P (arg1))
3055 enum tree_code swap_code = swap_tree_comparison (TREE_CODE (arg1));
3057 if (TREE_CODE (arg0) == swap_code)
3058 return operand_equal_p (TREE_OPERAND (arg0, 0),
3059 TREE_OPERAND (arg1, 1), flags)
3060 && operand_equal_p (TREE_OPERAND (arg0, 1),
3061 TREE_OPERAND (arg1, 0), flags);
3064 if (TREE_CODE (arg0) != TREE_CODE (arg1)
3065 /* This is needed for conversions and for COMPONENT_REF.
3066 Might as well play it safe and always test this. */
3067 || TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK
3068 || TREE_CODE (TREE_TYPE (arg1)) == ERROR_MARK
3069 || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
3072 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
3073 We don't care about side effects in that case because the SAVE_EXPR
3074 takes care of that for us. In all other cases, two expressions are
3075 equal if they have no side effects. If we have two identical
3076 expressions with side effects that should be treated the same due
3077 to the only side effects being identical SAVE_EXPR's, that will
3078 be detected in the recursive calls below. */
3079 if (arg0 == arg1 && ! (flags & OEP_ONLY_CONST)
3080 && (TREE_CODE (arg0) == SAVE_EXPR
3081 || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
3084 /* Next handle constant cases, those for which we can return 1 even
3085 if ONLY_CONST is set. */
3086 if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1))
3087 switch (TREE_CODE (arg0))
3090 return tree_int_cst_equal (arg0, arg1);
3093 return FIXED_VALUES_IDENTICAL (TREE_FIXED_CST (arg0),
3094 TREE_FIXED_CST (arg1));
3097 if (REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0),
3098 TREE_REAL_CST (arg1)))
3102 if (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0))))
3104 /* If we do not distinguish between signed and unsigned zero,
3105 consider them equal. */
3106 if (real_zerop (arg0) && real_zerop (arg1))
3115 v1 = TREE_VECTOR_CST_ELTS (arg0);
3116 v2 = TREE_VECTOR_CST_ELTS (arg1);
3119 if (!operand_equal_p (TREE_VALUE (v1), TREE_VALUE (v2),
3122 v1 = TREE_CHAIN (v1);
3123 v2 = TREE_CHAIN (v2);
3130 return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1),
3132 && operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1),
3136 return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1)
3137 && ! memcmp (TREE_STRING_POINTER (arg0),
3138 TREE_STRING_POINTER (arg1),
3139 TREE_STRING_LENGTH (arg0)));
3142 return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0),
3148 if (flags & OEP_ONLY_CONST)
3151 /* Define macros to test an operand from arg0 and arg1 for equality and a
3152 variant that allows null and views null as being different from any
non-null value.  In the latter case, if either is null, then both
must be; otherwise, do the normal comparison.  */
3155 #define OP_SAME(N) operand_equal_p (TREE_OPERAND (arg0, N), \
3156 TREE_OPERAND (arg1, N), flags)
3158 #define OP_SAME_WITH_NULL(N) \
3159 ((!TREE_OPERAND (arg0, N) || !TREE_OPERAND (arg1, N)) \
3160 ? TREE_OPERAND (arg0, N) == TREE_OPERAND (arg1, N) : OP_SAME (N))
3162 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
3165 /* Two conversions are equal only if signedness and modes match. */
3166 switch (TREE_CODE (arg0))
3170 case FIX_TRUNC_EXPR:
3171 if (TYPE_UNSIGNED (TREE_TYPE (arg0))
3172 != TYPE_UNSIGNED (TREE_TYPE (arg1)))
3182 case tcc_comparison:
3184 if (OP_SAME (0) && OP_SAME (1))
3187 /* For commutative ops, allow the other order. */
3188 return (commutative_tree_code (TREE_CODE (arg0))
3189 && operand_equal_p (TREE_OPERAND (arg0, 0),
3190 TREE_OPERAND (arg1, 1), flags)
3191 && operand_equal_p (TREE_OPERAND (arg0, 1),
3192 TREE_OPERAND (arg1, 0), flags));
3195 /* If either of the pointer (or reference) expressions we are
3196 dereferencing contain a side effect, these cannot be equal. */
3197 if (TREE_SIDE_EFFECTS (arg0)
3198 || TREE_SIDE_EFFECTS (arg1))
3201 switch (TREE_CODE (arg0))
3204 case ALIGN_INDIRECT_REF:
3205 case MISALIGNED_INDIRECT_REF:
3211 case ARRAY_RANGE_REF:
3212 /* Operands 2 and 3 may be null.
3213 Compare the array index by value if it is constant first as we
3214 may have different types but same value here. */
3216 && (tree_int_cst_equal (TREE_OPERAND (arg0, 1),
3217 TREE_OPERAND (arg1, 1))
3219 && OP_SAME_WITH_NULL (2)
3220 && OP_SAME_WITH_NULL (3));
3223 /* Handle operand 2 the same as for ARRAY_REF. Operand 0
3224 may be NULL when we're called to compare MEM_EXPRs. */
3225 return OP_SAME_WITH_NULL (0)
3227 && OP_SAME_WITH_NULL (2);
3230 return OP_SAME (0) && OP_SAME (1) && OP_SAME (2);
3236 case tcc_expression:
3237 switch (TREE_CODE (arg0))
3240 case TRUTH_NOT_EXPR:
3243 case TRUTH_ANDIF_EXPR:
3244 case TRUTH_ORIF_EXPR:
3245 return OP_SAME (0) && OP_SAME (1);
3247 case TRUTH_AND_EXPR:
3249 case TRUTH_XOR_EXPR:
3250 if (OP_SAME (0) && OP_SAME (1))
3253 /* Otherwise take into account this is a commutative operation. */
3254 return (operand_equal_p (TREE_OPERAND (arg0, 0),
3255 TREE_OPERAND (arg1, 1), flags)
3256 && operand_equal_p (TREE_OPERAND (arg0, 1),
3257 TREE_OPERAND (arg1, 0), flags));
3264 switch (TREE_CODE (arg0))
/* If the CALL_EXPRs call different functions, then they
   clearly cannot be equal.  */
3269 if (! operand_equal_p (CALL_EXPR_FN (arg0), CALL_EXPR_FN (arg1),
3274 unsigned int cef = call_expr_flags (arg0);
3275 if (flags & OEP_PURE_SAME)
3276 cef &= ECF_CONST | ECF_PURE;
3283 /* Now see if all the arguments are the same. */
3285 const_call_expr_arg_iterator iter0, iter1;
3287 for (a0 = first_const_call_expr_arg (arg0, &iter0),
3288 a1 = first_const_call_expr_arg (arg1, &iter1);
3290 a0 = next_const_call_expr_arg (&iter0),
3291 a1 = next_const_call_expr_arg (&iter1))
3292 if (! operand_equal_p (a0, a1, flags))
3295 /* If we get here and both argument lists are exhausted
3296 then the CALL_EXPRs are equal. */
3297 return ! (a0 || a1);
3303 case tcc_declaration:
3304 /* Consider __builtin_sqrt equal to sqrt. */
3305 return (TREE_CODE (arg0) == FUNCTION_DECL
3306 && DECL_BUILT_IN (arg0) && DECL_BUILT_IN (arg1)
3307 && DECL_BUILT_IN_CLASS (arg0) == DECL_BUILT_IN_CLASS (arg1)
3308 && DECL_FUNCTION_CODE (arg0) == DECL_FUNCTION_CODE (arg1));
3315 #undef OP_SAME_WITH_NULL
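/* Usage sketch (hypothetical operands): */
#if 0
  /* Nonzero when the two trees are structurally interchangeable --
     same value, no side effects -- e.g. two "a + 1" trees naming the
     same decl.  */
  int same = operand_equal_p (arg0, arg1, 0);

  /* The same test restricted to constants only.  */
  int same_const = operand_equal_p (arg0, arg1, OEP_ONLY_CONST);
#endif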
3318 /* Similar to operand_equal_p, but see if ARG0 might have been made by
3319 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
3321 When in doubt, return 0. */
3324 operand_equal_for_comparison_p (tree arg0, tree arg1, tree other)
3326 int unsignedp1, unsignedpo;
3327 tree primarg0, primarg1, primother;
3328 unsigned int correct_width;
3330 if (operand_equal_p (arg0, arg1, 0))
3333 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
3334 || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
3337 /* Discard any conversions that don't change the modes of ARG0 and ARG1
3338 and see if the inner values are the same. This removes any
3339 signedness comparison, which doesn't matter here. */
3340 primarg0 = arg0, primarg1 = arg1;
3341 STRIP_NOPS (primarg0);
3342 STRIP_NOPS (primarg1);
3343 if (operand_equal_p (primarg0, primarg1, 0))
3346 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
3347 actual comparison operand, ARG0.
3349 First throw away any conversions to wider types
3350 already present in the operands. */
3352 primarg1 = get_narrower (arg1, &unsignedp1);
3353 primother = get_narrower (other, &unsignedpo);
3355 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
3356 if (unsignedp1 == unsignedpo
3357 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
3358 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
3360 tree type = TREE_TYPE (arg0);
3362 /* Make sure shorter operand is extended the right way
3363 to match the longer operand. */
3364 primarg1 = fold_convert (signed_or_unsigned_type_for
3365 (unsignedp1, TREE_TYPE (primarg1)), primarg1);
3367 if (operand_equal_p (arg0, fold_convert (type, primarg1), 0))
3374 /* See if ARG is an expression that is either a comparison or is performing
3375 arithmetic on comparisons. The comparisons must only be comparing
3376 two different values, which will be stored in *CVAL1 and *CVAL2; if
3377 they are nonzero it means that some operands have already been found.
3378 No variables may be used anywhere else in the expression except in the
3379 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
3380 the expression and save_expr needs to be called with CVAL1 and CVAL2.
3382 If this is true, return 1. Otherwise, return zero. */
3385 twoval_comparison_p (tree arg, tree *cval1, tree *cval2, int *save_p)
3387 enum tree_code code = TREE_CODE (arg);
3388 enum tree_code_class class = TREE_CODE_CLASS (code);
3390 /* We can handle some of the tcc_expression cases here. */
3391 if (class == tcc_expression && code == TRUTH_NOT_EXPR)
3393 else if (class == tcc_expression
3394 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
3395 || code == COMPOUND_EXPR))
3398 else if (class == tcc_expression && code == SAVE_EXPR
3399 && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg, 0)))
/* If we've already found a CVAL1 or CVAL2, this expression is
   too complex to handle.  */
3403 if (*cval1 || *cval2)
3413 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
3416 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
3417 && twoval_comparison_p (TREE_OPERAND (arg, 1),
3418 cval1, cval2, save_p));
3423 case tcc_expression:
3424 if (code == COND_EXPR)
3425 return (twoval_comparison_p (TREE_OPERAND (arg, 0),
3426 cval1, cval2, save_p)
3427 && twoval_comparison_p (TREE_OPERAND (arg, 1),
3428 cval1, cval2, save_p)
3429 && twoval_comparison_p (TREE_OPERAND (arg, 2),
3430 cval1, cval2, save_p));
3433 case tcc_comparison:
3434 /* First see if we can handle the first operand, then the second. For
3435 the second operand, we know *CVAL1 can't be zero. It must be that
3436 one side of the comparison is each of the values; test for the
case where this isn't true by failing if the two operands
are the same.  */
3440 if (operand_equal_p (TREE_OPERAND (arg, 0),
3441 TREE_OPERAND (arg, 1), 0))
3445 *cval1 = TREE_OPERAND (arg, 0);
3446 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
3448 else if (*cval2 == 0)
3449 *cval2 = TREE_OPERAND (arg, 0);
3450 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
3455 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
3457 else if (*cval2 == 0)
3458 *cval2 = TREE_OPERAND (arg, 1);
3459 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
3471 /* ARG is a tree that is known to contain just arithmetic operations and
3472 comparisons. Evaluate the operations in the tree substituting NEW0 for
any occurrence of OLD0 as an operand of a comparison and likewise for
NEW1 and OLD1.  */
3477 eval_subst (tree arg, tree old0, tree new0, tree old1, tree new1)
3479 tree type = TREE_TYPE (arg);
3480 enum tree_code code = TREE_CODE (arg);
3481 enum tree_code_class class = TREE_CODE_CLASS (code);
3483 /* We can handle some of the tcc_expression cases here. */
3484 if (class == tcc_expression && code == TRUTH_NOT_EXPR)
3486 else if (class == tcc_expression
3487 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
3493 return fold_build1 (code, type,
3494 eval_subst (TREE_OPERAND (arg, 0),
3495 old0, new0, old1, new1));
3498 return fold_build2 (code, type,
3499 eval_subst (TREE_OPERAND (arg, 0),
3500 old0, new0, old1, new1),
3501 eval_subst (TREE_OPERAND (arg, 1),
3502 old0, new0, old1, new1));
3504 case tcc_expression:
3508 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
3511 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
3514 return fold_build3 (code, type,
3515 eval_subst (TREE_OPERAND (arg, 0),
3516 old0, new0, old1, new1),
3517 eval_subst (TREE_OPERAND (arg, 1),
3518 old0, new0, old1, new1),
3519 eval_subst (TREE_OPERAND (arg, 2),
3520 old0, new0, old1, new1));
3524 /* Fall through - ??? */
3526 case tcc_comparison:
3528 tree arg0 = TREE_OPERAND (arg, 0);
3529 tree arg1 = TREE_OPERAND (arg, 1);
3531 /* We need to check both for exact equality and tree equality. The
3532 former will be true if the operand has a side-effect. In that
3533 case, we know the operand occurred exactly once. */
3535 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
3537 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
3540 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
3542 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
3545 return fold_build2 (code, type, arg0, arg1);
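/* Example (sketch): eval_subst (a < b, a, x, b, y) rebuilds the
   comparison as x < y, letting the caller test whether a condition
   built from two compared values simplifies once those values are
   pinned down.  */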
3553 /* Return a tree for the case when the result of an expression is RESULT
3554 converted to TYPE and OMITTED was previously an operand of the expression
3555 but is now not needed (e.g., we folded OMITTED * 0).
3557 If OMITTED has side effects, we must evaluate it. Otherwise, just do
3558 the conversion of RESULT to TYPE. */
3561 omit_one_operand (tree type, tree result, tree omitted)
3563 tree t = fold_convert (type, result);
3565 /* If the resulting operand is an empty statement, just return the omitted
statement cast to void.  */
3567 if (IS_EMPTY_STMT (t) && TREE_SIDE_EFFECTS (omitted))
3568 return build1 (NOP_EXPR, void_type_node, fold_ignored_result (omitted));
3570 if (TREE_SIDE_EFFECTS (omitted))
3571 return build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);
3573 return non_lvalue (t);
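/* Example (sketch): when folding f () * 0 the product is 0, but the
   call must still be evaluated for its side effects, so
   omit_one_operand (type, integer_zero_node, call) produces
   (f (), 0) -- a COMPOUND_EXPR that runs the call and discards its
   value.  */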
3576 /* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
3579 pedantic_omit_one_operand (tree type, tree result, tree omitted)
3581 tree t = fold_convert (type, result);
3583 /* If the resulting operand is an empty statement, just return the omitted
statement cast to void.  */
3585 if (IS_EMPTY_STMT (t) && TREE_SIDE_EFFECTS (omitted))
3586 return build1 (NOP_EXPR, void_type_node, fold_ignored_result (omitted));
3588 if (TREE_SIDE_EFFECTS (omitted))
3589 return build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);
3591 return pedantic_non_lvalue (t);
3594 /* Return a tree for the case when the result of an expression is RESULT
3595 converted to TYPE and OMITTED1 and OMITTED2 were previously operands
3596 of the expression but are now not needed.
3598 If OMITTED1 or OMITTED2 has side effects, they must be evaluated.
3599 If both OMITTED1 and OMITTED2 have side effects, OMITTED1 is
3600 evaluated before OMITTED2. Otherwise, if neither has side effects,
3601 just do the conversion of RESULT to TYPE. */
3604 omit_two_operands (tree type, tree result, tree omitted1, tree omitted2)
3606 tree t = fold_convert (type, result);
3608 if (TREE_SIDE_EFFECTS (omitted2))
3609 t = build2 (COMPOUND_EXPR, type, omitted2, t);
3610 if (TREE_SIDE_EFFECTS (omitted1))
3611 t = build2 (COMPOUND_EXPR, type, omitted1, t);
3613 return TREE_CODE (t) != COMPOUND_EXPR ? non_lvalue (t) : t;
3617 /* Return a simplified tree node for the truth-negation of ARG. This
3618 never alters ARG itself. We assume that ARG is an operation that
3619 returns a truth value (0 or 1).
3621 FIXME: one would think we would fold the result, but it causes
3622 problems with the dominator optimizer. */
3625 fold_truth_not_expr (tree arg)
3627 tree type = TREE_TYPE (arg);
3628 enum tree_code code = TREE_CODE (arg);
3630 /* If this is a comparison, we can simply invert it, except for
3631 floating-point non-equality comparisons, in which case we just
3632 enclose a TRUTH_NOT_EXPR around what we have. */
3634 if (TREE_CODE_CLASS (code) == tcc_comparison)
3636 tree op_type = TREE_TYPE (TREE_OPERAND (arg, 0));
3637 if (FLOAT_TYPE_P (op_type)
3638 && flag_trapping_math
3639 && code != ORDERED_EXPR && code != UNORDERED_EXPR
3640 && code != NE_EXPR && code != EQ_EXPR)
3644 code = invert_tree_comparison (code,
3645 HONOR_NANS (TYPE_MODE (op_type)));
3646 if (code == ERROR_MARK)
3649 return build2 (code, type,
3650 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
3657 return constant_boolean_node (integer_zerop (arg), type);
3659 case TRUTH_AND_EXPR:
3660 return build2 (TRUTH_OR_EXPR, type,
3661 invert_truthvalue (TREE_OPERAND (arg, 0)),
3662 invert_truthvalue (TREE_OPERAND (arg, 1)));
3665 return build2 (TRUTH_AND_EXPR, type,
3666 invert_truthvalue (TREE_OPERAND (arg, 0)),
3667 invert_truthvalue (TREE_OPERAND (arg, 1)));
3669 case TRUTH_XOR_EXPR:
3670 /* Here we can invert either operand. We invert the first operand
3671 unless the second operand is a TRUTH_NOT_EXPR in which case our
3672 result is the XOR of the first operand with the inside of the
3673 negation of the second operand. */
3675 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
3676 return build2 (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
3677 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
3679 return build2 (TRUTH_XOR_EXPR, type,
3680 invert_truthvalue (TREE_OPERAND (arg, 0)),
3681 TREE_OPERAND (arg, 1));
3683 case TRUTH_ANDIF_EXPR:
3684 return build2 (TRUTH_ORIF_EXPR, type,
3685 invert_truthvalue (TREE_OPERAND (arg, 0)),
3686 invert_truthvalue (TREE_OPERAND (arg, 1)));
3688 case TRUTH_ORIF_EXPR:
3689 return build2 (TRUTH_ANDIF_EXPR, type,
3690 invert_truthvalue (TREE_OPERAND (arg, 0)),
3691 invert_truthvalue (TREE_OPERAND (arg, 1)));
3693 case TRUTH_NOT_EXPR:
3694 return TREE_OPERAND (arg, 0);
3698 tree arg1 = TREE_OPERAND (arg, 1);
3699 tree arg2 = TREE_OPERAND (arg, 2);
3700 /* A COND_EXPR may have a throw as one operand, which
then has void type.  Just leave void operands
as they are.  */
3703 return build3 (COND_EXPR, type, TREE_OPERAND (arg, 0),
3704 VOID_TYPE_P (TREE_TYPE (arg1))
3705 ? arg1 : invert_truthvalue (arg1),
3706 VOID_TYPE_P (TREE_TYPE (arg2))
3707 ? arg2 : invert_truthvalue (arg2));
3711 return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
3712 invert_truthvalue (TREE_OPERAND (arg, 1)));
3714 case NON_LVALUE_EXPR:
3715 return invert_truthvalue (TREE_OPERAND (arg, 0));
3718 if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE)
3719 return build1 (TRUTH_NOT_EXPR, type, arg);
3723 return build1 (TREE_CODE (arg), type,
3724 invert_truthvalue (TREE_OPERAND (arg, 0)));
3727 if (!integer_onep (TREE_OPERAND (arg, 1)))
3729 return build2 (EQ_EXPR, type, arg,
3730 build_int_cst (type, 0));
3733 return build1 (TRUTH_NOT_EXPR, type, arg);
3735 case CLEANUP_POINT_EXPR:
3736 return build1 (CLEANUP_POINT_EXPR, type,
3737 invert_truthvalue (TREE_OPERAND (arg, 0)));
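/* Examples (sketch): fold_truth_not_expr rewrites (a < b) as (a >= b)
   for integer operands, applies De Morgan to turn (a && b) into
   (!a || !b), and collapses a double negation !!a back to a.  */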
3746 /* Return a simplified tree node for the truth-negation of ARG. This
3747 never alters ARG itself. We assume that ARG is an operation that
3748 returns a truth value (0 or 1).
3750 FIXME: one would think we would fold the result, but it causes
3751 problems with the dominator optimizer. */
3754 invert_truthvalue (tree arg)
3758 if (TREE_CODE (arg) == ERROR_MARK)
3761 tem = fold_truth_not_expr (arg);
3763 tem = build1 (TRUTH_NOT_EXPR, TREE_TYPE (arg), arg);
3768 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
3769 operands are another bit-wise operation with a common input. If so,
3770 distribute the bit operations to save an operation and possibly two if
3771 constants are involved. For example, convert
3772 (A | B) & (A | C) into A | (B & C)
3773 Further simplification will occur if B and C are constants.
3775 If this optimization cannot be done, 0 will be returned. */
3778 distribute_bit_expr (enum tree_code code, tree type, tree arg0, tree arg1)
3783 if (TREE_CODE (arg0) != TREE_CODE (arg1)
3784 || TREE_CODE (arg0) == code
3785 || (TREE_CODE (arg0) != BIT_AND_EXPR
3786 && TREE_CODE (arg0) != BIT_IOR_EXPR))
3789 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
3791 common = TREE_OPERAND (arg0, 0);
3792 left = TREE_OPERAND (arg0, 1);
3793 right = TREE_OPERAND (arg1, 1);
3795 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
3797 common = TREE_OPERAND (arg0, 0);
3798 left = TREE_OPERAND (arg0, 1);
3799 right = TREE_OPERAND (arg1, 0);
3801 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
3803 common = TREE_OPERAND (arg0, 1);
3804 left = TREE_OPERAND (arg0, 0);
3805 right = TREE_OPERAND (arg1, 1);
3807 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
3809 common = TREE_OPERAND (arg0, 1);
3810 left = TREE_OPERAND (arg0, 0);
3811 right = TREE_OPERAND (arg1, 0);
3816 return fold_build2 (TREE_CODE (arg0), type, common,
3817 fold_build2 (code, type, left, right));
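/* Worked example: with CODE == BIT_AND_EXPR applied to (A | 4) and
   (A | 16), COMMON is A, LEFT is 4 and RIGHT is 16, giving
   A | (4 & 16); the inner fold yields A | 0, and ultimately just A.  */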
3820 /* Knowing that ARG0 and ARG1 are both RDIV_EXPRs, simplify a binary operation
3821 with code CODE. This optimization is unsafe. */
3823 distribute_real_division (enum tree_code code, tree type, tree arg0, tree arg1)
3825 bool mul0 = TREE_CODE (arg0) == MULT_EXPR;
3826 bool mul1 = TREE_CODE (arg1) == MULT_EXPR;
3828 /* (A / C) +- (B / C) -> (A +- B) / C. */
3830 && operand_equal_p (TREE_OPERAND (arg0, 1),
3831 TREE_OPERAND (arg1, 1), 0))
3832 return fold_build2 (mul0 ? MULT_EXPR : RDIV_EXPR, type,
3833 fold_build2 (code, type,
3834 TREE_OPERAND (arg0, 0),
3835 TREE_OPERAND (arg1, 0)),
3836 TREE_OPERAND (arg0, 1));
3838 /* (A / C1) +- (A / C2) -> A * (1 / C1 +- 1 / C2). */
3839 if (operand_equal_p (TREE_OPERAND (arg0, 0),
3840 TREE_OPERAND (arg1, 0), 0)
3841 && TREE_CODE (TREE_OPERAND (arg0, 1)) == REAL_CST
3842 && TREE_CODE (TREE_OPERAND (arg1, 1)) == REAL_CST)
3844 REAL_VALUE_TYPE r0, r1;
3845 r0 = TREE_REAL_CST (TREE_OPERAND (arg0, 1));
3846 r1 = TREE_REAL_CST (TREE_OPERAND (arg1, 1));
3848 real_arithmetic (&r0, RDIV_EXPR, &dconst1, &r0);
3850 real_arithmetic (&r1, RDIV_EXPR, &dconst1, &r1);
3851 real_arithmetic (&r0, code, &r0, &r1);
3852 return fold_build2 (MULT_EXPR, type,
3853 TREE_OPERAND (arg0, 0),
3854 build_real (type, r0));
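/* Worked example of the second transformation (hence "unsafe": it
   reassociates floating-point operations): x/2.0 + x/4.0 becomes
   x * (1/2.0 + 1/4.0) == x * 0.75, trading two divisions for one
   multiplication.  */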
3860 /* Subroutine for fold_truthop: decode a field reference.
3862 If EXP is a comparison reference, we return the innermost reference.
3864 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
3865 set to the starting bit number.
3867 If the innermost field can be completely contained in a mode-sized
3868 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
*PVOLATILEP is set to 1 if any expression encountered is volatile;
3871 otherwise it is not changed.
3873 *PUNSIGNEDP is set to the signedness of the field.
3875 *PMASK is set to the mask used. This is either contained in a
3876 BIT_AND_EXPR or derived from the width of the field.
3878 *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.
3880 Return 0 if this is not a component reference or is one that we can't
3881 do anything with. */
3884 decode_field_reference (tree exp, HOST_WIDE_INT *pbitsize,
3885 HOST_WIDE_INT *pbitpos, enum machine_mode *pmode,
3886 int *punsignedp, int *pvolatilep,
3887 tree *pmask, tree *pand_mask)
3889 tree outer_type = 0;
3891 tree mask, inner, offset;
3893 unsigned int precision;
3895 /* All the optimizations using this function assume integer fields.
3896 There are problems with FP fields since the type_for_size call
3897 below can fail for, e.g., XFmode. */
3898 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
3901 /* We are interested in the bare arrangement of bits, so strip everything
3902 that doesn't affect the machine mode. However, record the type of the
3903 outermost expression if it may matter below. */
3904 if (TREE_CODE (exp) == NOP_EXPR
3905 || TREE_CODE (exp) == CONVERT_EXPR
3906 || TREE_CODE (exp) == NON_LVALUE_EXPR)
3907 outer_type = TREE_TYPE (exp);
3910 if (TREE_CODE (exp) == BIT_AND_EXPR)
3912 and_mask = TREE_OPERAND (exp, 1);
3913 exp = TREE_OPERAND (exp, 0);
3914 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
3915 if (TREE_CODE (and_mask) != INTEGER_CST)
3919 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
3920 punsignedp, pvolatilep, false);
3921 if ((inner == exp && and_mask == 0)
3922 || *pbitsize < 0 || offset != 0
3923 || TREE_CODE (inner) == PLACEHOLDER_EXPR)
3926 /* If the number of bits in the reference is the same as the bitsize of
3927 the outer type, then the outer type gives the signedness. Otherwise
3928 (in case of a small bitfield) the signedness is unchanged. */
3929 if (outer_type && *pbitsize == TYPE_PRECISION (outer_type))
3930 *punsignedp = TYPE_UNSIGNED (outer_type);
3932 /* Compute the mask to access the bitfield. */
3933 unsigned_type = lang_hooks.types.type_for_size (*pbitsize, 1);
3934 precision = TYPE_PRECISION (unsigned_type);
3936 mask = build_int_cst_type (unsigned_type, -1);
3938 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
3939 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
3941 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
3943 mask = fold_build2 (BIT_AND_EXPR, unsigned_type,
3944 fold_convert (unsigned_type, and_mask), mask);
3947 *pand_mask = and_mask;
/* Subroutine for fold: determine if VAL is the INTEGER_CST that
3952 represents the sign bit of EXP's type. If EXP represents a sign
3953 or zero extension, also test VAL against the unextended type.
3954 The return value is the (sub)expression whose sign bit is VAL,
3955 or NULL_TREE otherwise. */
3958 sign_bit_p (tree exp, const_tree val)
3960 unsigned HOST_WIDE_INT mask_lo, lo;
3961 HOST_WIDE_INT mask_hi, hi;
3965 /* Tree EXP must have an integral type. */
3966 t = TREE_TYPE (exp);
3967 if (! INTEGRAL_TYPE_P (t))
3970 /* Tree VAL must be an integer constant. */
3971 if (TREE_CODE (val) != INTEGER_CST
3972 || TREE_OVERFLOW (val))
3975 width = TYPE_PRECISION (t);
3976 if (width > HOST_BITS_PER_WIDE_INT)
3978 hi = (unsigned HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT - 1);
3981 mask_hi = ((unsigned HOST_WIDE_INT) -1
3982 >> (2 * HOST_BITS_PER_WIDE_INT - width));
3988 lo = (unsigned HOST_WIDE_INT) 1 << (width - 1);
3991 mask_lo = ((unsigned HOST_WIDE_INT) -1
3992 >> (HOST_BITS_PER_WIDE_INT - width));
3995 /* We mask off those bits beyond TREE_TYPE (exp) so that we can
3996 treat VAL as if it were unsigned. */
3997 if ((TREE_INT_CST_HIGH (val) & mask_hi) == hi
3998 && (TREE_INT_CST_LOW (val) & mask_lo) == lo)
4001 /* Handle extension from a narrower type. */
4002 if (TREE_CODE (exp) == NOP_EXPR
4003 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0))) < width)
4004 return sign_bit_p (TREE_OPERAND (exp, 0), val);
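/* Example: for a 32-bit type on a host with 64-bit HOST_WIDE_INT,
   WIDTH is 32, so LO is 1 << 31 and MASK_LO is 0xffffffff; EXP is
   returned when VAL, masked to the type's width, equals exactly that
   single sign bit.  */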
4009 /* Subroutine for fold_truthop: determine if an operand is simple enough
4010 to be evaluated unconditionally. */
4013 simple_operand_p (const_tree exp)
4015 /* Strip any conversions that don't change the machine mode. */
4018 return (CONSTANT_CLASS_P (exp)
4019 || TREE_CODE (exp) == SSA_NAME
4021 && ! TREE_ADDRESSABLE (exp)
4022 && ! TREE_THIS_VOLATILE (exp)
4023 && ! DECL_NONLOCAL (exp)
4024 /* Don't regard global variables as simple. They may be
4025 allocated in ways unknown to the compiler (shared memory,
4026 #pragma weak, etc). */
4027 && ! TREE_PUBLIC (exp)
4028 && ! DECL_EXTERNAL (exp)
4029 /* Loading a static variable is unduly expensive, but global
4030 registers aren't expensive. */
4031 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
4034 /* The following functions are subroutines to fold_range_test and allow it to
4035 try to change a logical combination of comparisons into a range test.
	X == 2 || X == 3 || X == 4 || X == 5
   is converted to
	(unsigned) (X - 2) <= 3
4044 We describe each set of comparisons as being either inside or outside
4045 a range, using a variable named like IN_P, and then describe the
4046 range with a lower and upper bound. If one of the bounds is omitted,
4047 it represents either the highest or lowest value of the type.
4049 In the comments below, we represent a range by two numbers in brackets
4050 preceded by a "+" to designate being inside that range, or a "-" to
4051 designate being outside that range, so the condition can be inverted by
4052 flipping the prefix. An omitted bound is represented by a "-". For
4053 example, "- [-, 10]" means being outside the range starting at the lowest
4054 possible value and ending at 10, in other words, being greater than 10.
The range "+ [-, -]" is always true and hence the range "- [-, -]" is
always false.
4058 We set up things so that the missing bounds are handled in a consistent
4059 manner so neither a missing bound nor "true" and "false" need to be
4060 handled using a special case. */
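/* Illustrative sketch (not part of this file) of the transformation
   named above, relying on unsigned wraparound: */
#if 0
static int
in_range_2_to_5 (int x)
{
  /* X == 2 || X == 3 || X == 4 || X == 5: values below 2 wrap around
     to huge unsigned numbers, so one comparison covers the range.  */
  return (unsigned) x - 2 <= 3;
}
#endif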
4062 /* Return the result of applying CODE to ARG0 and ARG1, but handle the case
4063 of ARG0 and/or ARG1 being omitted, meaning an unlimited range. UPPER0_P
4064 and UPPER1_P are nonzero if the respective argument is an upper bound
4065 and zero for a lower. TYPE, if nonzero, is the type of the result; it
4066 must be specified for a comparison. ARG1 will be converted to ARG0's
4067 type if both are specified. */
4070 range_binop (enum tree_code code, tree type, tree arg0, int upper0_p,
4071 tree arg1, int upper1_p)
4077 /* If neither arg represents infinity, do the normal operation.
4078 Else, if not a comparison, return infinity. Else handle the special
4079 comparison rules. Note that most of the cases below won't occur, but
4080 are handled for consistency. */
4082 if (arg0 != 0 && arg1 != 0)
4084 tem = fold_build2 (code, type != 0 ? type : TREE_TYPE (arg0),
4085 arg0, fold_convert (TREE_TYPE (arg0), arg1));
4087 return TREE_CODE (tem) == INTEGER_CST ? tem : 0;
4090 if (TREE_CODE_CLASS (code) != tcc_comparison)
4093 /* Set SGN[01] to -1 if ARG[01] is a lower bound, 1 for upper, and 0
for neither.  In real maths, we cannot assume open-ended ranges are
the same.  But this is computer arithmetic, where numbers are finite.
We can therefore stand in for a missing bound with a value Z whose
magnitude is greater than that of any representable number; this
permits us to treat unbounded ranges as equal.
4099 sgn0 = arg0 != 0 ? 0 : (upper0_p ? 1 : -1);
4100 sgn1 = arg1 != 0 ? 0 : (upper1_p ? 1 : -1);
4104 result = sgn0 == sgn1;
4107 result = sgn0 != sgn1;
4110 result = sgn0 < sgn1;
4113 result = sgn0 <= sgn1;
4116 result = sgn0 > sgn1;
4119 result = sgn0 >= sgn1;
4125 return constant_boolean_node (result, type);
4128 /* Given EXP, a logical expression, set the range it is testing into