1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
18 You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not, see
20 <http://www.gnu.org/licenses/>. */
22 /*@@ This file should be rewritten to use an arbitrary precision
23 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
24 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
25 @@ The routines that translate from the ap rep should
@@ warn if precision et al. is lost.
27 @@ This would also make life easier when this technology is used
28 @@ for cross-compilers. */
30 /* The entry points in this file are fold, size_int_wide, size_binop
31 and force_fit_type_double.
33 fold takes a tree as argument and returns a simplified tree.
35 size_binop takes a tree code for an arithmetic operation
36 and two operands that are trees, and produces a tree for the
37 result, assuming the type comes from `sizetype'.
39 size_int takes an integer value, and creates a tree constant
40 with type from `sizetype'.
42 force_fit_type_double takes a constant, an overflowable flag and a
prior overflow indicator. It forces the value to fit the type and
sets TREE_OVERFLOW.
46 Note: Since the folders get called on non-gimple code as well as
47 gimple code, we need to handle GIMPLE tuples as well as their
48 corresponding tree equivalents. */
52 #include "coretypes.h"
57 #include "fixed-value.h"
65 #include "langhooks.h"
/* Nonzero if we are folding constants inside an initializer; zero
   otherwise.  */
70 int folding_initializer = 0;
/* The following constants represent a bit-based encoding of GCC's
73 comparison operators. This encoding simplifies transformations
74 on relational comparison operators, such as AND and OR. */
enum comparison_code {
  COMPCODE_FALSE = 0,
  COMPCODE_LT = 1,
  COMPCODE_EQ = 2,
  COMPCODE_LE = 3,
  COMPCODE_GT = 4,
  COMPCODE_LTGT = 5,
  COMPCODE_GE = 6,
  COMPCODE_ORD = 7,
  COMPCODE_UNORD = 8,
  COMPCODE_UNLT = 9,
  COMPCODE_UNEQ = 10,
  COMPCODE_UNLE = 11,
  COMPCODE_UNGT = 12,
  COMPCODE_NE = 13,
  COMPCODE_UNGE = 14,
  COMPCODE_TRUE = 15
};
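
/* Illustrative sketch (editorial example, not part of the original
   source): the bit encoding at work.  Each comparison code is the OR
   of the primitive outcomes (LT, EQ, GT, UNORD) it accepts, so
   combining two comparisons with AND or OR reduces to a bitwise
   operation on their codes.  */
static int
compcode_bits_example (void)
{
  /* "a < b || a == b" collapses to "a <= b" by OR-ing the codes.  */
  return (COMPCODE_LT | COMPCODE_EQ) == COMPCODE_LE;  /* Always 1.  */
}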
94 static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
95 static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
96 static bool negate_mathfn_p (enum built_in_function);
97 static bool negate_expr_p (tree);
98 static tree negate_expr (tree);
99 static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int);
100 static tree associate_trees (tree, tree, enum tree_code, tree);
101 static tree const_binop (enum tree_code, tree, tree, int);
102 static enum comparison_code comparison_to_compcode (enum tree_code);
103 static enum tree_code compcode_to_comparison (enum comparison_code);
104 static tree combine_comparisons (enum tree_code, enum tree_code,
105 enum tree_code, tree, tree, tree);
106 static int truth_value_p (enum tree_code);
107 static int operand_equal_for_comparison_p (tree, tree, tree);
108 static int twoval_comparison_p (tree, tree *, tree *, int *);
109 static tree eval_subst (tree, tree, tree, tree, tree);
110 static tree pedantic_omit_one_operand (tree, tree, tree);
111 static tree distribute_bit_expr (enum tree_code, tree, tree, tree);
112 static tree make_bit_field_ref (tree, tree, int, int, int);
113 static tree optimize_bit_field_compare (enum tree_code, tree, tree, tree);
114 static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
				    enum machine_mode *, int *, int *,
				    tree *, tree *);
117 static int all_ones_mask_p (const_tree, int);
118 static tree sign_bit_p (tree, const_tree);
119 static int simple_operand_p (const_tree);
120 static tree range_binop (enum tree_code, tree, tree, int, tree, int);
121 static tree range_predecessor (tree);
122 static tree range_successor (tree);
123 static tree make_range (tree, int *, tree *, tree *, bool *);
124 static tree build_range_check (tree, tree, int, tree, tree);
static int merge_ranges (int *, tree *, tree *, int, tree, tree, int, tree,
			 tree);
127 static tree fold_range_test (enum tree_code, tree, tree, tree);
128 static tree fold_cond_expr_with_comparison (tree, tree, tree, tree);
129 static tree unextend (tree, int, int, tree);
130 static tree fold_truthop (enum tree_code, tree, tree, tree);
131 static tree optimize_minmax_comparison (enum tree_code, tree, tree, tree);
132 static tree extract_muldiv (tree, tree, enum tree_code, tree, bool *);
133 static tree extract_muldiv_1 (tree, tree, enum tree_code, tree, bool *);
static tree fold_binary_op_with_conditional_arg (enum tree_code, tree,
						 tree, tree,
						 tree, tree, int);
static tree fold_mathfn_compare (enum built_in_function, enum tree_code,
				 tree, tree, tree);
139 static tree fold_inf_compare (enum tree_code, tree, tree, tree);
140 static tree fold_div_compare (enum tree_code, tree, tree, tree);
141 static bool reorder_operands_p (const_tree, const_tree);
142 static tree fold_negate_const (tree, tree);
143 static tree fold_not_const (tree, tree);
144 static tree fold_relational_const (enum tree_code, tree, tree, tree);
147 /* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
148 overflow. Suppose A, B and SUM have the same respective signs as A1, B1,
and SUM1. Then this yields nonzero if overflow occurred during the
   addition.
152 Overflow occurs if A and B have the same sign, but A and SUM differ in
sign. Use `^' to test whether signs differ, and `< 0' to isolate the
   sign.  */
155 #define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
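
/* Illustrative sketch (editorial example): exercising the macro on
   plain HOST_WIDE_INT values.  When two positive addends wrap to a
   negative sum, the operands agree in sign but the sum differs, and
   the macro yields nonzero.  */
static int
overflow_sum_sign_example (void)
{
  HOST_WIDE_INT a = (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 2);
  HOST_WIDE_INT b = a;		/* a + b wraps into the sign bit.  */
  HOST_WIDE_INT sum = a + b;	/* Wraps negative, as in add_double.  */
  return OVERFLOW_SUM_SIGN (a, b, sum);	/* 1: overflow detected.  */
}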
157 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
158 We do that by representing the two-word integer in 4 words, with only
159 HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
160 number. The value of the word is LOWPART + HIGHPART * BASE. */
#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
164 #define HIGHPART(x) \
165 ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
166 #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
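
/* Illustrative sketch (editorial example): the defining identity of
   the half-word decomposition.  Both halves fit in
   HOST_BITS_PER_WIDE_INT / 2 bits, so a product of two halves always
   fits in one full word without overflowing.  */
static int
halfword_identity_example (unsigned HOST_WIDE_INT x)
{
  return x == LOWPART (x) + HIGHPART (x) * BASE;  /* Always 1.  */
}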
168 /* Unpack a two-word integer into 4 words.
169 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
170 WORDS points to the array of HOST_WIDE_INTs. */
static void
encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
{
175 words[0] = LOWPART (low);
176 words[1] = HIGHPART (low);
177 words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}
181 /* Pack an array of 4 words into a two-word integer.
182 WORDS points to the array of words.
183 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
static void
decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
	HOST_WIDE_INT *hi)
{
189 *low = words[0] + words[1] * BASE;
  *hi = words[2] + words[3] * BASE;
}
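
/* Illustrative sketch (editorial example): encode/decode round-trip.
   Unpacking a two-word integer into four half-words and packing it
   back reproduces the original LOW/HI pair exactly.  */
static int
encode_decode_roundtrip_example (unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
{
  HOST_WIDE_INT words[4];
  unsigned HOST_WIDE_INT low2;
  HOST_WIDE_INT hi2;

  encode (words, low, hi);
  decode (words, &low2, &hi2);
  return low2 == low && hi2 == hi;	/* Always 1.  */
}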
193 /* Force the double-word integer L1, H1 to be within the range of the
194 integer type TYPE. Stores the properly truncated and sign-extended
195 double-word integer in *LV, *HV. Returns true if the operation
196 overflows, that is, argument and result are different. */
int
fit_double_type (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
		 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
		 const_tree type)
{
  unsigned HOST_WIDE_INT low0 = l1;
  HOST_WIDE_INT high0 = h1;
  unsigned int prec;
  int sign_extended_type;

  if (POINTER_TYPE_P (type)
      || TREE_CODE (type) == OFFSET_TYPE)
    prec = POINTER_SIZE;
  else
    prec = TYPE_PRECISION (type);

  /* Size types *are* sign extended.  */
  sign_extended_type = (!TYPE_UNSIGNED (type)
			|| (TREE_CODE (type) == INTEGER_TYPE
			    && TYPE_IS_SIZETYPE (type)));

  /* First clear all bits that are beyond the type's precision.  */
  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    h1 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
  else
    {
      h1 = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
	l1 &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  /* Then do sign extension if necessary.  */
  if (!sign_extended_type)
    /* No sign extension */;
  else if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    /* Correct width already.  */;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      /* Sign extend top half?  */
      if (h1 & ((unsigned HOST_WIDE_INT)1
		<< (prec - HOST_BITS_PER_WIDE_INT - 1)))
	h1 |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else if (prec == HOST_BITS_PER_WIDE_INT)
    {
      if ((HOST_WIDE_INT)l1 < 0)
	h1 = -1;
    }
  else
    {
      /* Sign extend bottom half?  */
      if (l1 & ((unsigned HOST_WIDE_INT)1 << (prec - 1)))
	{
	  h1 = -1;
	  l1 |= (HOST_WIDE_INT)(-1) << prec;
	}
    }

  *lv = l1;
  *hv = h1;

  /* If the value didn't fit, signal overflow.  */
  return l1 != low0 || h1 != high0;
}
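
/* Illustrative sketch (editorial example): the truncate-then-extend
   arithmetic above, specialized to the prec < HOST_BITS_PER_WIDE_INT
   path and a signed 8-bit precision.  0x17F truncates to 0x7F (the
   value changed, so fit_double_type would report overflow), while
   0x80 sign-extends to -128.  */
static unsigned HOST_WIDE_INT
fit_prec8_example (unsigned HOST_WIDE_INT l1)
{
  const unsigned int prec = 8;
  l1 &= ~((HOST_WIDE_INT) (-1) << prec);	/* Clear bits above PREC.  */
  if (l1 & ((unsigned HOST_WIDE_INT) 1 << (prec - 1)))
    l1 |= (HOST_WIDE_INT) (-1) << prec;		/* Sign extend from bit 7.  */
  return l1;
}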
264 /* We force the double-int HIGH:LOW to the range of the type TYPE by
265 sign or zero extending it.
266 OVERFLOWABLE indicates if we are interested
267 in overflow of the value, when >0 we are only interested in signed
268 overflow, for <0 we are interested in any overflow. OVERFLOWED
269 indicates whether overflow has already occurred. CONST_OVERFLOWED
270 indicates whether constant overflow has already occurred. We force
271 T's value to be within range of T's type (by setting to 0 or 1 all
272 the bits outside the type's range). We set TREE_OVERFLOWED if,
273 OVERFLOWED is nonzero,
274 or OVERFLOWABLE is >0 and signed overflow occurs
275 or OVERFLOWABLE is <0 and any overflow occurs
276 We return a new tree node for the extended double-int. The node
277 is shared if no overflow flags are set. */
tree
force_fit_type_double (tree type, unsigned HOST_WIDE_INT low,
		       HOST_WIDE_INT high, int overflowable,
		       bool overflowed)
{
  int sign_extended_type;
  bool overflow;

  /* Size types *are* sign extended.  */
  sign_extended_type = (!TYPE_UNSIGNED (type)
			|| (TREE_CODE (type) == INTEGER_TYPE
			    && TYPE_IS_SIZETYPE (type)));

  overflow = fit_double_type (low, high, &low, &high, type);

  /* If we need to set overflow flags, return a new unshared node.  */
  if (overflowed || overflow)
    {
      if (overflowed
	  || overflowable < 0
	  || (overflowable > 0 && sign_extended_type))
	{
	  tree t = make_node (INTEGER_CST);
	  TREE_INT_CST_LOW (t) = low;
	  TREE_INT_CST_HIGH (t) = high;
	  TREE_TYPE (t) = type;
	  TREE_OVERFLOW (t) = 1;
	  return t;
	}
    }

  /* Else build a shared node.  */
  return build_int_cst_wide (type, low, high);
}
314 /* Add two doubleword integers with doubleword result.
315 Return nonzero if the operation overflows according to UNSIGNED_P.
316 Each argument is given as two `HOST_WIDE_INT' pieces.
317 One argument is L1 and H1; the other, L2 and H2.
318 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
int
add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
		      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
		      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
		      bool unsigned_p)
{
  unsigned HOST_WIDE_INT l;
  HOST_WIDE_INT h;

  l = l1 + l2;
  h = h1 + h2 + (l < l1);

  *lv = l;
  *hv = h;

  if (unsigned_p)
    return (unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1;
  else
    return OVERFLOW_SUM_SIGN (h1, h2, h);
}
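
/* Illustrative sketch (editorial example): the carry detection used
   above.  After l = l1 + l2 wraps modulo 2^HOST_BITS_PER_WIDE_INT,
   the sum is smaller than an addend exactly when the addition carried
   out of the word, which is what feeds the high-word sum.  */
static unsigned HOST_WIDE_INT
carry_out_example (unsigned HOST_WIDE_INT l1, unsigned HOST_WIDE_INT l2)
{
  unsigned HOST_WIDE_INT l = l1 + l2;
  return l < l1;	/* 1 iff a carry into the high word occurred.  */
}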
341 /* Negate a doubleword integer with doubleword result.
342 Return nonzero if the operation overflows, assuming it's signed.
343 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
344 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
int
neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
	    unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = -l1;
      *hv = ~h1;
      return 0;
    }
}
364 /* Multiply two doubleword integers with doubleword result.
365 Return nonzero if the operation overflows according to UNSIGNED_P.
366 Each argument is given as two `HOST_WIDE_INT' pieces.
367 One argument is L1 and H1; the other, L2 and H2.
368 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
int
mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
		      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
		      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
		      bool unsigned_p)
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  unsigned HOST_WIDE_INT carry;
  int i, j, k;
  unsigned HOST_WIDE_INT toplow, neglow;
  HOST_WIDE_INT tophigh, neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  memset (prod, 0, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
	{
	  k = i + j;
	  /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
	  carry += arg1[i] * arg2[j];
	  /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
	  carry += prod[k];
	  prod[k] = LOWPART (carry);
	  carry = HIGHPART (carry);
	}
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);
  decode (prod + 4, &toplow, &tophigh);

  /* Unsigned overflow is immediate.  */
  if (unsigned_p)
    return (toplow | tophigh) != 0;

  /* Check for signed overflow by calculating the signed representation of the
     top half of the result; it should agree with the low half's sign bit.  */
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
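
/* Illustrative sketch (editorial example): the schoolbook scheme
   above in miniature, with 8-bit factors split into 4-bit halves
   instead of words split into BASE-sized digits.  The four partial
   products accumulate at ascending digit positions exactly as in the
   i/j loop.  */
static unsigned int
halfdigit_mul_example (unsigned char x, unsigned char y)
{
  unsigned int xl = x & 0xF, xh = x >> 4;
  unsigned int yl = y & 0xF, yh = y >> 4;
  /* Equals (unsigned int) x * y for all inputs.  */
  return xl * yl + ((xl * yh + xh * yl) << 4) + ((xh * yh) << 8);
}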
427 /* Shift the doubleword integer in L1, H1 left by COUNT places
428 keeping only PREC bits of result.
429 Shift right if COUNT is negative.
430 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
431 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
void
lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
	       HOST_WIDE_INT count, unsigned int prec,
	       unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith)
{
  unsigned HOST_WIDE_INT signmask;

  if (count < 0)
    {
      rshift_double (l1, h1, -count, prec, lv, hv, arith);
      return;
    }

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
	 ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
	     | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
      *lv = l1 << count;
    }

  /* Sign extend all bits that are beyond the precision.  */

  signmask = -((prec > HOST_BITS_PER_WIDE_INT
		? ((unsigned HOST_WIDE_INT) *hv
		   >> (prec - HOST_BITS_PER_WIDE_INT - 1))
		: (*lv >> (prec - 1))) & 1);

  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
      *lv |= signmask << prec;
    }
}
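
/* Illustrative sketch (editorial example): the double-shift idiom
   used above.  A single shift by HOST_BITS_PER_WIDE_INT would be
   undefined, so the bits that cross between words are moved in two
   half-steps; for count == 0 the expression correctly yields 0.  */
static unsigned HOST_WIDE_INT
cross_word_bits_example (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT count)
{
  /* The bits of L1 that a left shift by COUNT pushes into the high
     word (valid for 0 <= count < HOST_BITS_PER_WIDE_INT).  */
  return l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1;
}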
490 /* Shift the doubleword integer in L1, H1 right by COUNT places
491 keeping only PREC bits of result. COUNT must be positive.
492 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
493 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
void
rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
	       HOST_WIDE_INT count, unsigned int prec,
	       unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
	       int arith)
{
  unsigned HOST_WIDE_INT signmask;

  signmask = (arith
	      ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
	      : 0);

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
	 ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = 0;
      *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = (unsigned HOST_WIDE_INT) h1 >> count;
      *lv = ((l1 >> count)
	     | ((unsigned HOST_WIDE_INT) h1
		<< (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
    }

  /* Zero / sign extend all bits that are beyond the precision.  */

  if (count >= (HOST_WIDE_INT)prec)
    {
      *hv = signmask;
      *lv = signmask;
    }
  else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
      *lv |= signmask << (prec - count);
    }
}
551 /* Rotate the doubleword integer in L1, H1 left by COUNT places
552 keeping only PREC bits of result.
553 Rotate right if COUNT is negative.
554 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
void
lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
		HOST_WIDE_INT count, unsigned int prec,
		unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
/* Rotate the doubleword integer in L1, H1 right by COUNT places
575 keeping only PREC bits of result. COUNT must be positive.
576 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
void
rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
		HOST_WIDE_INT count, unsigned int prec,
		unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
596 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
597 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
598 CODE is a tree code for a kind of division, one of
599 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
601 It controls how the quotient is rounded to an integer.
602 Return nonzero if the operation overflows.
603 UNS nonzero says do unsigned division. */
int
div_and_round_double (enum tree_code code, int uns,
		      unsigned HOST_WIDE_INT lnum_orig, /* num == numerator == dividend */
		      HOST_WIDE_INT hnum_orig,
		      unsigned HOST_WIDE_INT lden_orig, /* den == denominator == divisor */
		      HOST_WIDE_INT hden_orig,
		      unsigned HOST_WIDE_INT *lquo,
		      HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
		      HOST_WIDE_INT *hrem)
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];	/* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  int i, j;
  unsigned HOST_WIDE_INT work;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  unsigned HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  if (hden == 0 && lden == 0)
    overflow = 1, lden = 1;

  /* Calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
	{
	  quo_neg = ~ quo_neg;
	  /* (minimum integer) / (-1) is the only overflow case.  */
	  if (neg_double (lnum, hnum, &lnum, &hnum)
	      && ((HOST_WIDE_INT) lden & hden) == -1)
	    overflow = 1;
	}
      if (hden < 0)
	{
	  quo_neg = ~ quo_neg;
	  neg_double (lden, hden, &lden, &hden);
	}
    }

  if (hnum == 0 && hden == 0)
    {				/* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / lden;
      goto finish_up;
    }

  if (hnum == 0)
    {				/* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  memset (quo, 0, sizeof quo);

  memset (num, 0, sizeof num);	/* to zero the extra scaling element */
  memset (den, 0, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
    {
      /* hnum != 0 already checked.  */
      for (i = 4 - 1; i >= 0; i--)
	{
	  work = num[i] + carry * BASE;
	  quo[i] = work / lden;
	  carry = work % lden;
	}
    }
  else
    {
      /* Full double precision division,
	 with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int num_hi_sig, den_hi_sig;
      unsigned HOST_WIDE_INT quo_est, scale;

      /* Find the highest nonzero divisor digit.  */
      for (i = 4 - 1;; i--)
	if (den[i] != 0)
	  {
	    den_hi_sig = i;
	    break;
	  }

      /* Ensure that the first digit of the divisor is at least BASE/2.
	 This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1)
	{		/* scale divisor and dividend */
	  carry = 0;
	  for (i = 0; i <= 4 - 1; i++)
	    {
	      work = (num[i] * scale) + carry;
	      num[i] = LOWPART (work);
	      carry = HIGHPART (work);
	    }

	  num[4] = carry;
	  carry = 0;
	  for (i = 0; i <= 4 - 1; i++)
	    {
	      work = (den[i] * scale) + carry;
	      den[i] = LOWPART (work);
	      carry = HIGHPART (work);
	      if (den[i] != 0) den_hi_sig = i;
	    }
	}

      num_hi_sig = 4;

      /* Main loop.  */
      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
	{
	  /* Guess the next quotient digit, quo_est, by dividing the first
	     two remaining dividend digits by the high order quotient digit.
	     quo_est is never low and is at most 2 high.  */
	  unsigned HOST_WIDE_INT tmp;

	  num_hi_sig = i + den_hi_sig + 1;
	  work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
	  if (num[num_hi_sig] != den[den_hi_sig])
	    quo_est = work / den[den_hi_sig];
	  else
	    quo_est = BASE - 1;

	  /* Refine quo_est so it's usually correct, and at most one high.  */
	  tmp = work - quo_est * den[den_hi_sig];
	  if (tmp < BASE
	      && (den[den_hi_sig - 1] * quo_est
		  > (tmp * BASE + num[num_hi_sig - 2])))
	    quo_est--;

	  /* Try QUO_EST as the quotient digit, by multiplying the
	     divisor by QUO_EST and subtracting from the remaining dividend.
	     Keep in mind that QUO_EST is the I - 1st digit.  */

	  carry = 0;
	  for (j = 0; j <= den_hi_sig; j++)
	    {
	      work = quo_est * den[j] + carry;
	      carry = HIGHPART (work);
	      work = num[i + j] - LOWPART (work);
	      num[i + j] = LOWPART (work);
	      carry += HIGHPART (work) != 0;
	    }

	  /* If quo_est was high by one, then num[i] went negative and
	     we need to correct things.  */
	  if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
	    {
	      quo_est--;
	      carry = 0;		/* add divisor back in */
	      for (j = 0; j <= den_hi_sig; j++)
		{
		  work = num[i + j] + den[j] + carry;
		  carry = HIGHPART (work);
		  num[i + j] = LOWPART (work);
		}

	      num[num_hi_sig] += carry;
	    }

	  /* Store the quotient digit.  */
	  quo[i] = quo_est;
	}
    }

  decode (quo, lquo, hquo);

 finish_up:
  /* If the result should be negative, negate it.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* Compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:	/* round toward zero */
    case EXACT_DIV_EXPR:	/* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:	/* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio < 0 && rem != 0 */
	{
	  /* quo = quo - 1;  */
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
		      lquo, hquo);
	}
      else
	return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:		/* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
	{
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
		      lquo, hquo);
	}
      else
	return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:	/* round to closest integer */
      {
	unsigned HOST_WIDE_INT labs_rem = *lrem;
	HOST_WIDE_INT habs_rem = *hrem;
	unsigned HOST_WIDE_INT labs_den = lden, ltwice;
	HOST_WIDE_INT habs_den = hden, htwice;

	/* Get absolute values.  */
	if (*hrem < 0)
	  neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
	if (hden < 0)
	  neg_double (lden, hden, &labs_den, &habs_den);

	/* If (2 * abs (lrem) >= abs (lden)) */
	mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
		    labs_rem, habs_rem, &ltwice, &htwice);

	if (((unsigned HOST_WIDE_INT) habs_den
	     < (unsigned HOST_WIDE_INT) htwice)
	    || (((unsigned HOST_WIDE_INT) habs_den
		 == (unsigned HOST_WIDE_INT) htwice)
		&& (labs_den < ltwice)))
	  {
	    if (quo_neg)
	      /* quo = quo - 1;  */
	      add_double (*lquo, *hquo,
			  (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
	    else
	      /* quo = quo + 1;  */
	      add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
			  lquo, hquo);
	  }
	else
	  return overflow;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  return overflow;
}
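
/* Illustrative sketch (editorial example): the rounding styles above
   applied to the signed quotient -7 / 2.  TRUNC yields -3 rem -1,
   FLOOR yields -4 rem 1, CEIL yields -3 rem -1, and ROUND yields -4
   rem 1, since 2 * |rem| >= |den| rounds away from zero.  */
static void
div_round_modes_example (void)
{
  unsigned HOST_WIDE_INT lquo, lrem;
  HOST_WIDE_INT hquo, hrem;

  div_and_round_double (FLOOR_DIV_EXPR, 0,
			(unsigned HOST_WIDE_INT) -7, (HOST_WIDE_INT) -1,
			2, 0, &lquo, &hquo, &lrem, &hrem);
  /* Here quo == -4 and rem == 1; with TRUNC_DIV_EXPR instead the
     result would be quo == -3 and rem == -1.  */
}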
874 /* If ARG2 divides ARG1 with zero remainder, carries out the division
875 of type CODE and returns the quotient.
876 Otherwise returns NULL_TREE. */
tree
div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2)
{
  unsigned HOST_WIDE_INT int1l, int2l;
  HOST_WIDE_INT int1h, int2h;
  unsigned HOST_WIDE_INT quol, reml;
  HOST_WIDE_INT quoh, remh;
  tree type = TREE_TYPE (arg1);
  int uns = TYPE_UNSIGNED (type);

  int1l = TREE_INT_CST_LOW (arg1);
  int1h = TREE_INT_CST_HIGH (arg1);
  /* &obj[0] + -128 really should be compiled as &obj[-8] rather than
     &obj[some_exotic_number].  */
  if (POINTER_TYPE_P (type))
    {
      uns = 0;
      type = signed_type_for (type);
      fit_double_type (int1l, int1h, &int1l, &int1h,
		       type);
    }
  else
    fit_double_type (int1l, int1h, &int1l, &int1h, type);
  int2l = TREE_INT_CST_LOW (arg2);
  int2h = TREE_INT_CST_HIGH (arg2);

  div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
			&quol, &quoh, &reml, &remh);
  if (remh != 0 || reml != 0)
    return NULL_TREE;

  return build_int_cst_wide (type, quol, quoh);
}
912 /* This is nonzero if we should defer warnings about undefined
913 overflow. This facility exists because these warnings are a
914 special case. The code to estimate loop iterations does not want
915 to issue any warnings, since it works with expressions which do not
916 occur in user code. Various bits of cleanup code call fold(), but
917 only use the result if it has certain characteristics (e.g., is a
   constant); that code only wants to issue a warning if the result is
   actually used.  */
921 static int fold_deferring_overflow_warnings;
923 /* If a warning about undefined overflow is deferred, this is the
924 warning. Note that this may cause us to turn two warnings into
925 one, but that is fine since it is sufficient to only give one
926 warning per expression. */
928 static const char* fold_deferred_overflow_warning;
930 /* If a warning about undefined overflow is deferred, this is the
931 level at which the warning should be emitted. */
933 static enum warn_strict_overflow_code fold_deferred_overflow_code;
935 /* Start deferring overflow warnings. We could use a stack here to
936 permit nested calls, but at present it is not necessary. */
void
fold_defer_overflow_warnings (void)
{
  ++fold_deferring_overflow_warnings;
}
944 /* Stop deferring overflow warnings. If there is a pending warning,
945 and ISSUE is true, then issue the warning if appropriate. STMT is
946 the statement with which the warning should be associated (used for
947 location information); STMT may be NULL. CODE is the level of the
948 warning--a warn_strict_overflow_code value. This function will use
949 the smaller of CODE and the deferred code when deciding whether to
   issue the warning. CODE may be zero to mean to always use the
   deferred code.  */
void
fold_undefer_overflow_warnings (bool issue, const_tree stmt, int code)
{
  const char *warnmsg;
  location_t locus;

  gcc_assert (fold_deferring_overflow_warnings > 0);
  --fold_deferring_overflow_warnings;
  if (fold_deferring_overflow_warnings > 0)
    {
      if (fold_deferred_overflow_warning != NULL
	  && code != 0
	  && code < (int) fold_deferred_overflow_code)
	fold_deferred_overflow_code = code;
      return;
    }

  warnmsg = fold_deferred_overflow_warning;
  fold_deferred_overflow_warning = NULL;

  if (!issue || warnmsg == NULL)
    return;

  if (stmt != NULL_TREE && TREE_NO_WARNING (stmt))
    return;

  /* Use the smallest code level when deciding to issue the
     warning.  */
  if (code == 0 || code > (int) fold_deferred_overflow_code)
    code = fold_deferred_overflow_code;

  if (!issue_strict_overflow_warning (code))
    return;

  if (stmt == NULL_TREE || !expr_has_location (stmt))
    locus = input_location;
  else
    locus = expr_location (stmt);
  warning (OPT_Wstrict_overflow, "%H%s", &locus, warnmsg);
}
/* Stop deferring overflow warnings, ignoring any deferred
   warnings.  */
void
fold_undefer_and_ignore_overflow_warnings (void)
{
  fold_undefer_overflow_warnings (false, NULL_TREE, 0);
}
1003 /* Whether we are deferring overflow warnings. */
bool
fold_deferring_overflow_warnings_p (void)
{
  return fold_deferring_overflow_warnings > 0;
}
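
/* Illustrative sketch (editorial example): the intended pairing of
   the deferral entry points.  A caller that only wants a warning when
   the folded result is actually used brackets the fold like so; the
   NULL_TREE statement means no location is attached.  */
static tree
fold_with_deferred_warnings_example (tree expr, bool result_used)
{
  tree folded;

  fold_defer_overflow_warnings ();
  folded = fold (expr);
  fold_undefer_overflow_warnings (result_used, NULL_TREE, 0);
  return folded;
}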
1011 /* This is called when we fold something based on the fact that signed
1012 overflow is undefined. */
static void
fold_overflow_warning (const char* gmsgid, enum warn_strict_overflow_code wc)
{
  gcc_assert (!flag_wrapv && !flag_trapv);
  if (fold_deferring_overflow_warnings > 0)
    {
      if (fold_deferred_overflow_warning == NULL
	  || wc < fold_deferred_overflow_code)
	{
	  fold_deferred_overflow_warning = gmsgid;
	  fold_deferred_overflow_code = wc;
	}
    }
  else if (issue_strict_overflow_warning (wc))
    warning (OPT_Wstrict_overflow, gmsgid);
}
1031 /* Return true if the built-in mathematical function specified by CODE
1032 is odd, i.e. -f(x) == f(-x). */
static bool
negate_mathfn_p (enum built_in_function code)
{
  switch (code)
    {
1039 CASE_FLT_FN (BUILT_IN_ASIN):
1040 CASE_FLT_FN (BUILT_IN_ASINH):
1041 CASE_FLT_FN (BUILT_IN_ATAN):
1042 CASE_FLT_FN (BUILT_IN_ATANH):
1043 CASE_FLT_FN (BUILT_IN_CASIN):
1044 CASE_FLT_FN (BUILT_IN_CASINH):
1045 CASE_FLT_FN (BUILT_IN_CATAN):
1046 CASE_FLT_FN (BUILT_IN_CATANH):
1047 CASE_FLT_FN (BUILT_IN_CBRT):
1048 CASE_FLT_FN (BUILT_IN_CPROJ):
1049 CASE_FLT_FN (BUILT_IN_CSIN):
1050 CASE_FLT_FN (BUILT_IN_CSINH):
1051 CASE_FLT_FN (BUILT_IN_CTAN):
1052 CASE_FLT_FN (BUILT_IN_CTANH):
1053 CASE_FLT_FN (BUILT_IN_ERF):
1054 CASE_FLT_FN (BUILT_IN_LLROUND):
1055 CASE_FLT_FN (BUILT_IN_LROUND):
1056 CASE_FLT_FN (BUILT_IN_ROUND):
1057 CASE_FLT_FN (BUILT_IN_SIN):
1058 CASE_FLT_FN (BUILT_IN_SINH):
1059 CASE_FLT_FN (BUILT_IN_TAN):
1060 CASE_FLT_FN (BUILT_IN_TANH):
    CASE_FLT_FN (BUILT_IN_TRUNC):
      return true;

1064 CASE_FLT_FN (BUILT_IN_LLRINT):
1065 CASE_FLT_FN (BUILT_IN_LRINT):
1066 CASE_FLT_FN (BUILT_IN_NEARBYINT):
1067 CASE_FLT_FN (BUILT_IN_RINT):
      return !flag_rounding_math;

    default:
      break;
    }
  return false;
}
/* Check whether we may negate an integer constant T without causing
   overflow.  */
static bool
may_negate_without_overflow_p (const_tree t)
{
  unsigned HOST_WIDE_INT val;
  unsigned int prec;
  tree type;

  gcc_assert (TREE_CODE (t) == INTEGER_CST);

  type = TREE_TYPE (t);
  if (TYPE_UNSIGNED (type))
    return false;

  prec = TYPE_PRECISION (type);
  if (prec > HOST_BITS_PER_WIDE_INT)
    {
      if (TREE_INT_CST_LOW (t) != 0)
	return true;
      prec -= HOST_BITS_PER_WIDE_INT;
      val = TREE_INT_CST_HIGH (t);
    }
  else
    val = TREE_INT_CST_LOW (t);
  if (prec < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
  return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1));
}
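
/* Illustrative sketch (editorial example): the lone failing value the
   test above isolates.  In a signed PREC-bit type the only value
   whose negation overflows is -2^(PREC-1); for PREC == 8 that is the
   bit pattern 0x80.  */
static int
may_negate_8bit_example (unsigned HOST_WIDE_INT val)
{
  const unsigned int prec = 8;
  val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
  /* 0 for 0x80 (i.e. -128), 1 for every other 8-bit value.  */
  return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1));
}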
1107 /* Determine whether an expression T can be cheaply negated using
1108 the function negate_expr without introducing undefined overflow. */
static bool
negate_expr_p (tree t)
{
  tree type;

  if (t == 0)
    return false;

  type = TREE_TYPE (t);

  STRIP_SIGN_NOPS (t);
  switch (TREE_CODE (t))
    {
    case INTEGER_CST:
      if (TYPE_OVERFLOW_WRAPS (type))
	return true;

      /* Check that -CST will not overflow type.  */
      return may_negate_without_overflow_p (t);
    case BIT_NOT_EXPR:
      return (INTEGRAL_TYPE_P (type)
	      && TYPE_OVERFLOW_WRAPS (type));

    case FIXED_CST:
    case REAL_CST:
    case NEGATE_EXPR:
      return true;

    case COMPLEX_CST:
      return negate_expr_p (TREE_REALPART (t))
	     && negate_expr_p (TREE_IMAGPART (t));

    case COMPLEX_EXPR:
      return negate_expr_p (TREE_OPERAND (t, 0))
	     && negate_expr_p (TREE_OPERAND (t, 1));

    case CONJ_EXPR:
      return negate_expr_p (TREE_OPERAND (t, 0));

    case PLUS_EXPR:
      if (HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
	  || HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
	return false;
      /* -(A + B) -> (-B) - A.  */
      if (negate_expr_p (TREE_OPERAND (t, 1))
	  && reorder_operands_p (TREE_OPERAND (t, 0),
				 TREE_OPERAND (t, 1)))
	return true;
      /* -(A + B) -> (-A) - B.  */
      return negate_expr_p (TREE_OPERAND (t, 0));

    case MINUS_EXPR:
      /* We can't turn -(A-B) into B-A when we honor signed zeros.  */
      return !HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
	     && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
	     && reorder_operands_p (TREE_OPERAND (t, 0),
				    TREE_OPERAND (t, 1));

    case MULT_EXPR:
      if (TYPE_UNSIGNED (TREE_TYPE (t)))
	break;

      /* Fall through.  */

    case RDIV_EXPR:
      if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
	return negate_expr_p (TREE_OPERAND (t, 1))
	       || negate_expr_p (TREE_OPERAND (t, 0));
      break;

    case TRUNC_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case EXACT_DIV_EXPR:
      /* In general we can't negate A / B, because if A is INT_MIN and
	 B is 1, we may turn this into INT_MIN / -1 which is undefined
	 and actually traps on some architectures.  But if overflow is
	 undefined, we can negate, because - (INT_MIN / 1) is an
	 overflow.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	  && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t)))
	break;
      return negate_expr_p (TREE_OPERAND (t, 1))
	     || negate_expr_p (TREE_OPERAND (t, 0));

    case NOP_EXPR:
      /* Negate -((double)float) as (double)(-float).  */
      if (TREE_CODE (type) == REAL_TYPE)
	{
	  tree tem = strip_float_extensions (t);
	  if (tem != t)
	    return negate_expr_p (tem);
	}
      break;

    case CALL_EXPR:
      /* Negate -f(x) as f(-x).  */
      if (negate_mathfn_p (builtin_mathfn_code (t)))
	return negate_expr_p (CALL_EXPR_ARG (t, 0));
      break;

    case RSHIFT_EXPR:
      /* Optimize -((int)x >> 31) into (unsigned)x >> 31.  */
      if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
	{
	  tree op1 = TREE_OPERAND (t, 1);
	  if (TREE_INT_CST_HIGH (op1) == 0
	      && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
		 == TREE_INT_CST_LOW (op1))
	    return TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (t, 0)));
	}
      break;

    default:
      break;
    }
  return false;
}
1230 /* Given T, an expression, return a folded tree for -T or NULL_TREE, if no
1231 simplification is possible.
   If negate_expr_p would return true for T, NULL_TREE will never be
   returned.  */
static tree
fold_negate_expr (tree t)
{
  tree type = TREE_TYPE (t);
  tree tem;

  switch (TREE_CODE (t))
    {
    /* Convert - (~A) to A + 1.  */
    case BIT_NOT_EXPR:
      if (INTEGRAL_TYPE_P (type))
	return fold_build2 (PLUS_EXPR, type, TREE_OPERAND (t, 0),
			    build_int_cst (type, 1));
      break;

    case INTEGER_CST:
      tem = fold_negate_const (t, type);
      if (TREE_OVERFLOW (tem) == TREE_OVERFLOW (t)
	  || !TYPE_OVERFLOW_TRAPS (type))
	return tem;
      break;

    case REAL_CST:
      tem = fold_negate_const (t, type);
      /* Two's complement FP formats, such as c4x, may overflow.  */
      if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
	return tem;
      break;

    case FIXED_CST:
      tem = fold_negate_const (t, type);
      return tem;

    case COMPLEX_CST:
      {
	tree rpart = negate_expr (TREE_REALPART (t));
	tree ipart = negate_expr (TREE_IMAGPART (t));

	if ((TREE_CODE (rpart) == REAL_CST
	     && TREE_CODE (ipart) == REAL_CST)
	    || (TREE_CODE (rpart) == INTEGER_CST
		&& TREE_CODE (ipart) == INTEGER_CST))
	  return build_complex (type, rpart, ipart);
      }
      break;

    case COMPLEX_EXPR:
      if (negate_expr_p (t))
	return fold_build2 (COMPLEX_EXPR, type,
			    fold_negate_expr (TREE_OPERAND (t, 0)),
			    fold_negate_expr (TREE_OPERAND (t, 1)));
      break;

    case CONJ_EXPR:
      if (negate_expr_p (t))
	return fold_build1 (CONJ_EXPR, type,
			    fold_negate_expr (TREE_OPERAND (t, 0)));
      break;

    case NEGATE_EXPR:
      return TREE_OPERAND (t, 0);

    case PLUS_EXPR:
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
	  && !HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
	{
	  /* -(A + B) -> (-B) - A.  */
	  if (negate_expr_p (TREE_OPERAND (t, 1))
	      && reorder_operands_p (TREE_OPERAND (t, 0),
				     TREE_OPERAND (t, 1)))
	    {
	      tem = negate_expr (TREE_OPERAND (t, 1));
	      return fold_build2 (MINUS_EXPR, type,
				  tem, TREE_OPERAND (t, 0));
	    }

	  /* -(A + B) -> (-A) - B.  */
	  if (negate_expr_p (TREE_OPERAND (t, 0)))
	    {
	      tem = negate_expr (TREE_OPERAND (t, 0));
	      return fold_build2 (MINUS_EXPR, type,
				  tem, TREE_OPERAND (t, 1));
	    }
	}
      break;

    case MINUS_EXPR:
      /* - (A - B) -> B - A  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
	  && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
	  && reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1)))
	return fold_build2 (MINUS_EXPR, type,
			    TREE_OPERAND (t, 1), TREE_OPERAND (t, 0));
      break;

    case MULT_EXPR:
      if (TYPE_UNSIGNED (type))
	break;

      /* Fall through.  */

    case RDIV_EXPR:
      if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type)))
	{
	  tem = TREE_OPERAND (t, 1);
	  if (negate_expr_p (tem))
	    return fold_build2 (TREE_CODE (t), type,
				TREE_OPERAND (t, 0), negate_expr (tem));
	  tem = TREE_OPERAND (t, 0);
	  if (negate_expr_p (tem))
	    return fold_build2 (TREE_CODE (t), type,
				negate_expr (tem), TREE_OPERAND (t, 1));
	}
      break;

    case TRUNC_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case EXACT_DIV_EXPR:
      /* In general we can't negate A / B, because if A is INT_MIN and
	 B is 1, we may turn this into INT_MIN / -1 which is undefined
	 and actually traps on some architectures.  But if overflow is
	 undefined, we can negate, because - (INT_MIN / 1) is an
	 overflow.  */
      if (!INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_UNDEFINED (type))
	{
	  const char * const warnmsg = G_("assuming signed overflow does not "
					  "occur when negating a division");
	  tem = TREE_OPERAND (t, 1);
	  if (negate_expr_p (tem))
	    {
	      if (INTEGRAL_TYPE_P (type)
		  && (TREE_CODE (tem) != INTEGER_CST
		      || integer_onep (tem)))
		fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
	      return fold_build2 (TREE_CODE (t), type,
				  TREE_OPERAND (t, 0), negate_expr (tem));
	    }
	  tem = TREE_OPERAND (t, 0);
	  if (negate_expr_p (tem))
	    {
	      if (INTEGRAL_TYPE_P (type)
		  && (TREE_CODE (tem) != INTEGER_CST
		      || tree_int_cst_equal (tem, TYPE_MIN_VALUE (type))))
		fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
	      return fold_build2 (TREE_CODE (t), type,
				  negate_expr (tem), TREE_OPERAND (t, 1));
	    }
	}
      break;

    case NOP_EXPR:
      /* Convert -((double)float) into (double)(-float).  */
      if (TREE_CODE (type) == REAL_TYPE)
	{
	  tem = strip_float_extensions (t);
	  if (tem != t && negate_expr_p (tem))
	    return fold_convert (type, negate_expr (tem));
	}
      break;

    case CALL_EXPR:
      /* Negate -f(x) as f(-x).  */
      if (negate_mathfn_p (builtin_mathfn_code (t))
	  && negate_expr_p (CALL_EXPR_ARG (t, 0)))
	{
	  tree fndecl, arg;

	  fndecl = get_callee_fndecl (t);
	  arg = negate_expr (CALL_EXPR_ARG (t, 0));
	  return build_call_expr (fndecl, 1, arg);
	}
      break;

    case RSHIFT_EXPR:
      /* Optimize -((int)x >> 31) into (unsigned)x >> 31.  */
      if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
	{
	  tree op1 = TREE_OPERAND (t, 1);
	  if (TREE_INT_CST_HIGH (op1) == 0
	      && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
		 == TREE_INT_CST_LOW (op1))
	    {
	      tree ntype = TYPE_UNSIGNED (type)
			   ? signed_type_for (type)
			   : unsigned_type_for (type);
	      tree temp = fold_convert (ntype, TREE_OPERAND (t, 0));
	      temp = fold_build2 (RSHIFT_EXPR, ntype, temp, op1);
	      return fold_convert (type, temp);
	    }
	}
      break;

    default:
      break;
    }

  return NULL_TREE;
}
/* Like fold_negate_expr, but return a NEGATE_EXPR tree, if T cannot be
1437 negated in a simpler way. Also allow for T to be NULL_TREE, in which case
1438 return NULL_TREE. */
static tree
negate_expr (tree t)
{
  tree type, tem;

  if (t == NULL_TREE)
    return NULL_TREE;

  type = TREE_TYPE (t);
  STRIP_SIGN_NOPS (t);

  tem = fold_negate_expr (t);
  if (!tem)
    tem = build1 (NEGATE_EXPR, TREE_TYPE (t), t);
  return fold_convert (type, tem);
}
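
/* Illustrative sketch (editorial example): the public face of the
   negation machinery.  negate_expr never returns NULL_TREE; it
   applies the identities above (e.g. -(a - b) -> b - a, or
   -~a -> a + 1 for integral types) when they are safe, and otherwise
   wraps its argument in a plain NEGATE_EXPR.  */
static tree
negate_expr_usage_example (tree t)
{
  /* For T the tree of ~a in an integral type, this returns the tree
     of a + 1 rather than -(~a).  */
  return negate_expr (t);
}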
/* Split a tree IN into constant, literal, and variable parts that could be
1458 combined with CODE to make IN. "constant" means an expression with
1459 TREE_CONSTANT but that isn't an actual constant. CODE must be a
1460 commutative arithmetic operation. Store the constant part into *CONP,
1461 the literal in *LITP and return the variable part. If a part isn't
1462 present, set it to null. If the tree does not decompose in this way,
1463 return the entire tree as the variable part and the other parts as null.
1465 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR. In that
1466 case, we negate an operand that was subtracted. Except if it is a
1467 literal for which we use *MINUS_LITP instead.
1469 If NEGATE_P is true, we are negating all of IN, again except a literal
1470 for which we use *MINUS_LITP instead.
1472 If IN is itself a literal or constant, return it as appropriate.
1474 Note that we do not guarantee that any of the three values will be the
1475 same type as IN, but they will have the same signedness and mode. */
static tree
split_tree (tree in, enum tree_code code, tree *conp, tree *litp,
	    tree *minus_litp, int negate_p)
{
  tree var = 0;

  *conp = 0;
  *litp = 0;
  *minus_litp = 0;

  /* Strip any conversions that don't change the machine mode or signedness.  */
  STRIP_SIGN_NOPS (in);

  if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST
      || TREE_CODE (in) == FIXED_CST)
    *litp = in;
  else if (TREE_CODE (in) == code
	   || ((! FLOAT_TYPE_P (TREE_TYPE (in)) || flag_associative_math)
	       && ! SAT_FIXED_POINT_TYPE_P (TREE_TYPE (in))
	       /* We can associate addition and subtraction together (even
		  though the C standard doesn't say so) for integers because
		  the value is not affected.  For reals, the value might be
		  affected, so we can't.  */
	       && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
		   || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
    {
      tree op0 = TREE_OPERAND (in, 0);
      tree op1 = TREE_OPERAND (in, 1);
      int neg1_p = TREE_CODE (in) == MINUS_EXPR;
      int neg_litp_p = 0, neg_conp_p = 0, neg_var_p = 0;

      /* First see if either of the operands is a literal, then a constant.  */
      if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST
	  || TREE_CODE (op0) == FIXED_CST)
	*litp = op0, op0 = 0;
      else if (TREE_CODE (op1) == INTEGER_CST || TREE_CODE (op1) == REAL_CST
	       || TREE_CODE (op1) == FIXED_CST)
	*litp = op1, neg_litp_p = neg1_p, op1 = 0;

      if (op0 != 0 && TREE_CONSTANT (op0))
	*conp = op0, op0 = 0;
      else if (op1 != 0 && TREE_CONSTANT (op1))
	*conp = op1, neg_conp_p = neg1_p, op1 = 0;

      /* If we haven't dealt with either operand, this is not a case we can
	 decompose.  Otherwise, VAR is either of the ones remaining, if any.  */
      if (op0 != 0 && op1 != 0)
	var = in;
      else if (op0 != 0)
	var = op0;
      else
	var = op1, neg_var_p = neg1_p;

      /* Now do any needed negations.  */
      if (neg_litp_p)
	*minus_litp = *litp, *litp = 0;
      if (neg_conp_p)
	*conp = negate_expr (*conp);
      if (neg_var_p)
	var = negate_expr (var);
    }
  else if (TREE_CONSTANT (in))
    *conp = in;
  else
    var = in;

  if (negate_p)
    {
      if (*litp)
	*minus_litp = *litp, *litp = 0;
      else if (*minus_litp)
	*litp = *minus_litp, *minus_litp = 0;
      *conp = negate_expr (*conp);
      var = negate_expr (var);
    }

  return var;
}
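
/* Illustrative sketch (editorial example): decomposing A + CST.  For
   IN the tree of a + 4 and CODE == PLUS_EXPR, split_tree stores the
   literal 4 in *LITP, leaves *CONP and *MINUS_LITP null, and returns
   the variable part a; the pieces can then be recombined with
   associate_trees.  */
static tree
split_tree_usage_example (tree in)
{
  tree con, lit, minus_lit, var;

  var = split_tree (in, PLUS_EXPR, &con, &lit, &minus_lit,
		    /*negate_p=*/0);
  return var;
}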
1556 /* Re-associate trees split by the above function. T1 and T2 are either
1557 expressions to associate or null. Return the new expression, if any. If
1558 we build an operation, do it in TYPE and with CODE. */
static tree
associate_trees (tree t1, tree t2, enum tree_code code, tree type)
{
  if (t1 == 0)
    return t2;
  if (t2 == 0)
    return t1;

  /* If either input is CODE, a PLUS_EXPR, or a MINUS_EXPR, don't
     try to fold this since we will have infinite recursion.  But do
     deal with any NEGATE_EXPRs.  */
  if (TREE_CODE (t1) == code || TREE_CODE (t2) == code
      || TREE_CODE (t1) == MINUS_EXPR || TREE_CODE (t2) == MINUS_EXPR)
    {
      if (code == PLUS_EXPR)
	{
	  if (TREE_CODE (t1) == NEGATE_EXPR)
	    return build2 (MINUS_EXPR, type, fold_convert (type, t2),
			   fold_convert (type, TREE_OPERAND (t1, 0)));
	  else if (TREE_CODE (t2) == NEGATE_EXPR)
	    return build2 (MINUS_EXPR, type, fold_convert (type, t1),
			   fold_convert (type, TREE_OPERAND (t2, 0)));
	  else if (integer_zerop (t2))
	    return fold_convert (type, t1);
	}
      else if (code == MINUS_EXPR)
	{
	  if (integer_zerop (t2))
	    return fold_convert (type, t1);
	}

      return build2 (code, type, fold_convert (type, t1),
		     fold_convert (type, t2));
    }

  return fold_build2 (code, type, fold_convert (type, t1),
		      fold_convert (type, t2));
}
1599 /* Check whether TYPE1 and TYPE2 are equivalent integer types, suitable
1600 for use in int_const_binop, size_binop and size_diffop. */
bool
int_binop_types_match_p (enum tree_code code, const_tree type1, const_tree type2)
{
  if (TREE_CODE (type1) != INTEGER_TYPE && !POINTER_TYPE_P (type1))
    return false;
  if (TREE_CODE (type2) != INTEGER_TYPE && !POINTER_TYPE_P (type2))
    return false;

  switch (code)
    {
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return true;

    default:
      break;
    }

  return TYPE_UNSIGNED (type1) == TYPE_UNSIGNED (type2)
	 && TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
	 && TYPE_MODE (type1) == TYPE_MODE (type2);
}
1628 /* Combine two integer constants ARG1 and ARG2 under operation CODE
1629 to produce a new constant. Return NULL_TREE if we don't know how
1630 to evaluate CODE at compile-time.
1632 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
tree
int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2, int notrunc)
{
  unsigned HOST_WIDE_INT int1l, int2l;
  HOST_WIDE_INT int1h, int2h;
  unsigned HOST_WIDE_INT low;
  HOST_WIDE_INT hi;
  unsigned HOST_WIDE_INT garbagel;
  HOST_WIDE_INT garbageh;
  tree t;
  tree type = TREE_TYPE (arg1);
  int uns = TYPE_UNSIGNED (type);
  int is_sizetype
    = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
  int overflow = 0;

  int1l = TREE_INT_CST_LOW (arg1);
  int1h = TREE_INT_CST_HIGH (arg1);
  int2l = TREE_INT_CST_LOW (arg2);
  int2h = TREE_INT_CST_HIGH (arg2);

  switch (code)
    {
    case BIT_IOR_EXPR:
      low = int1l | int2l, hi = int1h | int2h;
      break;

    case BIT_XOR_EXPR:
      low = int1l ^ int2l, hi = int1h ^ int2h;
      break;

    case BIT_AND_EXPR:
      low = int1l & int2l, hi = int1h & int2h;
      break;

    case RSHIFT_EXPR:
      int2l = -int2l;
    case LSHIFT_EXPR:
      /* It's unclear from the C standard whether shifts can overflow.
	 The following code ignores overflow; perhaps a C standard
	 interpretation ruling is needed.  */
      lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
		     &low, &hi, !uns);
      break;

    case RROTATE_EXPR:
      int2l = - int2l;
    case LROTATE_EXPR:
      lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
		      &low, &hi);
      break;

    case PLUS_EXPR:
      overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
      break;

    case MINUS_EXPR:
      neg_double (int2l, int2h, &low, &hi);
      add_double (int1l, int1h, low, hi, &low, &hi);
      overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h);
      break;

    case MULT_EXPR:
      overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
      break;

    case TRUNC_DIV_EXPR:
    case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
    case EXACT_DIV_EXPR:
      /* This is a shortcut for a common special case.  */
      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
	  && !TREE_OVERFLOW (arg1)
	  && !TREE_OVERFLOW (arg2)
	  && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
	{
	  if (code == CEIL_DIV_EXPR)
	    int1l += int2l - 1;

	  low = int1l / int2l, hi = 0;
	  break;
	}

      /* ... fall through ...  */

    case ROUND_DIV_EXPR:
      if (int2h == 0 && int2l == 0)
	return NULL_TREE;
      if (int2h == 0 && int2l == 1)
	{
	  low = int1l, hi = int1h;
	  break;
	}
      if (int1l == int2l && int1h == int2h
	  && ! (int1l == 0 && int1h == 0))
	{
	  low = 1, hi = 0;
	  break;
	}
      overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
				       &low, &hi, &garbagel, &garbageh);
      break;

    case TRUNC_MOD_EXPR:
    case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
      /* This is a shortcut for a common special case.  */
      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
	  && !TREE_OVERFLOW (arg1)
	  && !TREE_OVERFLOW (arg2)
	  && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
	{
	  if (code == CEIL_MOD_EXPR)
	    int1l += int2l - 1;
	  low = int1l % int2l, hi = 0;
	  break;
	}

      /* ... fall through ...  */

    case ROUND_MOD_EXPR:
      if (int2h == 0 && int2l == 0)
	return NULL_TREE;
      overflow = div_and_round_double (code, uns,
				       int1l, int1h, int2l, int2h,
				       &garbagel, &garbageh, &low, &hi);
      break;

    case MIN_EXPR:
    case MAX_EXPR:
      if (uns)
	low = (((unsigned HOST_WIDE_INT) int1h
		< (unsigned HOST_WIDE_INT) int2h)
	       || (((unsigned HOST_WIDE_INT) int1h
		    == (unsigned HOST_WIDE_INT) int2h)
		   && int1l < int2l));
      else
	low = (int1h < int2h
	       || (int1h == int2h && int1l < int2l));

      if (low == (code == MIN_EXPR))
	low = int1l, hi = int1h;
      else
	low = int2l, hi = int2h;
      break;

    default:
      return NULL_TREE;
    }

  if (notrunc)
    {
      t = build_int_cst_wide (TREE_TYPE (arg1), low, hi);

      /* Propagate overflow flags ourselves.  */
      if (((!uns || is_sizetype) && overflow)
	  | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
	{
	  t = copy_node (t);
	  TREE_OVERFLOW (t) = 1;
	}
    }
  else
    t = force_fit_type_double (TREE_TYPE (arg1), low, hi, 1,
			       ((!uns || is_sizetype) && overflow)
			       | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));

  return t;
}
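
/* Illustrative sketch (editorial example): constant folding through
   int_const_binop.  Folding two INTEGER_CSTs, say 6 and 7, with
   MULT_EXPR yields the INTEGER_CST 42; folding a signed type's
   maximum value with PLUS_EXPR 1 instead yields a node with
   TREE_OVERFLOW set, rather than a silently wrapped constant.  */
static tree
int_const_binop_usage_example (tree c1, tree c2)
{
  return int_const_binop (MULT_EXPR, c1, c2, /*notrunc=*/0);
}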
1802 /* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
1803 constant. We assume ARG1 and ARG2 have the same data type, or at least
1804 are the same kind of constant and the same machine mode. Return zero if
1805 combining the constants is not allowed in the current operating mode.
1807 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
static tree
const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
{
  /* Sanity check for the recursive cases.  */
  if (!arg1 || !arg2)
    return NULL_TREE;

  STRIP_NOPS (arg1);
  STRIP_NOPS (arg2);

  if (TREE_CODE (arg1) == INTEGER_CST)
    return int_const_binop (code, arg1, arg2, notrunc);

  if (TREE_CODE (arg1) == REAL_CST)
    {
      enum machine_mode mode;
      REAL_VALUE_TYPE d1;
      REAL_VALUE_TYPE d2;
      REAL_VALUE_TYPE value;
      REAL_VALUE_TYPE result;
      bool inexact;
      tree t, type;

      /* The following codes are handled by real_arithmetic.  */
      switch (code)
	{
	case PLUS_EXPR:
	case MINUS_EXPR:
	case MULT_EXPR:
	case RDIV_EXPR:
	case MIN_EXPR:
	case MAX_EXPR:
	  break;

	default:
	  return NULL_TREE;
	}

      d1 = TREE_REAL_CST (arg1);
      d2 = TREE_REAL_CST (arg2);

      type = TREE_TYPE (arg1);
      mode = TYPE_MODE (type);

      /* Don't perform the operation if we honor signaling NaNs and
	 either operand is a NaN.  */
      if (HONOR_SNANS (mode)
	  && (REAL_VALUE_ISNAN (d1) || REAL_VALUE_ISNAN (d2)))
	return NULL_TREE;

      /* Don't perform the operation if it would raise a division
	 by zero exception.  */
      if (code == RDIV_EXPR
	  && REAL_VALUES_EQUAL (d2, dconst0)
	  && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	return NULL_TREE;

      /* If either operand is a NaN, just return it.  Otherwise, set up
	 for floating-point trap; we return an overflow.  */
      if (REAL_VALUE_ISNAN (d1))
	return arg1;
      else if (REAL_VALUE_ISNAN (d2))
	return arg2;

      inexact = real_arithmetic (&value, code, &d1, &d2);
      real_convert (&result, mode, &value);

      /* Don't constant fold this floating point operation if
	 the result has overflowed and flag_trapping_math is set.  */
      if (flag_trapping_math
	  && MODE_HAS_INFINITIES (mode)
	  && REAL_VALUE_ISINF (result)
	  && !REAL_VALUE_ISINF (d1)
	  && !REAL_VALUE_ISINF (d2))
	return NULL_TREE;

      /* Don't constant fold this floating point operation if the
	 result may depend upon the run-time rounding mode and
	 flag_rounding_math is set, or if GCC's software emulation
	 is unable to accurately represent the result.  */
      if ((flag_rounding_math
	   || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
	       && !flag_unsafe_math_optimizations))
	  && (inexact || !real_identical (&result, &value)))
	return NULL_TREE;

      t = build_real (type, result);

      TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
      return t;
    }

  if (TREE_CODE (arg1) == FIXED_CST)
    {
      FIXED_VALUE_TYPE f1;
      FIXED_VALUE_TYPE f2;
      FIXED_VALUE_TYPE result;
      tree t, type;
      int sat_p;
      bool overflow_p;

      /* The following codes are handled by fixed_arithmetic.  */
      switch (code)
	{
	case PLUS_EXPR:
	case MINUS_EXPR:
	case MULT_EXPR:
	case TRUNC_DIV_EXPR:
	  f2 = TREE_FIXED_CST (arg2);
	  break;

	case LSHIFT_EXPR:
	case RSHIFT_EXPR:
	  f2.data.high = TREE_INT_CST_HIGH (arg2);
	  f2.data.low = TREE_INT_CST_LOW (arg2);
	  f2.mode = SImode;
	  break;

	default:
	  return NULL_TREE;
	}

      f1 = TREE_FIXED_CST (arg1);
      type = TREE_TYPE (arg1);
      sat_p = TYPE_SATURATING (type);
      overflow_p = fixed_arithmetic (&result, code, &f1, &f2, sat_p);
      t = build_fixed (type, result);
      /* Propagate overflow flags.  */
      if (overflow_p | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
	{
	  TREE_OVERFLOW (t) = 1;
	  TREE_CONSTANT_OVERFLOW (t) = 1;
	}
      else if (TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2))
	TREE_CONSTANT_OVERFLOW (t) = 1;
      return t;
    }

  if (TREE_CODE (arg1) == COMPLEX_CST)
    {
      tree type = TREE_TYPE (arg1);
      tree r1 = TREE_REALPART (arg1);
      tree i1 = TREE_IMAGPART (arg1);
      tree r2 = TREE_REALPART (arg2);
      tree i2 = TREE_IMAGPART (arg2);
      tree real, imag;

      switch (code)
	{
	case PLUS_EXPR:
	case MINUS_EXPR:
	  real = const_binop (code, r1, r2, notrunc);
	  imag = const_binop (code, i1, i2, notrunc);
	  break;

	case MULT_EXPR:
	  real = const_binop (MINUS_EXPR,
			      const_binop (MULT_EXPR, r1, r2, notrunc),
			      const_binop (MULT_EXPR, i1, i2, notrunc),
			      notrunc);
	  imag = const_binop (PLUS_EXPR,
			      const_binop (MULT_EXPR, r1, i2, notrunc),
			      const_binop (MULT_EXPR, i1, r2, notrunc),
			      notrunc);
	  break;

	case RDIV_EXPR:
	  {
	    tree magsquared
	      = const_binop (PLUS_EXPR,
			     const_binop (MULT_EXPR, r2, r2, notrunc),
			     const_binop (MULT_EXPR, i2, i2, notrunc),
			     notrunc);
	    tree t1
	      = const_binop (PLUS_EXPR,
			     const_binop (MULT_EXPR, r1, r2, notrunc),
			     const_binop (MULT_EXPR, i1, i2, notrunc),
			     notrunc);
	    tree t2
	      = const_binop (MINUS_EXPR,
			     const_binop (MULT_EXPR, i1, r2, notrunc),
			     const_binop (MULT_EXPR, r1, i2, notrunc),
			     notrunc);

	    if (INTEGRAL_TYPE_P (TREE_TYPE (r1)))
	      code = TRUNC_DIV_EXPR;

	    real = const_binop (code, t1, magsquared, notrunc);
	    imag = const_binop (code, t2, magsquared, notrunc);
	  }
	  break;

	default:
	  return NULL_TREE;
	}

      if (real && imag)
	return build_complex (type, real, imag);
    }

  return NULL_TREE;
}
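
/* Illustrative sketch (editorial example): the complex product rule
   used above, (r1 + i1*i) * (r2 + i2*i) == (r1*r2 - i1*i2)
   + (r1*i2 + i1*r2)*i, in plain C; e.g. (1 + 2i) * (3 + 4i)
   == -5 + 10i.  */
static void
complex_mult_example (double r1, double i1, double r2, double i2,
		      double *rr, double *ri)
{
  *rr = r1 * r2 - i1 * i2;	/* Real part.  */
  *ri = r1 * i2 + i1 * r2;	/* Imaginary part.  */
}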
2012 /* Create a size type INT_CST node with NUMBER sign extended. KIND
2013 indicates which particular sizetype to create. */
tree
size_int_kind (HOST_WIDE_INT number, enum size_type_kind kind)
{
  return build_int_cst (sizetype_tab[(int) kind], number);
}
2021 /* Combine operands OP1 and OP2 with arithmetic operation CODE. CODE
2022 is a tree code. The type of the result is taken from the operands.
   Both must be equivalent integer types, as tested by int_binop_types_match_p.
2024 If the operands are constant, so is the result. */
tree
size_binop (enum tree_code code, tree arg0, tree arg1)
{
  tree type = TREE_TYPE (arg0);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return error_mark_node;

  gcc_assert (int_binop_types_match_p (code, TREE_TYPE (arg0),
				       TREE_TYPE (arg1)));

  /* Handle the special case of two integer constants faster.  */
  if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
    {
      /* And some specific cases even faster than that.  */
      if (code == PLUS_EXPR)
	{
	  if (integer_zerop (arg0) && !TREE_OVERFLOW (arg0))
	    return arg1;
	  if (integer_zerop (arg1) && !TREE_OVERFLOW (arg1))
	    return arg0;
	}
      else if (code == MINUS_EXPR)
	{
	  if (integer_zerop (arg1) && !TREE_OVERFLOW (arg1))
	    return arg0;
	}
      else if (code == MULT_EXPR)
	{
	  if (integer_onep (arg0) && !TREE_OVERFLOW (arg0))
	    return arg1;
	}

      /* Handle general case of two integer constants.  */
      return int_const_binop (code, arg0, arg1, 0);
    }

  return fold_build2 (code, type, arg0, arg1);
}
2066 /* Given two values, either both of sizetype or both of bitsizetype,
2067 compute the difference between the two values. Return the value
   in a signed type corresponding to the type of the operands.  */
tree
size_diffop (tree arg0, tree arg1)
{
  tree type = TREE_TYPE (arg0);
  tree ctype;

  gcc_assert (int_binop_types_match_p (MINUS_EXPR, TREE_TYPE (arg0),
				       TREE_TYPE (arg1)));

  /* If the type is already signed, just do the simple thing.  */
  if (!TYPE_UNSIGNED (type))
    return size_binop (MINUS_EXPR, arg0, arg1);

  if (type == sizetype)
    ctype = ssizetype;
  else if (type == bitsizetype)
    ctype = sbitsizetype;
  else
    ctype = signed_type_for (type);

  /* If either operand is not a constant, do the conversions to the signed
     type and subtract.  The hardware will do the right thing with any
     overflow in the subtraction.  */
  if (TREE_CODE (arg0) != INTEGER_CST || TREE_CODE (arg1) != INTEGER_CST)
    return size_binop (MINUS_EXPR, fold_convert (ctype, arg0),
		       fold_convert (ctype, arg1));

  /* If ARG0 is larger than ARG1, subtract and return the result in CTYPE.
     Otherwise, subtract the other way, convert to CTYPE (we know that can't
     overflow) and negate (which can't either).  Special-case a result
     of zero while we're here.  */
  if (tree_int_cst_equal (arg0, arg1))
    return build_int_cst (ctype, 0);
  else if (tree_int_cst_lt (arg1, arg0))
    return fold_convert (ctype, size_binop (MINUS_EXPR, arg0, arg1));
  else
    return size_binop (MINUS_EXPR, build_int_cst (ctype, 0),
		       fold_convert (ctype, size_binop (MINUS_EXPR,
							arg1, arg0)));
}
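
/* Illustrative sketch (editorial example): the constant paths above.
   With sizetype operands 2 and 5, size_diffop must not compute the
   huge unsigned value 2 - 5; it computes 5 - 2 == 3 in sizetype
   (which cannot overflow), converts to the signed ssizetype, and
   negates, yielding -3.  */
static tree
size_diffop_usage_example (void)
{
  /* size_int builds sizetype constants (see the header comment).  */
  return size_diffop (size_int (2), size_int (5));  /* ssizetype -3.  */
}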
2111 /* A subroutine of fold_convert_const handling conversions of an
2112 INTEGER_CST to another integer type. */
static tree
fold_convert_const_int_from_int (tree type, const_tree arg1)
{
  tree t;

  /* Given an integer constant, make a new constant with the new type,
     appropriately sign-extended or truncated.  */
  t = force_fit_type_double (type, TREE_INT_CST_LOW (arg1),
			     TREE_INT_CST_HIGH (arg1),
			     /* Don't set the overflow when
				converting from a pointer,  */
			     !POINTER_TYPE_P (TREE_TYPE (arg1))
			     /* or to a sizetype with the same signedness
				and unchanged precision.
				??? sizetype is always sign-extended,
				but its signedness depends on the
				frontend.  Thus we see spurious overflows
				here if we do not check this.  */
			     && !((TYPE_PRECISION (TREE_TYPE (arg1))
				   == TYPE_PRECISION (type))
				  && (TYPE_UNSIGNED (TREE_TYPE (arg1))
				      == TYPE_UNSIGNED (type))
				  && ((TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE
				       && TYPE_IS_SIZETYPE (TREE_TYPE (arg1)))
				      || (TREE_CODE (type) == INTEGER_TYPE
					  && TYPE_IS_SIZETYPE (type)))),
			     (TREE_INT_CST_HIGH (arg1) < 0
			      && (TYPE_UNSIGNED (type)
				  < TYPE_UNSIGNED (TREE_TYPE (arg1))))
			     | TREE_OVERFLOW (arg1));

  return t;
}
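
/* Illustrative sketch (editorial example): the sign-extend-or-truncate
   effect of the conversion above, mirrored in plain C for an 8-bit
   signed target type.  The conversion keeps only the low PREC bits
   and then reads them back under the new type's sign.  */
static signed char
int_conversion_example (unsigned int x)
{
  /* 0xFF reads back as -1; 0x100 truncates to 0 (the usual
     implementation-defined narrowing on two's complement hosts).  */
  return (signed char) x;
}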
/* A subroutine of fold_convert_const handling conversions of a REAL_CST
2149 to an integer type. */
static tree
fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg1)
{
  int overflow = 0;
  tree t;

  /* The following code implements the floating point to integer
     conversion rules required by the Java Language Specification,
     that IEEE NaNs are mapped to zero and values that overflow
     the target precision saturate, i.e. values greater than
     INT_MAX are mapped to INT_MAX, and values less than INT_MIN
     are mapped to INT_MIN.  These semantics are allowed by the
     C and C++ standards that simply state that the behavior of
     FP-to-integer conversion is unspecified upon overflow.  */

  HOST_WIDE_INT high, low;
  REAL_VALUE_TYPE r;
  REAL_VALUE_TYPE x = TREE_REAL_CST (arg1);

  switch (code)
    {
    case FIX_TRUNC_EXPR:
      real_trunc (&r, VOIDmode, &x);
      break;

    default:
      gcc_unreachable ();
    }

  /* If R is NaN, return zero and show we have an overflow.  */
  if (REAL_VALUE_ISNAN (r))
    {
      overflow = 1;
      high = 0;
      low = 0;
    }

  /* See if R is less than the lower bound or greater than the
     upper bound.  */

  if (! overflow)
    {
      tree lt = TYPE_MIN_VALUE (type);
      REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt);
      if (REAL_VALUES_LESS (r, l))
	{
	  overflow = 1;
	  high = TREE_INT_CST_HIGH (lt);
	  low = TREE_INT_CST_LOW (lt);
	}
    }

  if (! overflow)
    {
      tree ut = TYPE_MAX_VALUE (type);
      if (ut)
	{
	  REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut);
	  if (REAL_VALUES_LESS (u, r))
	    {
	      overflow = 1;
	      high = TREE_INT_CST_HIGH (ut);
	      low = TREE_INT_CST_LOW (ut);
	    }
	}
    }

  if (! overflow)
    REAL_VALUE_TO_INT (&low, &high, r);

  t = force_fit_type_double (type, low, high, -1,
			     overflow | TREE_OVERFLOW (arg1));
  return t;
}
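
/* Illustrative sketch (editorial example): the Java-style saturating
   rules above, mirrored in plain C for a 32-bit signed target type.
   NaN maps to zero and out-of-range values saturate at the type's
   bounds; fold_convert_const_int_from_real additionally sets
   TREE_OVERFLOW in those cases.  */
static HOST_WIDE_INT
saturating_fp_to_int32_example (double r)
{
  if (r != r)				/* NaN -> 0.  */
    return 0;
  if (r >= 2147483647.0)		/* > INT_MAX saturates.  */
    return 2147483647;
  if (r <= -2147483648.0)		/* < INT_MIN saturates.  */
    return (HOST_WIDE_INT) -2147483647 - 1;
  return (HOST_WIDE_INT) r;		/* In range: truncate.  */
}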
/* A subroutine of fold_convert_const handling conversions of a
   FIXED_CST to an integer type.  */

static tree
fold_convert_const_int_from_fixed (tree type, const_tree arg1)
{
  tree t;
  double_int temp, temp_trunc;
  unsigned int mode;

  /* Right shift FIXED_CST to temp by fbit.  */
  temp = TREE_FIXED_CST (arg1).data;
  mode = TREE_FIXED_CST (arg1).mode;
  if (GET_MODE_FBIT (mode) < 2 * HOST_BITS_PER_WIDE_INT)
    {
      lshift_double (temp.low, temp.high,
                     - GET_MODE_FBIT (mode), 2 * HOST_BITS_PER_WIDE_INT,
                     &temp.low, &temp.high, SIGNED_FIXED_POINT_MODE_P (mode));

      /* Left shift temp to temp_trunc by fbit.  */
      lshift_double (temp.low, temp.high,
                     GET_MODE_FBIT (mode), 2 * HOST_BITS_PER_WIDE_INT,
                     &temp_trunc.low, &temp_trunc.high,
                     SIGNED_FIXED_POINT_MODE_P (mode));
    }
  else
    {
      temp.low = 0;
      temp.high = 0;
      temp_trunc.low = 0;
      temp_trunc.high = 0;
    }

  /* If FIXED_CST is negative, we need to round the value toward 0:
     if any fractional bits were truncated, add 1 to TEMP.  */
  if (SIGNED_FIXED_POINT_MODE_P (mode) && temp_trunc.high < 0
      && !double_int_equal_p (TREE_FIXED_CST (arg1).data, temp_trunc))
    {
      double_int one;
      one.low = 1;
      one.high = 0;
      temp = double_int_add (temp, one);
    }

  /* Given a fixed-point constant, make new constant with new type,
     appropriately sign-extended or truncated.  */
  t = force_fit_type_double (type, temp.low, temp.high, -1,
                             (temp.high < 0
                              && (TYPE_UNSIGNED (type)
                                  < TYPE_UNSIGNED (TREE_TYPE (arg1))))
                             | TREE_OVERFLOW (arg1));

  return t;
}
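/* Illustrative sketch (not part of GCC): rounding a negative
   fixed-point value toward zero.  An arithmetic right shift by FBIT
   rounds toward negative infinity, so when the value is negative and
   the shifted-out fraction was nonzero we add 1, mirroring the
   temp_trunc comparison above.  Hypothetical helper on a plain
   int64_t representation (assumes an arithmetic right shift).  */
#if 0
#include <stdint.h>

static int64_t
fixed_to_int_trunc (int64_t fx, unsigned fbit)
{
  int64_t q = fx >> fbit;           /* rounds toward -infinity */
  int64_t frac = fx - (q << fbit);  /* the truncated fractional bits */

  if (fx < 0 && frac != 0)
    q += 1;                         /* correct to round toward zero */
  return q;
}

/* With FBIT == 8, fx == -384 represents -1.5:
   fixed_to_int_trunc (-384, 8) == -1, not -2.  */
#endif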
/* A subroutine of fold_convert_const handling conversions of a REAL_CST
   to another floating point type.  */

static tree
fold_convert_const_real_from_real (tree type, const_tree arg1)
{
  REAL_VALUE_TYPE value;
  tree t;

  real_convert (&value, TYPE_MODE (type), &TREE_REAL_CST (arg1));
  t = build_real (type, value);

  TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
  return t;
}
/* A subroutine of fold_convert_const handling conversions of a FIXED_CST
   to a floating point type.  */

static tree
fold_convert_const_real_from_fixed (tree type, const_tree arg1)
{
  REAL_VALUE_TYPE value;
  tree t;

  real_convert_from_fixed (&value, TYPE_MODE (type), &TREE_FIXED_CST (arg1));
  t = build_real (type, value);

  TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
  TREE_CONSTANT_OVERFLOW (t)
    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
  return t;
}
/* A subroutine of fold_convert_const handling conversions of a FIXED_CST
   to another fixed-point type.  */

static tree
fold_convert_const_fixed_from_fixed (tree type, const_tree arg1)
{
  FIXED_VALUE_TYPE value;
  tree t;
  bool overflow_p;

  overflow_p = fixed_convert (&value, TYPE_MODE (type), &TREE_FIXED_CST (arg1),
                              TYPE_SATURATING (type));
  t = build_fixed (type, value);

  /* Propagate overflow flags.  */
  if (overflow_p | TREE_OVERFLOW (arg1))
    {
      TREE_OVERFLOW (t) = 1;
      TREE_CONSTANT_OVERFLOW (t) = 1;
    }
  else if (TREE_CONSTANT_OVERFLOW (arg1))
    TREE_CONSTANT_OVERFLOW (t) = 1;
  return t;
}
/* A subroutine of fold_convert_const handling conversions of an INTEGER_CST
   to a fixed-point type.  */

static tree
fold_convert_const_fixed_from_int (tree type, const_tree arg1)
{
  FIXED_VALUE_TYPE value;
  tree t;
  bool overflow_p;

  overflow_p = fixed_convert_from_int (&value, TYPE_MODE (type),
                                       TREE_INT_CST (arg1),
                                       TYPE_UNSIGNED (TREE_TYPE (arg1)),
                                       TYPE_SATURATING (type));
  t = build_fixed (type, value);

  /* Propagate overflow flags.  */
  if (overflow_p | TREE_OVERFLOW (arg1))
    {
      TREE_OVERFLOW (t) = 1;
      TREE_CONSTANT_OVERFLOW (t) = 1;
    }
  else if (TREE_CONSTANT_OVERFLOW (arg1))
    TREE_CONSTANT_OVERFLOW (t) = 1;
  return t;
}
/* A subroutine of fold_convert_const handling conversions of a REAL_CST
   to a fixed-point type.  */

static tree
fold_convert_const_fixed_from_real (tree type, const_tree arg1)
{
  FIXED_VALUE_TYPE value;
  tree t;
  bool overflow_p;

  overflow_p = fixed_convert_from_real (&value, TYPE_MODE (type),
                                        &TREE_REAL_CST (arg1),
                                        TYPE_SATURATING (type));
  t = build_fixed (type, value);

  /* Propagate overflow flags.  */
  if (overflow_p | TREE_OVERFLOW (arg1))
    {
      TREE_OVERFLOW (t) = 1;
      TREE_CONSTANT_OVERFLOW (t) = 1;
    }
  else if (TREE_CONSTANT_OVERFLOW (arg1))
    TREE_CONSTANT_OVERFLOW (t) = 1;
  return t;
}
/* Attempt to fold type conversion operation CODE of expression ARG1 to
   type TYPE.  If no simplification can be done return NULL_TREE.  */

static tree
fold_convert_const (enum tree_code code, tree type, tree arg1)
{
  if (TREE_TYPE (arg1) == type)
    return arg1;

  if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
        return fold_convert_const_int_from_int (type, arg1);
      else if (TREE_CODE (arg1) == REAL_CST)
        return fold_convert_const_int_from_real (code, type, arg1);
      else if (TREE_CODE (arg1) == FIXED_CST)
        return fold_convert_const_int_from_fixed (type, arg1);
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
        return build_real_from_int_cst (type, arg1);
      else if (TREE_CODE (arg1) == REAL_CST)
        return fold_convert_const_real_from_real (type, arg1);
      else if (TREE_CODE (arg1) == FIXED_CST)
        return fold_convert_const_real_from_fixed (type, arg1);
    }
  else if (TREE_CODE (type) == FIXED_POINT_TYPE)
    {
      if (TREE_CODE (arg1) == FIXED_CST)
        return fold_convert_const_fixed_from_fixed (type, arg1);
      else if (TREE_CODE (arg1) == INTEGER_CST)
        return fold_convert_const_fixed_from_int (type, arg1);
      else if (TREE_CODE (arg1) == REAL_CST)
        return fold_convert_const_fixed_from_real (type, arg1);
    }
  return NULL_TREE;
}
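/* Illustrative note (not from the sources): folding FIX_TRUNC_EXPR of
   the REAL_CST 3.75 to a 32-bit integer type goes through
   fold_convert_const_int_from_real above and produces the INTEGER_CST
   3; a non-constant ARG1 simply falls through to NULL_TREE.  */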
/* Construct a vector of zero elements of vector type TYPE.  */

static tree
build_zero_vector (tree type)
{
  tree elem, list;
  int i, units;

  elem = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
  units = TYPE_VECTOR_SUBPARTS (type);

  list = NULL_TREE;
  for (i = 0; i < units; i++)
    list = tree_cons (NULL_TREE, elem, list);
  return build_vector (type, list);
}
/* Returns true if ARG is convertible to TYPE using a NOP_EXPR.  */

bool
fold_convertible_p (const_tree type, const_tree arg)
{
  tree orig = TREE_TYPE (arg);

  if (type == orig)
    return true;

  if (TREE_CODE (arg) == ERROR_MARK
      || TREE_CODE (type) == ERROR_MARK
      || TREE_CODE (orig) == ERROR_MARK)
    return false;

  if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
    return true;

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
    case POINTER_TYPE: case REFERENCE_TYPE:
    case OFFSET_TYPE:
      if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
          || TREE_CODE (orig) == OFFSET_TYPE)
        return true;
      return (TREE_CODE (orig) == VECTOR_TYPE
              && tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));

    case REAL_TYPE:
    case FIXED_POINT_TYPE:
    case COMPLEX_TYPE:
    case VECTOR_TYPE:
    case VOID_TYPE:
      return TREE_CODE (type) == TREE_CODE (orig);

    default:
      return false;
    }
}
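/* Illustrative note (not from the sources): an integral expression is
   NOP-convertible to an enum or pointer type (the first group above),
   but converting int to float is not a mere NOP_EXPR, so for REAL_TYPE
   the source type must already be a REAL_TYPE for this to hold.  */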
/* Convert expression ARG to type TYPE.  Used by the middle-end for
   simple conversions in preference to calling the front-end's convert.  */

tree
fold_convert (tree type, tree arg)
{
  tree orig = TREE_TYPE (arg);
  tree tem;

  if (type == orig)
    return arg;

  if (TREE_CODE (arg) == ERROR_MARK
      || TREE_CODE (type) == ERROR_MARK
      || TREE_CODE (orig) == ERROR_MARK)
    return error_mark_node;

  if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
    return fold_build1 (NOP_EXPR, type, arg);

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
    case POINTER_TYPE: case REFERENCE_TYPE:
    case OFFSET_TYPE:
      if (TREE_CODE (arg) == INTEGER_CST)
        {
          tem = fold_convert_const (NOP_EXPR, type, arg);
          if (tem != NULL_TREE)
            return tem;
        }
      if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
          || TREE_CODE (orig) == OFFSET_TYPE)
        return fold_build1 (NOP_EXPR, type, arg);
      if (TREE_CODE (orig) == COMPLEX_TYPE)
        {
          tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
          return fold_convert (type, tem);
        }
      gcc_assert (TREE_CODE (orig) == VECTOR_TYPE
                  && tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
      return fold_build1 (NOP_EXPR, type, arg);

    case REAL_TYPE:
      if (TREE_CODE (arg) == INTEGER_CST)
        {
          tem = fold_convert_const (FLOAT_EXPR, type, arg);
          if (tem != NULL_TREE)
            return tem;
        }
      else if (TREE_CODE (arg) == REAL_CST)
        {
          tem = fold_convert_const (NOP_EXPR, type, arg);
          if (tem != NULL_TREE)
            return tem;
        }
      else if (TREE_CODE (arg) == FIXED_CST)
        {
          tem = fold_convert_const (FIXED_CONVERT_EXPR, type, arg);
          if (tem != NULL_TREE)
            return tem;
        }

      switch (TREE_CODE (orig))
        {
        case INTEGER_TYPE:
        case BOOLEAN_TYPE: case ENUMERAL_TYPE:
        case POINTER_TYPE: case REFERENCE_TYPE:
          return fold_build1 (FLOAT_EXPR, type, arg);

        case REAL_TYPE:
          return fold_build1 (NOP_EXPR, type, arg);

        case FIXED_POINT_TYPE:
          return fold_build1 (FIXED_CONVERT_EXPR, type, arg);

        case COMPLEX_TYPE:
          tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
          return fold_convert (type, tem);

        default:
          gcc_unreachable ();
        }

    case FIXED_POINT_TYPE:
      if (TREE_CODE (arg) == FIXED_CST || TREE_CODE (arg) == INTEGER_CST
          || TREE_CODE (arg) == REAL_CST)
        {
          tem = fold_convert_const (FIXED_CONVERT_EXPR, type, arg);
          if (tem != NULL_TREE)
            return tem;
        }

      switch (TREE_CODE (orig))
        {
        case FIXED_POINT_TYPE:
        case INTEGER_TYPE:
        case ENUMERAL_TYPE:
        case BOOLEAN_TYPE:
        case REAL_TYPE:
          return fold_build1 (FIXED_CONVERT_EXPR, type, arg);

        case COMPLEX_TYPE:
          tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
          return fold_convert (type, tem);

        default:
          gcc_unreachable ();
        }

    case COMPLEX_TYPE:
      switch (TREE_CODE (orig))
        {
        case INTEGER_TYPE:
        case BOOLEAN_TYPE: case ENUMERAL_TYPE:
        case POINTER_TYPE: case REFERENCE_TYPE:
        case REAL_TYPE:
        case FIXED_POINT_TYPE:
          return build2 (COMPLEX_EXPR, type,
                         fold_convert (TREE_TYPE (type), arg),
                         fold_convert (TREE_TYPE (type), integer_zero_node));

        case COMPLEX_TYPE:
          {
            tree rpart, ipart;

            if (TREE_CODE (arg) == COMPLEX_EXPR)
              {
                rpart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 0));
                ipart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 1));
                return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
              }

            arg = save_expr (arg);
            rpart = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
            ipart = fold_build1 (IMAGPART_EXPR, TREE_TYPE (orig), arg);
            rpart = fold_convert (TREE_TYPE (type), rpart);
            ipart = fold_convert (TREE_TYPE (type), ipart);
            return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
          }

        default:
          gcc_unreachable ();
        }

    case VECTOR_TYPE:
      if (integer_zerop (arg))
        return build_zero_vector (type);
      gcc_assert (tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
      gcc_assert (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
                  || TREE_CODE (orig) == VECTOR_TYPE);
      return fold_build1 (VIEW_CONVERT_EXPR, type, arg);

    case VOID_TYPE:
      tem = fold_ignored_result (arg);
      if (TREE_CODE (tem) == GIMPLE_MODIFY_STMT)
        return tem;
      return fold_build1 (NOP_EXPR, type, tem);

    default:
      gcc_unreachable ();
    }
}
/* Return false if expr can be assumed not to be an lvalue, true
   otherwise.  */

static bool
maybe_lvalue_p (const_tree x)
{
  /* We only need to wrap lvalue tree codes.  */
  switch (TREE_CODE (x))
  {
  case VAR_DECL: case PARM_DECL: case RESULT_DECL:
  case LABEL_DECL: case FUNCTION_DECL: case SSA_NAME:

  case COMPONENT_REF: case INDIRECT_REF:
  case ALIGN_INDIRECT_REF:
  case MISALIGNED_INDIRECT_REF:
  case ARRAY_REF:
  case ARRAY_RANGE_REF:
  case BIT_FIELD_REF: case OBJ_TYPE_REF:

  case REALPART_EXPR: case IMAGPART_EXPR:
  case PREINCREMENT_EXPR:
  case PREDECREMENT_EXPR:
  case SAVE_EXPR:
  case TRY_CATCH_EXPR:
  case WITH_CLEANUP_EXPR:
  case COMPOUND_EXPR: case MODIFY_EXPR:
  case GIMPLE_MODIFY_STMT:
  case TARGET_EXPR: case COND_EXPR:
  case BIND_EXPR: case MIN_EXPR: case MAX_EXPR:
    break;

  default:
    /* Assume the worst for front-end tree codes.  */
    if ((int)TREE_CODE (x) >= NUM_TREE_CODES)
      break;
    return false;
  }

  return true;
}
/* Return an expr equal to X but certainly not valid as an lvalue.  */

tree
non_lvalue (tree x)
{
  /* While we are in GIMPLE, NON_LVALUE_EXPR doesn't mean anything to
     us.  */
  if (in_gimple_form)
    return x;

  if (! maybe_lvalue_p (x))
    return x;
  return build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
}
/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
   Zero means allow extended lvalues.  */

int pedantic_lvalues;

/* When pedantic, return an expr equal to X but certainly not valid as a
   pedantic lvalue.  Otherwise, return X.  */

static tree
pedantic_non_lvalue (tree x)
{
  if (pedantic_lvalues)
    return non_lvalue (x);
  else
    return x;
}

/* Given a tree comparison code, return the code that is the logical inverse
   of the given code.  It is not safe to do this for floating-point
   comparisons, except for NE_EXPR and EQ_EXPR, so we receive a flag saying
   whether NaNs must be honored as well: if reversing the comparison is
   unsafe, return ERROR_MARK.  */

enum tree_code
invert_tree_comparison (enum tree_code code, bool honor_nans)
{
  if (honor_nans && flag_trapping_math)
    return ERROR_MARK;

  switch (code)
    {
    case EQ_EXPR:
      return NE_EXPR;
    case NE_EXPR:
      return EQ_EXPR;
    case GT_EXPR:
      return honor_nans ? UNLE_EXPR : LE_EXPR;
    case GE_EXPR:
      return honor_nans ? UNLT_EXPR : LT_EXPR;
    case LT_EXPR:
      return honor_nans ? UNGE_EXPR : GE_EXPR;
    case LE_EXPR:
      return honor_nans ? UNGT_EXPR : GT_EXPR;
    case LTGT_EXPR:
      return UNEQ_EXPR;
    case UNEQ_EXPR:
      return LTGT_EXPR;
    case UNGT_EXPR:
      return LE_EXPR;
    case UNGE_EXPR:
      return LT_EXPR;
    case UNLT_EXPR:
      return GE_EXPR;
    case UNLE_EXPR:
      return GT_EXPR;
    case ORDERED_EXPR:
      return UNORDERED_EXPR;
    case UNORDERED_EXPR:
      return ORDERED_EXPR;
    default:
      gcc_unreachable ();
    }
}
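/* Illustrative sketch (not part of GCC): why LT cannot be inverted to
   plain GE once NaNs are honored.  With an unordered operand both
   x < y and x >= y are false, so the logical inverse of LT is UNGE
   ("unordered or greater or equal"), as returned above.  */
#if 0
#include <assert.h>
#include <math.h>

static void
demo (void)
{
  double x = NAN, y = 1.0;
  assert (!(x < y));                      /* LT is false when unordered */
  assert (!(x >= y));                     /* ...but GE is false too */
  assert (isunordered (x, y) || x >= y);  /* UNGE: the true inverse of LT */
}
#endif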
/* Similar, but return the comparison that results if the operands are
   swapped.  This is safe for floating-point.  */

enum tree_code
swap_tree_comparison (enum tree_code code)
{
  switch (code)
    {
    case EQ_EXPR: case NE_EXPR:
    case ORDERED_EXPR: case UNORDERED_EXPR:
    case LTGT_EXPR: case UNEQ_EXPR:
      return code;
    case GT_EXPR:   return LT_EXPR;
    case GE_EXPR:   return LE_EXPR;
    case LT_EXPR:   return GT_EXPR;
    case LE_EXPR:   return GE_EXPR;
    case UNGT_EXPR: return UNLT_EXPR;
    case UNGE_EXPR: return UNLE_EXPR;
    case UNLT_EXPR: return UNGT_EXPR;
    case UNLE_EXPR: return UNGE_EXPR;
    default:
      gcc_unreachable ();
    }
}

/* Convert a comparison tree code from an enum tree_code representation
   into a compcode bit-based encoding.  This function is the inverse of
   compcode_to_comparison.  */

static enum comparison_code
comparison_to_compcode (enum tree_code code)
{
  switch (code)
    {
    case LT_EXPR:  return COMPCODE_LT;
    case EQ_EXPR:  return COMPCODE_EQ;
    case LE_EXPR:  return COMPCODE_LE;
    case GT_EXPR:  return COMPCODE_GT;
    case NE_EXPR:  return COMPCODE_NE;
    case GE_EXPR:  return COMPCODE_GE;
    case ORDERED_EXPR:
      return COMPCODE_ORD;
    case UNORDERED_EXPR:
      return COMPCODE_UNORD;
    case UNLT_EXPR:
      return COMPCODE_UNLT;
    case UNEQ_EXPR:
      return COMPCODE_UNEQ;
    case UNLE_EXPR:
      return COMPCODE_UNLE;
    case UNGT_EXPR:
      return COMPCODE_UNGT;
    case LTGT_EXPR:
      return COMPCODE_LTGT;
    case UNGE_EXPR:
      return COMPCODE_UNGE;
    default:
      gcc_unreachable ();
    }
}
/* Convert a compcode bit-based encoding of a comparison operator back
   to GCC's enum tree_code representation.  This function is the
   inverse of comparison_to_compcode.  */

static enum tree_code
compcode_to_comparison (enum comparison_code code)
{
  switch (code)
    {
    case COMPCODE_LT:  return LT_EXPR;
    case COMPCODE_EQ:  return EQ_EXPR;
    case COMPCODE_LE:  return LE_EXPR;
    case COMPCODE_GT:  return GT_EXPR;
    case COMPCODE_NE:  return NE_EXPR;
    case COMPCODE_GE:  return GE_EXPR;
    case COMPCODE_ORD:
      return ORDERED_EXPR;
    case COMPCODE_UNORD:
      return UNORDERED_EXPR;
    case COMPCODE_UNLT: return UNLT_EXPR;
    case COMPCODE_UNEQ: return UNEQ_EXPR;
    case COMPCODE_UNLE: return UNLE_EXPR;
    case COMPCODE_UNGT: return UNGT_EXPR;
    case COMPCODE_LTGT: return LTGT_EXPR;
    case COMPCODE_UNGE: return UNGE_EXPR;
    default:
      gcc_unreachable ();
    }
}
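/* Illustrative sketch (not part of the sources): the compcode encoding
   gives one bit each to LT, EQ, GT and UNORD, so ANDing or ORing two
   codes combines the predicates directly.  The enum below restates
   that encoding with hypothetical local names.  */
#if 0
#include <assert.h>

enum cc { CC_LT = 1, CC_EQ = 2, CC_LE = 3, CC_GT = 4,
          CC_GE = 6, CC_ORD = 7, CC_UNORD = 8 };

static void
demo (void)
{
  assert ((CC_LT | CC_EQ) == CC_LE);  /* (x < y) || (x == y)  =>  x <= y */
  assert ((CC_LE & CC_GE) == CC_EQ);  /* (x <= y) && (x >= y) =>  x == y */
}
#endif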
/* Return a tree for the comparison which is the combination of
   doing the AND or OR (depending on CODE) of the two operations LCODE
   and RCODE on the identical operands LL_ARG and LR_ARG.  Take into account
   the possibility of trapping if the mode has NaNs, and return NULL_TREE
   if this makes the transformation invalid.  */

static tree
combine_comparisons (enum tree_code code, enum tree_code lcode,
                     enum tree_code rcode, tree truth_type,
                     tree ll_arg, tree lr_arg)
{
  bool honor_nans = HONOR_NANS (TYPE_MODE (TREE_TYPE (ll_arg)));
  enum comparison_code lcompcode = comparison_to_compcode (lcode);
  enum comparison_code rcompcode = comparison_to_compcode (rcode);
  enum comparison_code compcode;

  switch (code)
    {
    case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR:
      compcode = lcompcode & rcompcode;
      break;

    case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR:
      compcode = lcompcode | rcompcode;
      break;

    default:
      return NULL_TREE;
    }

  if (!honor_nans)
    {
      /* Eliminate unordered comparisons, as well as LTGT and ORD
         which are not used unless the mode has NaNs.  */
      compcode &= ~COMPCODE_UNORD;
      if (compcode == COMPCODE_LTGT)
        compcode = COMPCODE_NE;
      else if (compcode == COMPCODE_ORD)
        compcode = COMPCODE_TRUE;
    }
  else if (flag_trapping_math)
    {
      /* Check that the original operation and the optimized ones will trap
         under the same condition.  */
      bool ltrap = (lcompcode & COMPCODE_UNORD) == 0
                   && (lcompcode != COMPCODE_EQ)
                   && (lcompcode != COMPCODE_ORD);
      bool rtrap = (rcompcode & COMPCODE_UNORD) == 0
                   && (rcompcode != COMPCODE_EQ)
                   && (rcompcode != COMPCODE_ORD);
      bool trap = (compcode & COMPCODE_UNORD) == 0
                  && (compcode != COMPCODE_EQ)
                  && (compcode != COMPCODE_ORD);

      /* In a short-circuited boolean expression the LHS might be
         such that the RHS, if evaluated, will never trap.  For
         example, in ORD (x, y) && (x < y), we evaluate the RHS only
         if neither x nor y is NaN.  (This is a mixed blessing: for
         example, the expression above will never trap, hence
         optimizing it to x < y would be invalid).  */
      if ((code == TRUTH_ORIF_EXPR && (lcompcode & COMPCODE_UNORD))
          || (code == TRUTH_ANDIF_EXPR && !(lcompcode & COMPCODE_UNORD)))
        rtrap = false;

      /* If the comparison was short-circuited, and only the RHS
         trapped, we may now generate a spurious trap.  */
      if (rtrap && !ltrap
          && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
        return NULL_TREE;

      /* If we changed the conditions that cause a trap, we lose.  */
      if ((ltrap || rtrap) != trap)
        return NULL_TREE;
    }

  if (compcode == COMPCODE_TRUE)
    return constant_boolean_node (true, truth_type);
  else if (compcode == COMPCODE_FALSE)
    return constant_boolean_node (false, truth_type);
  else
    return fold_build2 (compcode_to_comparison (compcode),
                        truth_type, ll_arg, lr_arg);
}
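/* Illustrative example (not from the sources): combining
   (x < y) && (x == y) gives COMPCODE_LT & COMPCODE_EQ == COMPCODE_FALSE,
   so the conjunction folds to constant false; combining
   (x < y) || (x == y) gives COMPCODE_LE, i.e. x <= y (assuming NaNs
   need not be honored).  */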
/* Return nonzero if CODE is a tree code that represents a truth value.  */

static int
truth_value_p (enum tree_code code)
{
  return (TREE_CODE_CLASS (code) == tcc_comparison
          || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
          || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
          || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
}
/* Return nonzero if two operands (typically of the same tree node)
   are necessarily equal.  If either argument has side-effects this
   function returns zero.  FLAGS modifies behavior as follows:

   If OEP_ONLY_CONST is set, only return nonzero for constants.
   This function tests whether the operands are indistinguishable;
   it does not test whether they are equal using C's == operation.
   The distinction is important for IEEE floating point, because
   (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
   (2) two NaNs may be indistinguishable, but NaN!=NaN.

   If OEP_ONLY_CONST is unset, a VAR_DECL is considered equal to itself
   even though it may hold multiple values during a function.
   This is because a GCC tree node guarantees that nothing else is
   executed between the evaluation of its "operands" (which may often
   be evaluated in arbitrary order).  Hence if the operands themselves
   don't side-effect, the VAR_DECLs, PARM_DECLs etc... must hold the
   same value in each operand/subexpression.  Hence leaving OEP_ONLY_CONST
   unset means assuming isochronic (or instantaneous) tree equivalence.
   Unless comparing arbitrary expression trees, such as from different
   statements, this flag can usually be left unset.

   If OEP_PURE_SAME is set, then pure functions with identical arguments
   are considered the same.  It is used when the caller has other ways
   to ensure that global memory is unchanged in between.  */
int
operand_equal_p (const_tree arg0, const_tree arg1, unsigned int flags)
{
  /* If either is ERROR_MARK, they aren't equal.  */
  if (TREE_CODE (arg0) == ERROR_MARK || TREE_CODE (arg1) == ERROR_MARK)
    return 0;

  /* If the types don't have the same signedness, then we can't consider
     them equal.  We must check this before the STRIP_NOPS calls
     because they may change the signedness of the arguments.  */
  if (TYPE_UNSIGNED (TREE_TYPE (arg0)) != TYPE_UNSIGNED (TREE_TYPE (arg1)))
    return 0;

  /* If the types don't have the same precision, then it is not safe
     to strip NOPs.  */
  if (TYPE_PRECISION (TREE_TYPE (arg0)) != TYPE_PRECISION (TREE_TYPE (arg1)))
    return 0;

  STRIP_NOPS (arg0);
  STRIP_NOPS (arg1);

  /* In case both args are comparisons but with different comparison
     code, try to swap the comparison operands of one arg to produce
     a match and compare that variant.  */
  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      && COMPARISON_CLASS_P (arg0)
      && COMPARISON_CLASS_P (arg1))
    {
      enum tree_code swap_code = swap_tree_comparison (TREE_CODE (arg1));

      if (TREE_CODE (arg0) == swap_code)
        return operand_equal_p (TREE_OPERAND (arg0, 0),
                                TREE_OPERAND (arg1, 1), flags)
               && operand_equal_p (TREE_OPERAND (arg0, 1),
                                   TREE_OPERAND (arg1, 0), flags);
    }

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      /* This is needed for conversions and for COMPONENT_REF.
         Might as well play it safe and always test this.  */
      || TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK
      || TREE_CODE (TREE_TYPE (arg1)) == ERROR_MARK
      || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
    return 0;

  /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
     We don't care about side effects in that case because the SAVE_EXPR
     takes care of that for us.  In all other cases, two expressions are
     equal if they have no side effects.  If we have two identical
     expressions with side effects that should be treated the same due
     to the only side effects being identical SAVE_EXPR's, that will
     be detected in the recursive calls below.  */
  if (arg0 == arg1 && ! (flags & OEP_ONLY_CONST)
      && (TREE_CODE (arg0) == SAVE_EXPR
          || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
    return 1;
  /* Next handle constant cases, those for which we can return 1 even
     if ONLY_CONST is set.  */
  if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1))
    switch (TREE_CODE (arg0))
      {
      case INTEGER_CST:
        return tree_int_cst_equal (arg0, arg1);

      case FIXED_CST:
        return FIXED_VALUES_IDENTICAL (TREE_FIXED_CST (arg0),
                                       TREE_FIXED_CST (arg1));

      case REAL_CST:
        if (REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0),
                                   TREE_REAL_CST (arg1)))
          return 1;

        if (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0))))
          {
            /* If we do not distinguish between signed and unsigned zero,
               consider them equal.  */
            if (real_zerop (arg0) && real_zerop (arg1))
              return 1;
          }
        return 0;

      case VECTOR_CST:
        {
          tree v1, v2;

          v1 = TREE_VECTOR_CST_ELTS (arg0);
          v2 = TREE_VECTOR_CST_ELTS (arg1);
          while (v1 && v2)
            {
              if (!operand_equal_p (TREE_VALUE (v1), TREE_VALUE (v2),
                                    flags))
                return 0;
              v1 = TREE_CHAIN (v1);
              v2 = TREE_CHAIN (v2);
            }

          return v1 == v2;
        }

      case COMPLEX_CST:
        return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1),
                                 flags)
                && operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1),
                                    flags));

      case STRING_CST:
        return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1)
                && ! memcmp (TREE_STRING_POINTER (arg0),
                             TREE_STRING_POINTER (arg1),
                             TREE_STRING_LENGTH (arg0)));

      case ADDR_EXPR:
        return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0),
                                0);
      default:
        break;
      }

  if (flags & OEP_ONLY_CONST)
    return 0;
/* Define macros to test an operand from arg0 and arg1 for equality and a
   variant that allows null and views null as being different from any
   non-null value.  In the latter case, if either is null, they both
   must be; otherwise, do the normal comparison.  */
#define OP_SAME(N) operand_equal_p (TREE_OPERAND (arg0, N), \
                                    TREE_OPERAND (arg1, N), flags)

#define OP_SAME_WITH_NULL(N)				\
  ((!TREE_OPERAND (arg0, N) || !TREE_OPERAND (arg1, N))	\
   ? TREE_OPERAND (arg0, N) == TREE_OPERAND (arg1, N) : OP_SAME (N))
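/* Illustrative note (not from the sources): OP_SAME_WITH_NULL (2)
   treats two absent (NULL_TREE) operands as equal, an operand present
   on only one side as unequal, and otherwise falls back to OP_SAME's
   recursive operand_equal_p test.  */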
  switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
    {
    case tcc_unary:
      /* Two conversions are equal only if signedness and modes match.  */
      switch (TREE_CODE (arg0))
        {
        case NOP_EXPR:
        case CONVERT_EXPR:
        case FIX_TRUNC_EXPR:
          if (TYPE_UNSIGNED (TREE_TYPE (arg0))
              != TYPE_UNSIGNED (TREE_TYPE (arg1)))
            return 0;
          break;
        default:
          break;
        }

      return OP_SAME (0);

    case tcc_comparison:
    case tcc_binary:
      if (OP_SAME (0) && OP_SAME (1))
        return 1;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (TREE_CODE (arg0))
              && operand_equal_p (TREE_OPERAND (arg0, 0),
                                  TREE_OPERAND (arg1, 1), flags)
              && operand_equal_p (TREE_OPERAND (arg0, 1),
                                  TREE_OPERAND (arg1, 0), flags));
    case tcc_reference:
      /* If either of the pointer (or reference) expressions we are
         dereferencing contain a side effect, these cannot be equal.  */
      if (TREE_SIDE_EFFECTS (arg0)
          || TREE_SIDE_EFFECTS (arg1))
        return 0;

      switch (TREE_CODE (arg0))
        {
        case INDIRECT_REF:
        case ALIGN_INDIRECT_REF:
        case MISALIGNED_INDIRECT_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
          return OP_SAME (0);

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
          /* Operands 2 and 3 may be null.
             Compare the array index by value if it is constant first as we
             may have different types but same value here.  */
          return (OP_SAME (0)
                  && (tree_int_cst_equal (TREE_OPERAND (arg0, 1),
                                          TREE_OPERAND (arg1, 1))
                      || OP_SAME (1))
                  && OP_SAME_WITH_NULL (2)
                  && OP_SAME_WITH_NULL (3));

        case COMPONENT_REF:
          /* Handle operand 2 the same as for ARRAY_REF.  Operand 0
             may be NULL when we're called to compare MEM_EXPRs.  */
          return OP_SAME_WITH_NULL (0)
                 && OP_SAME (1)
                 && OP_SAME_WITH_NULL (2);

        case BIT_FIELD_REF:
          return OP_SAME (0) && OP_SAME (1) && OP_SAME (2);

        default:
          return 0;
        }
    case tcc_expression:
      switch (TREE_CODE (arg0))
        {
        case ADDR_EXPR:
        case TRUTH_NOT_EXPR:
          return OP_SAME (0);

        case TRUTH_ANDIF_EXPR:
        case TRUTH_ORIF_EXPR:
          return OP_SAME (0) && OP_SAME (1);

        case TRUTH_AND_EXPR:
        case TRUTH_OR_EXPR:
        case TRUTH_XOR_EXPR:
          if (OP_SAME (0) && OP_SAME (1))
            return 1;

          /* Otherwise take into account this is a commutative operation.  */
          return (operand_equal_p (TREE_OPERAND (arg0, 0),
                                   TREE_OPERAND (arg1, 1), flags)
                  && operand_equal_p (TREE_OPERAND (arg0, 1),
                                      TREE_OPERAND (arg1, 0), flags));

        default:
          return 0;
        }
    case tcc_vl_exp:
      switch (TREE_CODE (arg0))
        {
        case CALL_EXPR:
          /* If the CALL_EXPRs call different functions, then they
             clearly cannot be equal.  */
          if (! operand_equal_p (CALL_EXPR_FN (arg0), CALL_EXPR_FN (arg1),
                                 flags))
            return 0;

          {
            unsigned int cef = call_expr_flags (arg0);
            if (flags & OEP_PURE_SAME)
              cef &= ECF_CONST | ECF_PURE;
            else
              cef &= ECF_CONST;
            if (!cef)
              return 0;
          }

          /* Now see if all the arguments are the same.  */
          {
            const_call_expr_arg_iterator iter0, iter1;
            const_tree a0, a1;
            for (a0 = first_const_call_expr_arg (arg0, &iter0),
                   a1 = first_const_call_expr_arg (arg1, &iter1);
                 a0 && a1;
                 a0 = next_const_call_expr_arg (&iter0),
                   a1 = next_const_call_expr_arg (&iter1))
              if (! operand_equal_p (a0, a1, flags))
                return 0;

            /* If we get here and both argument lists are exhausted
               then the CALL_EXPRs are equal.  */
            return ! (a0 || a1);
          }
        default:
          return 0;
        }
    case tcc_declaration:
      /* Consider __builtin_sqrt equal to sqrt.  */
      return (TREE_CODE (arg0) == FUNCTION_DECL
              && DECL_BUILT_IN (arg0) && DECL_BUILT_IN (arg1)
              && DECL_BUILT_IN_CLASS (arg0) == DECL_BUILT_IN_CLASS (arg1)
              && DECL_FUNCTION_CODE (arg0) == DECL_FUNCTION_CODE (arg1));

    default:
      return 0;
    }

#undef OP_SAME
#undef OP_SAME_WITH_NULL
}
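/* Illustrative note (not from the sources): operand_equal_p tests
   indistinguishability, not C equality.  With signed zeros honored,
   the REAL_CSTs -0.0 and 0.0 are not "equal" here even though
   -0.0 == 0.0 evaluates to true, while two identical NaN constants
   are "equal" here even though NaN != NaN.  */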
/* Similar to operand_equal_p, but see if ARG0 might have been made by
   shorten_compare from ARG1 when ARG1 was being compared with OTHER.

   When in doubt, return 0.  */

static int
operand_equal_for_comparison_p (tree arg0, tree arg1, tree other)
{
  int unsignedp1, unsignedpo;
  tree primarg0, primarg1, primother;
  unsigned int correct_width;

  if (operand_equal_p (arg0, arg1, 0))
    return 1;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
      || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
    return 0;

  /* Discard any conversions that don't change the modes of ARG0 and ARG1
     and see if the inner values are the same.  This removes any
     signedness comparison, which doesn't matter here.  */
  primarg0 = arg0, primarg1 = arg1;
  STRIP_NOPS (primarg0);
  STRIP_NOPS (primarg1);
  if (operand_equal_p (primarg0, primarg1, 0))
    return 1;

  /* Duplicate what shorten_compare does to ARG1 and see if that gives the
     actual comparison operand, ARG0.

     First throw away any conversions to wider types
     already present in the operands.  */

  primarg1 = get_narrower (arg1, &unsignedp1);
  primother = get_narrower (other, &unsignedpo);

  correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
  if (unsignedp1 == unsignedpo
      && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
      && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
    {
      tree type = TREE_TYPE (arg0);

      /* Make sure shorter operand is extended the right way
         to match the longer operand.  */
      primarg1 = fold_convert (signed_or_unsigned_type_for
                               (unsignedp1, TREE_TYPE (primarg1)), primarg1);

      if (operand_equal_p (arg0, fold_convert (type, primarg1), 0))
        return 1;
    }

  return 0;
}
/* See if ARG is an expression that is either a comparison or is performing
   arithmetic on comparisons.  The comparisons must only be comparing
   two different values, which will be stored in *CVAL1 and *CVAL2; if
   they are nonzero it means that some operands have already been found.
   No variables may be used anywhere else in the expression except in the
   comparisons.  If SAVE_P is true it means we removed a SAVE_EXPR around
   the expression and save_expr needs to be called with CVAL1 and CVAL2.

   If this is true, return 1.  Otherwise, return zero.  */

static int
twoval_comparison_p (tree arg, tree *cval1, tree *cval2, int *save_p)
{
  enum tree_code code = TREE_CODE (arg);
  enum tree_code_class class = TREE_CODE_CLASS (code);

  /* We can handle some of the tcc_expression cases here.  */
  if (class == tcc_expression && code == TRUTH_NOT_EXPR)
    class = tcc_unary;
  else if (class == tcc_expression
           && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
               || code == COMPOUND_EXPR))
    class = tcc_binary;

  else if (class == tcc_expression && code == SAVE_EXPR
           && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg, 0)))
    {
      /* If we've already found a CVAL1 or CVAL2, this expression is
         too complex to handle.  */
      if (*cval1 || *cval2)
        return 0;

      class = tcc_unary;
      *save_p = 1;
    }

  switch (class)
    {
    case tcc_unary:
      return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);

    case tcc_binary:
      return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
              && twoval_comparison_p (TREE_OPERAND (arg, 1),
                                      cval1, cval2, save_p));

    case tcc_constant:
      return 1;

    case tcc_expression:
      if (code == COND_EXPR)
        return (twoval_comparison_p (TREE_OPERAND (arg, 0),
                                     cval1, cval2, save_p)
                && twoval_comparison_p (TREE_OPERAND (arg, 1),
                                        cval1, cval2, save_p)
                && twoval_comparison_p (TREE_OPERAND (arg, 2),
                                        cval1, cval2, save_p));
      return 0;

    case tcc_comparison:
      /* First see if we can handle the first operand, then the second.  For
         the second operand, we know *CVAL1 can't be zero.  It must be that
         one side of the comparison is each of the values; test for the
         case where this isn't true by failing if the two operands
         are the same.  */

      if (operand_equal_p (TREE_OPERAND (arg, 0),
                           TREE_OPERAND (arg, 1), 0))
        return 0;

      if (*cval1 == 0)
        *cval1 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
        ;
      else if (*cval2 == 0)
        *cval2 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
        ;
      else
        return 0;

      if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
        ;
      else if (*cval2 == 0)
        *cval2 = TREE_OPERAND (arg, 1);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
        ;
      else
        return 0;

      return 1;

    default:
      return 0;
    }
}
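/* Illustrative example (not from the sources): for an expression such
   as (a < b) | (a == b), *CVAL1 becomes A and *CVAL2 becomes B and the
   function returns 1; introducing any third variable into one of the
   comparisons makes it return 0.  */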
/* ARG is a tree that is known to contain just arithmetic operations and
   comparisons.  Evaluate the operations in the tree substituting NEW0 for
   any occurrence of OLD0 as an operand of a comparison and likewise for
   OLD1.  */

static tree
eval_subst (tree arg, tree old0, tree new0, tree old1, tree new1)
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);
  enum tree_code_class class = TREE_CODE_CLASS (code);

  /* We can handle some of the tcc_expression cases here.  */
  if (class == tcc_expression && code == TRUTH_NOT_EXPR)
    class = tcc_unary;
  else if (class == tcc_expression
           && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
    class = tcc_binary;

  switch (class)
    {
    case tcc_unary:
      return fold_build1 (code, type,
                          eval_subst (TREE_OPERAND (arg, 0),
                                      old0, new0, old1, new1));

    case tcc_binary:
      return fold_build2 (code, type,
                          eval_subst (TREE_OPERAND (arg, 0),
                                      old0, new0, old1, new1),
                          eval_subst (TREE_OPERAND (arg, 1),
                                      old0, new0, old1, new1));

    case tcc_expression:
      switch (code)
        {
        case SAVE_EXPR:
          return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);

        case COMPOUND_EXPR:
          return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);

        case COND_EXPR:
          return fold_build3 (code, type,
                              eval_subst (TREE_OPERAND (arg, 0),
                                          old0, new0, old1, new1),
                              eval_subst (TREE_OPERAND (arg, 1),
                                          old0, new0, old1, new1),