1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /*@@ This file should be rewritten to use an arbitrary precision
23 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
24 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
25 @@ The routines that translate from the ap rep should
26 @@ warn if precision etc. is lost.
27 @@ This would also make life easier when this technology is used
28 @@ for cross-compilers. */
30 /* The entry points in this file are fold, size_int_wide, size_binop
31 and force_fit_type_double.
33 fold takes a tree as argument and returns a simplified tree.
35 size_binop takes a tree code for an arithmetic operation
36 and two operands that are trees, and produces a tree for the
37 result, assuming the type comes from `sizetype'.
39 size_int takes an integer value, and creates a tree constant
40 with type from `sizetype'.
42 force_fit_type_double takes a constant, an overflowable flag and a
43 prior overflow indicator. It forces the value to fit the type and
44 sets TREE_OVERFLOW.
46 Note: Since the folders get called on non-gimple code as well as
47 gimple code, we need to handle GIMPLE tuples as well as their
48 corresponding tree equivalents. */
52 #include "coretypes.h"
57 #include "fixed-value.h"
66 #include "langhooks.h"
70 /* Nonzero if we are folding constants inside an initializer; zero
71 otherwise. */
72 int folding_initializer = 0;
74 /* The following constants represent a bit-based encoding of GCC's
75 comparison operators. This encoding simplifies transformations
76 on relational comparison operators, such as AND and OR. */
77 enum comparison_code {
96 static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
97 static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
98 static bool negate_mathfn_p (enum built_in_function);
99 static bool negate_expr_p (tree);
100 static tree negate_expr (tree);
101 static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int);
102 static tree associate_trees (tree, tree, enum tree_code, tree);
103 static tree const_binop (enum tree_code, tree, tree, int);
104 static enum comparison_code comparison_to_compcode (enum tree_code);
105 static enum tree_code compcode_to_comparison (enum comparison_code);
106 static int operand_equal_for_comparison_p (tree, tree, tree);
107 static int twoval_comparison_p (tree, tree *, tree *, int *);
108 static tree eval_subst (tree, tree, tree, tree, tree);
109 static tree pedantic_omit_one_operand (tree, tree, tree);
110 static tree distribute_bit_expr (enum tree_code, tree, tree, tree);
111 static tree make_bit_field_ref (tree, tree, HOST_WIDE_INT, HOST_WIDE_INT, int);
112 static tree optimize_bit_field_compare (enum tree_code, tree, tree, tree);
113 static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
114 enum machine_mode *, int *, int *,
116 static int all_ones_mask_p (const_tree, int);
117 static tree sign_bit_p (tree, const_tree);
118 static int simple_operand_p (const_tree);
119 static tree range_binop (enum tree_code, tree, tree, int, tree, int);
120 static tree range_predecessor (tree);
121 static tree range_successor (tree);
122 extern tree make_range (tree, int *, tree *, tree *, bool *);
123 extern tree build_range_check (tree, tree, int, tree, tree);
124 extern bool merge_ranges (int *, tree *, tree *, int, tree, tree, int,
126 static tree fold_range_test (enum tree_code, tree, tree, tree);
127 static tree fold_cond_expr_with_comparison (tree, tree, tree, tree);
128 static tree unextend (tree, int, int, tree);
129 static tree fold_truthop (enum tree_code, tree, tree, tree);
130 static tree optimize_minmax_comparison (enum tree_code, tree, tree, tree);
131 static tree extract_muldiv (tree, tree, enum tree_code, tree, bool *);
132 static tree extract_muldiv_1 (tree, tree, enum tree_code, tree, bool *);
133 static tree fold_binary_op_with_conditional_arg (enum tree_code, tree,
136 static tree fold_mathfn_compare (enum built_in_function, enum tree_code,
138 static tree fold_inf_compare (enum tree_code, tree, tree, tree);
139 static tree fold_div_compare (enum tree_code, tree, tree, tree);
140 static bool reorder_operands_p (const_tree, const_tree);
141 static tree fold_negate_const (tree, tree);
142 static tree fold_not_const (tree, tree);
143 static tree fold_relational_const (enum tree_code, tree, tree, tree);
144 static tree fold_convert_const (enum tree_code, tree, tree);
147 /* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
148 overflow. Suppose A, B and SUM have the same respective signs as A1, B1,
149 and SUM1. Then this yields nonzero if overflow occurred during the
150 addition.
152 Overflow occurs if A and B have the same sign, but A and SUM differ in
153 sign. Use `^' to test whether signs differ, and `< 0' to isolate the
154 sign.
155 #define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
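/* Illustrative sketch, not part of the original file: exercising
   OVERFLOW_SUM_SIGN.  MAX is the most positive HOST_WIDE_INT, computed
   without assuming a particular word width; adding 1 wraps it to the
   most negative value, which the macro must flag.  */
#if 0
static void
overflow_sum_sign_example (void)
{
  HOST_WIDE_INT max = (HOST_WIDE_INT) (((unsigned HOST_WIDE_INT) -1) >> 1);
  HOST_WIDE_INT sum = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) max + 1);
  gcc_assert (OVERFLOW_SUM_SIGN (max, 1, sum));       /* wrapped: overflow */
  gcc_assert (!OVERFLOW_SUM_SIGN (max, -1, max - 1)); /* no overflow */
}
#endif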
157 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
158 We do that by representing the two-word integer in 4 words, with only
159 HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
160 number. The value of the word is LOWPART + HIGHPART * BASE. */
163 ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
164 #define HIGHPART(x) \
165 ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
166 #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
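/* Illustrative sketch, not part of the original file: for any
   unsigned HOST_WIDE_INT X, the two half-width digits reassemble as
   X == LOWPART (X) + HIGHPART (X) * BASE, whatever the host width.  */
#if 0
static void
lowpart_highpart_example (void)
{
  unsigned HOST_WIDE_INT x = 0x12345678;
  gcc_assert (x == LOWPART (x) + HIGHPART (x) * BASE);
}
#endif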
168 /* Unpack a two-word integer into 4 words.
169 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
170 WORDS points to the array of HOST_WIDE_INTs. */
173 encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
175 words[0] = LOWPART (low);
176 words[1] = HIGHPART (low);
177 words[2] = LOWPART (hi);
178 words[3] = HIGHPART (hi);
181 /* Pack an array of 4 words into a two-word integer.
182 WORDS points to the array of words.
183 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
186 decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
189 *low = words[0] + words[1] * BASE;
190 *hi = words[2] + words[3] * BASE;
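/* Illustrative sketch, not part of the original file: encode and
   decode are inverses, so a double-word constant survives a round
   trip through the four-digit representation.  */
#if 0
static void
encode_decode_roundtrip_example (void)
{
  HOST_WIDE_INT words[4];
  unsigned HOST_WIDE_INT low;
  HOST_WIDE_INT high;

  encode (words, 0xdeadbeef, -1);
  decode (words, &low, &high);
  gcc_assert (low == 0xdeadbeef && high == -1);
}
#endif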
193 /* Force the double-word integer L1, H1 to be within the range of the
194 integer type TYPE. Stores the properly truncated and sign-extended
195 double-word integer in *LV, *HV. Returns true if the operation
196 overflows, that is, argument and result are different. */
199 fit_double_type (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
200 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, const_tree type)
202 unsigned HOST_WIDE_INT low0 = l1;
203 HOST_WIDE_INT high0 = h1;
205 int sign_extended_type;
207 if (POINTER_TYPE_P (type)
208 || TREE_CODE (type) == OFFSET_TYPE)
211 prec = TYPE_PRECISION (type);
213 /* Size types *are* sign extended. */
214 sign_extended_type = (!TYPE_UNSIGNED (type)
215 || (TREE_CODE (type) == INTEGER_TYPE
216 && TYPE_IS_SIZETYPE (type)));
218 /* First clear all bits that are beyond the type's precision. */
219 if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
221 else if (prec > HOST_BITS_PER_WIDE_INT)
222 h1 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
226 if (prec < HOST_BITS_PER_WIDE_INT)
227 l1 &= ~((HOST_WIDE_INT) (-1) << prec);
230 /* Then do sign extension if necessary. */
231 if (!sign_extended_type)
232 /* No sign extension */;
233 else if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
234 /* Correct width already. */;
235 else if (prec > HOST_BITS_PER_WIDE_INT)
237 /* Sign extend top half? */
238 if (h1 & ((unsigned HOST_WIDE_INT)1
239 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
240 h1 |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
242 else if (prec == HOST_BITS_PER_WIDE_INT)
244 if ((HOST_WIDE_INT)l1 < 0)
249 /* Sign extend bottom half? */
250 if (l1 & ((unsigned HOST_WIDE_INT)1 << (prec - 1)))
253 l1 |= (HOST_WIDE_INT)(-1) << prec;
260 /* If the value didn't fit, signal overflow. */
261 return l1 != low0 || h1 != high0;
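/* Illustrative sketch, not part of the original file: fitting 0x1ff
   into a signed 8-bit type truncates to 0xff and then sign-extends to
   -1; the value changed, so overflow is reported.  Assumes the usual
   8-bit signed_char_type_node and an int-valued truth result.  */
#if 0
static void
fit_double_type_example (void)
{
  unsigned HOST_WIDE_INT low = 0x1ff;
  HOST_WIDE_INT high = 0;
  int ovf = fit_double_type (low, high, &low, &high, signed_char_type_node);
  gcc_assert (ovf && low == (unsigned HOST_WIDE_INT) -1 && high == -1);
}
#endif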
264 /* We force the double-int HIGH:LOW to the range of the type TYPE by
265 sign or zero extending it.
266 OVERFLOWABLE indicates whether we are interested
267 in overflow of the value: when >0 we are only interested in signed
268 overflow, for <0 we are interested in any overflow. OVERFLOWED
269 indicates whether overflow has already occurred. We force
271 the value to be within range of the type (by setting to 0 or 1 all
272 the bits outside the type's range). We set TREE_OVERFLOW if
273 OVERFLOWED is nonzero,
274 or OVERFLOWABLE is >0 and signed overflow occurs,
275 or OVERFLOWABLE is <0 and any overflow occurs.
276 We return a new tree node for the extended double-int. The node
277 is shared if no overflow flags are set. */
280 force_fit_type_double (tree type, unsigned HOST_WIDE_INT low,
281 HOST_WIDE_INT high, int overflowable,
284 int sign_extended_type;
287 /* Size types *are* sign extended. */
288 sign_extended_type = (!TYPE_UNSIGNED (type)
289 || (TREE_CODE (type) == INTEGER_TYPE
290 && TYPE_IS_SIZETYPE (type)));
292 overflow = fit_double_type (low, high, &low, &high, type);
294 /* If we need to set overflow flags, return a new unshared node. */
295 if (overflowed || overflow)
299 || (overflowable > 0 && sign_extended_type))
301 tree t = make_node (INTEGER_CST);
302 TREE_INT_CST_LOW (t) = low;
303 TREE_INT_CST_HIGH (t) = high;
304 TREE_TYPE (t) = type;
305 TREE_OVERFLOW (t) = 1;
310 /* Else build a shared node. */
311 return build_int_cst_wide (type, low, high);
314 /* Add two doubleword integers with doubleword result.
315 Return nonzero if the operation overflows according to UNSIGNED_P.
316 Each argument is given as two `HOST_WIDE_INT' pieces.
317 One argument is L1 and H1; the other, L2 and H2.
318 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
321 add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
322 unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
323 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
326 unsigned HOST_WIDE_INT l;
330 h = h1 + h2 + (l < l1);
336 return (unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1;
338 return OVERFLOW_SUM_SIGN (h1, h2, h);
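/* Illustrative sketch, not part of the original file: the low words
   are added first and a carry of (l < l1) is propagated into the high
   word, so all-ones plus one becomes high:low == 1:0.  */
#if 0
static void
add_double_example (void)
{
  unsigned HOST_WIDE_INT lv;
  HOST_WIDE_INT hv;

  add_double ((unsigned HOST_WIDE_INT) -1, 0, 1, 0, &lv, &hv);
  gcc_assert (lv == 0 && hv == 1);
}
#endif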
341 /* Negate a doubleword integer with doubleword result.
342 Return nonzero if the operation overflows, assuming it's signed.
343 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
344 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
347 neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
348 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
354 return (*hv & h1) < 0;
364 /* Multiply two doubleword integers with doubleword result.
365 Return nonzero if the operation overflows according to UNSIGNED_P.
366 Each argument is given as two `HOST_WIDE_INT' pieces.
367 One argument is L1 and H1; the other, L2 and H2.
368 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
371 mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
372 unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
373 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
376 HOST_WIDE_INT arg1[4];
377 HOST_WIDE_INT arg2[4];
378 HOST_WIDE_INT prod[4 * 2];
379 unsigned HOST_WIDE_INT carry;
381 unsigned HOST_WIDE_INT toplow, neglow;
382 HOST_WIDE_INT tophigh, neghigh;
384 encode (arg1, l1, h1);
385 encode (arg2, l2, h2);
387 memset (prod, 0, sizeof prod);
389 for (i = 0; i < 4; i++)
392 for (j = 0; j < 4; j++)
395 /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
396 carry += arg1[i] * arg2[j];
397 /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
399 prod[k] = LOWPART (carry);
400 carry = HIGHPART (carry);
405 decode (prod, lv, hv);
406 decode (prod + 4, &toplow, &tophigh);
408 /* Unsigned overflow is immediate. */
410 return (toplow | tophigh) != 0;
412 /* Check for signed overflow by calculating the signed representation of the
413 top half of the result; it should agree with the low half's sign bit. */
416 neg_double (l2, h2, &neglow, &neghigh);
417 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
421 neg_double (l1, h1, &neglow, &neghigh);
422 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
424 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
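/* Illustrative sketch, not part of the original file: BASE * BASE is
   exactly one in the high word and zero in the low word, regardless
   of the width of HOST_WIDE_INT, and does not overflow the
   double-word result.  */
#if 0
static void
mul_double_example (void)
{
  unsigned HOST_WIDE_INT lv;
  HOST_WIDE_INT hv;
  int ovf = mul_double (BASE, 0, BASE, 0, &lv, &hv);
  gcc_assert (!ovf && lv == 0 && hv == 1);
}
#endif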
427 /* Shift the doubleword integer in L1, H1 left by COUNT places
428 keeping only PREC bits of result.
429 Shift right if COUNT is negative.
430 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
431 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
434 lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
435 HOST_WIDE_INT count, unsigned int prec,
436 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith)
438 unsigned HOST_WIDE_INT signmask;
442 rshift_double (l1, h1, -count, prec, lv, hv, arith);
446 if (SHIFT_COUNT_TRUNCATED)
449 if (count >= 2 * HOST_BITS_PER_WIDE_INT)
451 /* Shifting by the host word size is undefined according to the
452 ANSI standard, so we must handle this as a special case. */
456 else if (count >= HOST_BITS_PER_WIDE_INT)
458 *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
463 *hv = (((unsigned HOST_WIDE_INT) h1 << count)
464 | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
468 /* Sign extend all bits that are beyond the precision. */
470 signmask = -((prec > HOST_BITS_PER_WIDE_INT
471 ? ((unsigned HOST_WIDE_INT) *hv
472 >> (prec - HOST_BITS_PER_WIDE_INT - 1))
473 : (*lv >> (prec - 1))) & 1);
475 if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
477 else if (prec >= HOST_BITS_PER_WIDE_INT)
479 *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
480 *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
485 *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
486 *lv |= signmask << prec;
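/* Illustrative sketch, not part of the original file: a left shift by
   4 within a 32-bit precision, and the same call with a negative
   COUNT, which lshift_double forwards to rshift_double.  */
#if 0
static void
lshift_double_example (void)
{
  unsigned HOST_WIDE_INT lv;
  HOST_WIDE_INT hv;

  lshift_double (1, 0, 4, 32, &lv, &hv, 0);
  gcc_assert (lv == 16 && hv == 0);
  lshift_double (16, 0, -4, 32, &lv, &hv, 0);
  gcc_assert (lv == 1 && hv == 0);
}
#endif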
490 /* Shift the doubleword integer in L1, H1 right by COUNT places
491 keeping only PREC bits of result. COUNT must be positive.
492 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
493 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
496 rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
497 HOST_WIDE_INT count, unsigned int prec,
498 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
501 unsigned HOST_WIDE_INT signmask;
504 ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
507 if (SHIFT_COUNT_TRUNCATED)
510 if (count >= 2 * HOST_BITS_PER_WIDE_INT)
512 /* Shifting by the host word size is undefined according to the
513 ANSI standard, so we must handle this as a special case. */
517 else if (count >= HOST_BITS_PER_WIDE_INT)
520 *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
524 *hv = (unsigned HOST_WIDE_INT) h1 >> count;
526 | ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
529 /* Zero / sign extend all bits that are beyond the precision. */
531 if (count >= (HOST_WIDE_INT)prec)
536 else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
538 else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
540 *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
541 *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
546 *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
547 *lv |= signmask << (prec - count);
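/* Illustrative sketch, not part of the original file: ARITH selects
   arithmetic versus logical shifting.  Shifting the 32-bit value -1
   right by one keeps the sign bit arithmetically but clears it
   logically.  */
#if 0
static void
rshift_double_example (void)
{
  unsigned HOST_WIDE_INT lv;
  HOST_WIDE_INT hv;

  rshift_double ((unsigned HOST_WIDE_INT) -1, -1, 1, 32, &lv, &hv, 1);
  gcc_assert (hv == -1);                     /* sign copied in */
  rshift_double ((unsigned HOST_WIDE_INT) -1, -1, 1, 32, &lv, &hv, 0);
  gcc_assert (hv == 0 && lv == 0x7fffffff);  /* zero filled */
}
#endif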
551 /* Rotate the doubleword integer in L1, H1 left by COUNT places
552 keeping only PREC bits of result.
553 Rotate right if COUNT is negative.
554 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
557 lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
558 HOST_WIDE_INT count, unsigned int prec,
559 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
561 unsigned HOST_WIDE_INT s1l, s2l;
562 HOST_WIDE_INT s1h, s2h;
568 lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
569 rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
574 /* Rotate the doubleword integer in L1, H1 right by COUNT places
575 keeping only PREC bits of result. COUNT must be positive.
576 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
579 rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
580 HOST_WIDE_INT count, unsigned int prec,
581 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
583 unsigned HOST_WIDE_INT s1l, s2l;
584 HOST_WIDE_INT s1h, s2h;
590 rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
591 lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
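/* Illustrative sketch, not part of the original file: both rotate
   helpers are the OR of two opposite shifts, so rotating the 32-bit
   sign bit left by one wraps it around to bit zero.  */
#if 0
static void
lrotate_double_example (void)
{
  unsigned HOST_WIDE_INT lv;
  HOST_WIDE_INT hv;

  lrotate_double (0x80000000, 0, 1, 32, &lv, &hv);
  gcc_assert (lv == 1 && hv == 0);
}
#endif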
596 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
597 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
598 CODE is a tree code for a kind of division, one of
599 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
600 or TRUNC_MOD_EXPR, FLOOR_MOD_EXPR, CEIL_MOD_EXPR, ROUND_MOD_EXPR.
601 It controls how the quotient is rounded to an integer.
602 Return nonzero if the operation overflows.
603 UNS nonzero says do unsigned division. */
606 div_and_round_double (enum tree_code code, int uns,
607 unsigned HOST_WIDE_INT lnum_orig, /* num == numerator == dividend */
608 HOST_WIDE_INT hnum_orig,
609 unsigned HOST_WIDE_INT lden_orig, /* den == denominator == divisor */
610 HOST_WIDE_INT hden_orig,
611 unsigned HOST_WIDE_INT *lquo,
612 HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
616 HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
617 HOST_WIDE_INT den[4], quo[4];
619 unsigned HOST_WIDE_INT work;
620 unsigned HOST_WIDE_INT carry = 0;
621 unsigned HOST_WIDE_INT lnum = lnum_orig;
622 HOST_WIDE_INT hnum = hnum_orig;
623 unsigned HOST_WIDE_INT lden = lden_orig;
624 HOST_WIDE_INT hden = hden_orig;
627 if (hden == 0 && lden == 0)
628 overflow = 1, lden = 1;
630 /* Calculate quotient sign and convert operands to unsigned. */
636 /* (minimum integer) / (-1) is the only overflow case. */
637 if (neg_double (lnum, hnum, &lnum, &hnum)
638 && ((HOST_WIDE_INT) lden & hden) == -1)
644 neg_double (lden, hden, &lden, &hden);
648 if (hnum == 0 && hden == 0)
649 { /* single precision */
651 /* This unsigned division rounds toward zero. */
657 { /* trivial case: dividend < divisor */
658 /* hden != 0 already checked. */
665 memset (quo, 0, sizeof quo);
667 memset (num, 0, sizeof num); /* to zero 9th element */
668 memset (den, 0, sizeof den);
670 encode (num, lnum, hnum);
671 encode (den, lden, hden);
673 /* Special code for when the divisor < BASE. */
674 if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
676 /* hnum != 0 already checked. */
677 for (i = 4 - 1; i >= 0; i--)
679 work = num[i] + carry * BASE;
680 quo[i] = work / lden;
686 /* Full double precision division,
687 with thanks to Don Knuth's "Seminumerical Algorithms". */
688 int num_hi_sig, den_hi_sig;
689 unsigned HOST_WIDE_INT quo_est, scale;
691 /* Find the highest nonzero divisor digit. */
692 for (i = 4 - 1;; i--)
699 /* Ensure that the first digit of the divisor is at least BASE/2.
700 This is required by the quotient digit estimation algorithm. */
702 scale = BASE / (den[den_hi_sig] + 1);
704 { /* scale divisor and dividend */
706 for (i = 0; i <= 4 - 1; i++)
708 work = (num[i] * scale) + carry;
709 num[i] = LOWPART (work);
710 carry = HIGHPART (work);
715 for (i = 0; i <= 4 - 1; i++)
717 work = (den[i] * scale) + carry;
718 den[i] = LOWPART (work);
719 carry = HIGHPART (work);
720 if (den[i] != 0) den_hi_sig = i;
727 for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
729 /* Guess the next quotient digit, quo_est, by dividing the first
730 two remaining dividend digits by the high order divisor digit.
731 quo_est is never low and is at most 2 high. */
732 unsigned HOST_WIDE_INT tmp;
734 num_hi_sig = i + den_hi_sig + 1;
735 work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
736 if (num[num_hi_sig] != den[den_hi_sig])
737 quo_est = work / den[den_hi_sig];
741 /* Refine quo_est so it's usually correct, and at most one high. */
742 tmp = work - quo_est * den[den_hi_sig];
744 && (den[den_hi_sig - 1] * quo_est
745 > (tmp * BASE + num[num_hi_sig - 2])))
748 /* Try QUO_EST as the quotient digit, by multiplying the
749 divisor by QUO_EST and subtracting from the remaining dividend.
750 Keep in mind that QUO_EST is the I - 1st digit. */
753 for (j = 0; j <= den_hi_sig; j++)
755 work = quo_est * den[j] + carry;
756 carry = HIGHPART (work);
757 work = num[i + j] - LOWPART (work);
758 num[i + j] = LOWPART (work);
759 carry += HIGHPART (work) != 0;
762 /* If quo_est was high by one, then num[i] went negative and
763 we need to correct things. */
764 if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
767 carry = 0; /* add divisor back in */
768 for (j = 0; j <= den_hi_sig; j++)
770 work = num[i + j] + den[j] + carry;
771 carry = HIGHPART (work);
772 num[i + j] = LOWPART (work);
775 num[num_hi_sig] += carry;
778 /* Store the quotient digit. */
783 decode (quo, lquo, hquo);
786 /* If the result should be negative, negate it. */
788 neg_double (*lquo, *hquo, lquo, hquo);
790 /* Compute trial remainder: rem = num - (quo * den) */
791 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
792 neg_double (*lrem, *hrem, lrem, hrem);
793 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
798 case TRUNC_MOD_EXPR: /* round toward zero */
799 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
803 case FLOOR_MOD_EXPR: /* round toward negative infinity */
804 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
807 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
815 case CEIL_MOD_EXPR: /* round toward positive infinity */
816 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
818 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
826 case ROUND_MOD_EXPR: /* round to closest integer */
828 unsigned HOST_WIDE_INT labs_rem = *lrem;
829 HOST_WIDE_INT habs_rem = *hrem;
830 unsigned HOST_WIDE_INT labs_den = lden, ltwice;
831 HOST_WIDE_INT habs_den = hden, htwice;
833 /* Get absolute values. */
835 neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
837 neg_double (lden, hden, &labs_den, &habs_den);
839 /* If (2 * abs (lrem) >= abs (lden)), adjust the quotient. */
840 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
841 labs_rem, habs_rem, <wice, &htwice);
843 if (((unsigned HOST_WIDE_INT) habs_den
844 < (unsigned HOST_WIDE_INT) htwice)
845 || (((unsigned HOST_WIDE_INT) habs_den
846 == (unsigned HOST_WIDE_INT) htwice)
847 && (labs_den <= ltwice)))
851 add_double (*lquo, *hquo,
852 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
855 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
867 /* Compute true remainder: rem = num - (quo * den) */
868 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
869 neg_double (*lrem, *hrem, lrem, hrem);
870 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
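/* Illustrative sketch, not part of the original file: the division
   code selects the rounding, so -7 / 2 truncates to -3 for
   TRUNC_DIV_EXPR but floors to -4 for FLOOR_DIV_EXPR.  */
#if 0
static void
div_and_round_double_example (void)
{
  unsigned HOST_WIDE_INT lquo, lrem;
  HOST_WIDE_INT hquo, hrem;

  div_and_round_double (TRUNC_DIV_EXPR, 0, (unsigned HOST_WIDE_INT) -7, -1,
			2, 0, &lquo, &hquo, &lrem, &hrem);
  gcc_assert (hquo == -1 && (HOST_WIDE_INT) lquo == -3);
  div_and_round_double (FLOOR_DIV_EXPR, 0, (unsigned HOST_WIDE_INT) -7, -1,
			2, 0, &lquo, &hquo, &lrem, &hrem);
  gcc_assert (hquo == -1 && (HOST_WIDE_INT) lquo == -4);
}
#endif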
874 /* If ARG2 divides ARG1 with zero remainder, carries out the division
875 of type CODE and returns the quotient.
876 Otherwise returns NULL_TREE. */
879 div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2)
881 unsigned HOST_WIDE_INT int1l, int2l;
882 HOST_WIDE_INT int1h, int2h;
883 unsigned HOST_WIDE_INT quol, reml;
884 HOST_WIDE_INT quoh, remh;
885 tree type = TREE_TYPE (arg1);
886 int uns = TYPE_UNSIGNED (type);
888 int1l = TREE_INT_CST_LOW (arg1);
889 int1h = TREE_INT_CST_HIGH (arg1);
890 /* &obj[0] + -128 really should be compiled as &obj[-8] rather than
891 &obj[some_exotic_number]. */
892 if (POINTER_TYPE_P (type))
895 type = signed_type_for (type);
896 fit_double_type (int1l, int1h, &int1l, &int1h,
900 fit_double_type (int1l, int1h, &int1l, &int1h, type);
901 int2l = TREE_INT_CST_LOW (arg2);
902 int2h = TREE_INT_CST_HIGH (arg2);
904 div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
905 &quol, &quoh, &reml, &remh);
906 if (remh != 0 || reml != 0)
909 return build_int_cst_wide (type, quol, quoh);
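/* Illustrative sketch, not part of the original file: 12 / 4 folds to
   the INTEGER_CST 3, while 13 / 4 leaves a remainder and yields
   NULL_TREE.  */
#if 0
static void
div_if_zero_remainder_example (void)
{
  tree four = build_int_cst (integer_type_node, 4);
  tree t;

  t = div_if_zero_remainder (EXACT_DIV_EXPR,
			     build_int_cst (integer_type_node, 12), four);
  gcc_assert (t != NULL_TREE && TREE_INT_CST_LOW (t) == 3);
  t = div_if_zero_remainder (EXACT_DIV_EXPR,
			     build_int_cst (integer_type_node, 13), four);
  gcc_assert (t == NULL_TREE);
}
#endif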
912 /* This is nonzero if we should defer warnings about undefined
913 overflow. This facility exists because these warnings are a
914 special case. The code to estimate loop iterations does not want
915 to issue any warnings, since it works with expressions which do not
916 occur in user code. Various bits of cleanup code call fold(), but
917 only use the result if it has certain characteristics (e.g., is a
918 constant); that code only wants to issue a warning if the result is
919 used. */
921 static int fold_deferring_overflow_warnings;
923 /* If a warning about undefined overflow is deferred, this is the
924 warning. Note that this may cause us to turn two warnings into
925 one, but that is fine since it is sufficient to only give one
926 warning per expression. */
928 static const char* fold_deferred_overflow_warning;
930 /* If a warning about undefined overflow is deferred, this is the
931 level at which the warning should be emitted. */
933 static enum warn_strict_overflow_code fold_deferred_overflow_code;
935 /* Start deferring overflow warnings. We could use a stack here to
936 permit nested calls, but at present it is not necessary. */
939 fold_defer_overflow_warnings (void)
941 ++fold_deferring_overflow_warnings;
944 /* Stop deferring overflow warnings. If there is a pending warning,
945 and ISSUE is true, then issue the warning if appropriate. STMT is
946 the statement with which the warning should be associated (used for
947 location information); STMT may be NULL. CODE is the level of the
948 warning--a warn_strict_overflow_code value. This function will use
949 the smaller of CODE and the deferred code when deciding whether to
950 issue the warning. CODE may be zero, meaning always use the
951 deferred code. */
954 fold_undefer_overflow_warnings (bool issue, const_gimple stmt, int code)
959 gcc_assert (fold_deferring_overflow_warnings > 0);
960 --fold_deferring_overflow_warnings;
961 if (fold_deferring_overflow_warnings > 0)
963 if (fold_deferred_overflow_warning != NULL
965 && code < (int) fold_deferred_overflow_code)
966 fold_deferred_overflow_code = (enum warn_strict_overflow_code) code;
970 warnmsg = fold_deferred_overflow_warning;
971 fold_deferred_overflow_warning = NULL;
973 if (!issue || warnmsg == NULL)
976 if (gimple_no_warning_p (stmt))
979 /* Use the smallest code level when deciding to issue the
980 warning. */
981 if (code == 0 || code > (int) fold_deferred_overflow_code)
982 code = fold_deferred_overflow_code;
984 if (!issue_strict_overflow_warning (code))
988 locus = input_location;
990 locus = gimple_location (stmt);
991 warning (OPT_Wstrict_overflow, "%H%s", &locus, warnmsg);
994 /* Stop deferring overflow warnings, ignoring any deferred
995 warnings. */
998 fold_undefer_and_ignore_overflow_warnings (void)
1000 fold_undefer_overflow_warnings (false, NULL, 0);
1003 /* Whether we are deferring overflow warnings. */
1006 fold_deferring_overflow_warnings_p (void)
1008 return fold_deferring_overflow_warnings > 0;
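/* Illustrative sketch, not part of the original file, of the intended
   deferral protocol.  FOLD_QUIETLY is a hypothetical wrapper, not an
   existing GCC function.  */
#if 0
static tree
fold_quietly (tree expr)
{
  tree res;

  fold_defer_overflow_warnings ();
  res = fold (expr);
  /* Warn only when folding produced something we will really use.  */
  fold_undefer_overflow_warnings (res != expr, NULL, 0);
  return res;
}
#endif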
1011 /* This is called when we fold something based on the fact that signed
1012 overflow is undefined. */
1015 fold_overflow_warning (const char* gmsgid, enum warn_strict_overflow_code wc)
1017 if (fold_deferring_overflow_warnings > 0)
1019 if (fold_deferred_overflow_warning == NULL
1020 || wc < fold_deferred_overflow_code)
1022 fold_deferred_overflow_warning = gmsgid;
1023 fold_deferred_overflow_code = wc;
1026 else if (issue_strict_overflow_warning (wc))
1027 warning (OPT_Wstrict_overflow, gmsgid);
1030 /* Return true if the built-in mathematical function specified by CODE
1031 is odd, i.e. -f(x) == f(-x). */
1034 negate_mathfn_p (enum built_in_function code)
1038 CASE_FLT_FN (BUILT_IN_ASIN):
1039 CASE_FLT_FN (BUILT_IN_ASINH):
1040 CASE_FLT_FN (BUILT_IN_ATAN):
1041 CASE_FLT_FN (BUILT_IN_ATANH):
1042 CASE_FLT_FN (BUILT_IN_CASIN):
1043 CASE_FLT_FN (BUILT_IN_CASINH):
1044 CASE_FLT_FN (BUILT_IN_CATAN):
1045 CASE_FLT_FN (BUILT_IN_CATANH):
1046 CASE_FLT_FN (BUILT_IN_CBRT):
1047 CASE_FLT_FN (BUILT_IN_CPROJ):
1048 CASE_FLT_FN (BUILT_IN_CSIN):
1049 CASE_FLT_FN (BUILT_IN_CSINH):
1050 CASE_FLT_FN (BUILT_IN_CTAN):
1051 CASE_FLT_FN (BUILT_IN_CTANH):
1052 CASE_FLT_FN (BUILT_IN_ERF):
1053 CASE_FLT_FN (BUILT_IN_LLROUND):
1054 CASE_FLT_FN (BUILT_IN_LROUND):
1055 CASE_FLT_FN (BUILT_IN_ROUND):
1056 CASE_FLT_FN (BUILT_IN_SIN):
1057 CASE_FLT_FN (BUILT_IN_SINH):
1058 CASE_FLT_FN (BUILT_IN_TAN):
1059 CASE_FLT_FN (BUILT_IN_TANH):
1060 CASE_FLT_FN (BUILT_IN_TRUNC):
1063 CASE_FLT_FN (BUILT_IN_LLRINT):
1064 CASE_FLT_FN (BUILT_IN_LRINT):
1065 CASE_FLT_FN (BUILT_IN_NEARBYINT):
1066 CASE_FLT_FN (BUILT_IN_RINT):
1067 return !flag_rounding_math;
1075 /* Check whether we may negate an integer constant T without causing
1076 overflow. */
1079 may_negate_without_overflow_p (const_tree t)
1081 unsigned HOST_WIDE_INT val;
1085 gcc_assert (TREE_CODE (t) == INTEGER_CST);
1087 type = TREE_TYPE (t);
1088 if (TYPE_UNSIGNED (type))
1091 prec = TYPE_PRECISION (type);
1092 if (prec > HOST_BITS_PER_WIDE_INT)
1094 if (TREE_INT_CST_LOW (t) != 0)
1096 prec -= HOST_BITS_PER_WIDE_INT;
1097 val = TREE_INT_CST_HIGH (t);
1100 val = TREE_INT_CST_LOW (t);
1101 if (prec < HOST_BITS_PER_WIDE_INT)
1102 val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
1103 return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1));
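/* Illustrative sketch, not part of the original file: the most
   negative value of a signed type is exactly the constant whose
   negation would overflow.  */
#if 0
static void
may_negate_example (void)
{
  gcc_assert (!may_negate_without_overflow_p
	      (TYPE_MIN_VALUE (integer_type_node)));
  gcc_assert (may_negate_without_overflow_p
	      (build_int_cst (integer_type_node, 1)));
}
#endif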
1106 /* Determine whether an expression T can be cheaply negated using
1107 the function negate_expr without introducing undefined overflow. */
1110 negate_expr_p (tree t)
1117 type = TREE_TYPE (t);
1119 STRIP_SIGN_NOPS (t);
1120 switch (TREE_CODE (t))
1123 if (TYPE_OVERFLOW_WRAPS (type))
1126 /* Check that -CST will not overflow type. */
1127 return may_negate_without_overflow_p (t);
1129 return (INTEGRAL_TYPE_P (type)
1130 && TYPE_OVERFLOW_WRAPS (type));
1138 return negate_expr_p (TREE_REALPART (t))
1139 && negate_expr_p (TREE_IMAGPART (t));
1142 return negate_expr_p (TREE_OPERAND (t, 0))
1143 && negate_expr_p (TREE_OPERAND (t, 1));
1146 return negate_expr_p (TREE_OPERAND (t, 0));
1149 if (HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
1150 || HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
1152 /* -(A + B) -> (-B) - A. */
1153 if (negate_expr_p (TREE_OPERAND (t, 1))
1154 && reorder_operands_p (TREE_OPERAND (t, 0),
1155 TREE_OPERAND (t, 1)))
1157 /* -(A + B) -> (-A) - B. */
1158 return negate_expr_p (TREE_OPERAND (t, 0));
1161 /* We can't turn -(A-B) into B-A when we honor signed zeros. */
1162 return !HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
1163 && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
1164 && reorder_operands_p (TREE_OPERAND (t, 0),
1165 TREE_OPERAND (t, 1));
1168 if (TYPE_UNSIGNED (TREE_TYPE (t)))
1174 if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
1175 return negate_expr_p (TREE_OPERAND (t, 1))
1176 || negate_expr_p (TREE_OPERAND (t, 0));
1179 case TRUNC_DIV_EXPR:
1180 case ROUND_DIV_EXPR:
1181 case FLOOR_DIV_EXPR:
1183 case EXACT_DIV_EXPR:
1184 /* In general we can't negate A / B, because if A is INT_MIN and
1185 B is 1, we may turn this into INT_MIN / -1 which is undefined
1186 and actually traps on some architectures. But if overflow is
1187 undefined, we can negate, because - (INT_MIN / 1) is an
1188 overflow.
1189 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
1190 && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t)))
1192 return negate_expr_p (TREE_OPERAND (t, 1))
1193 || negate_expr_p (TREE_OPERAND (t, 0));
1196 /* Negate -((double)float) as (double)(-float). */
1197 if (TREE_CODE (type) == REAL_TYPE)
1199 tree tem = strip_float_extensions (t);
1201 return negate_expr_p (tem);
1206 /* Negate -f(x) as f(-x). */
1207 if (negate_mathfn_p (builtin_mathfn_code (t)))
1208 return negate_expr_p (CALL_EXPR_ARG (t, 0));
1212 /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */
1213 if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
1215 tree op1 = TREE_OPERAND (t, 1);
1216 if (TREE_INT_CST_HIGH (op1) == 0
1217 && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
1218 == TREE_INT_CST_LOW (op1))
1229 /* Given T, an expression, return a folded tree for -T, or NULL_TREE if no
1230 simplification is possible.
1231 If negate_expr_p would return true for T, NULL_TREE will never be
1232 returned. */
1235 fold_negate_expr (tree t)
1237 tree type = TREE_TYPE (t);
1240 switch (TREE_CODE (t))
1242 /* Convert - (~A) to A + 1. */
1244 if (INTEGRAL_TYPE_P (type))
1245 return fold_build2 (PLUS_EXPR, type, TREE_OPERAND (t, 0),
1246 build_int_cst (type, 1));
1250 tem = fold_negate_const (t, type);
1251 if (TREE_OVERFLOW (tem) == TREE_OVERFLOW (t)
1252 || !TYPE_OVERFLOW_TRAPS (type))
1257 tem = fold_negate_const (t, type);
1258 /* Two's complement FP formats, such as c4x, may overflow. */
1259 if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
1264 tem = fold_negate_const (t, type);
1269 tree rpart = negate_expr (TREE_REALPART (t));
1270 tree ipart = negate_expr (TREE_IMAGPART (t));
1272 if ((TREE_CODE (rpart) == REAL_CST
1273 && TREE_CODE (ipart) == REAL_CST)
1274 || (TREE_CODE (rpart) == INTEGER_CST
1275 && TREE_CODE (ipart) == INTEGER_CST))
1276 return build_complex (type, rpart, ipart);
1281 if (negate_expr_p (t))
1282 return fold_build2 (COMPLEX_EXPR, type,
1283 fold_negate_expr (TREE_OPERAND (t, 0)),
1284 fold_negate_expr (TREE_OPERAND (t, 1)));
1288 if (negate_expr_p (t))
1289 return fold_build1 (CONJ_EXPR, type,
1290 fold_negate_expr (TREE_OPERAND (t, 0)));
1294 return TREE_OPERAND (t, 0);
1297 if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
1298 && !HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
1300 /* -(A + B) -> (-B) - A. */
1301 if (negate_expr_p (TREE_OPERAND (t, 1))
1302 && reorder_operands_p (TREE_OPERAND (t, 0),
1303 TREE_OPERAND (t, 1)))
1305 tem = negate_expr (TREE_OPERAND (t, 1));
1306 return fold_build2 (MINUS_EXPR, type,
1307 tem, TREE_OPERAND (t, 0));
1310 /* -(A + B) -> (-A) - B. */
1311 if (negate_expr_p (TREE_OPERAND (t, 0)))
1313 tem = negate_expr (TREE_OPERAND (t, 0));
1314 return fold_build2 (MINUS_EXPR, type,
1315 tem, TREE_OPERAND (t, 1));
1321 /* - (A - B) -> B - A */
1322 if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
1323 && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
1324 && reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1)))
1325 return fold_build2 (MINUS_EXPR, type,
1326 TREE_OPERAND (t, 1), TREE_OPERAND (t, 0));
1330 if (TYPE_UNSIGNED (type))
1336 if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type)))
1338 tem = TREE_OPERAND (t, 1);
1339 if (negate_expr_p (tem))
1340 return fold_build2 (TREE_CODE (t), type,
1341 TREE_OPERAND (t, 0), negate_expr (tem));
1342 tem = TREE_OPERAND (t, 0);
1343 if (negate_expr_p (tem))
1344 return fold_build2 (TREE_CODE (t), type,
1345 negate_expr (tem), TREE_OPERAND (t, 1));
1349 case TRUNC_DIV_EXPR:
1350 case ROUND_DIV_EXPR:
1351 case FLOOR_DIV_EXPR:
1353 case EXACT_DIV_EXPR:
1354 /* In general we can't negate A / B, because if A is INT_MIN and
1355 B is 1, we may turn this into INT_MIN / -1 which is undefined
1356 and actually traps on some architectures. But if overflow is
1357 undefined, we can negate, because - (INT_MIN / 1) is an
1358 overflow.
1359 if (!INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_UNDEFINED (type))
1361 const char * const warnmsg = G_("assuming signed overflow does not "
1362 "occur when negating a division");
1363 tem = TREE_OPERAND (t, 1);
1364 if (negate_expr_p (tem))
1366 if (INTEGRAL_TYPE_P (type)
1367 && (TREE_CODE (tem) != INTEGER_CST
1368 || integer_onep (tem)))
1369 fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
1370 return fold_build2 (TREE_CODE (t), type,
1371 TREE_OPERAND (t, 0), negate_expr (tem));
1373 tem = TREE_OPERAND (t, 0);
1374 if (negate_expr_p (tem))
1376 if (INTEGRAL_TYPE_P (type)
1377 && (TREE_CODE (tem) != INTEGER_CST
1378 || tree_int_cst_equal (tem, TYPE_MIN_VALUE (type))))
1379 fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
1380 return fold_build2 (TREE_CODE (t), type,
1381 negate_expr (tem), TREE_OPERAND (t, 1));
1387 /* Convert -((double)float) into (double)(-float). */
1388 if (TREE_CODE (type) == REAL_TYPE)
1390 tem = strip_float_extensions (t);
1391 if (tem != t && negate_expr_p (tem))
1392 return fold_convert (type, negate_expr (tem));
1397 /* Negate -f(x) as f(-x). */
1398 if (negate_mathfn_p (builtin_mathfn_code (t))
1399 && negate_expr_p (CALL_EXPR_ARG (t, 0)))
1403 fndecl = get_callee_fndecl (t);
1404 arg = negate_expr (CALL_EXPR_ARG (t, 0));
1405 return build_call_expr (fndecl, 1, arg);
1410 /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */
1411 if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
1413 tree op1 = TREE_OPERAND (t, 1);
1414 if (TREE_INT_CST_HIGH (op1) == 0
1415 && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
1416 == TREE_INT_CST_LOW (op1))
1418 tree ntype = TYPE_UNSIGNED (type)
1419 ? signed_type_for (type)
1420 : unsigned_type_for (type);
1421 tree temp = fold_convert (ntype, TREE_OPERAND (t, 0));
1422 temp = fold_build2 (RSHIFT_EXPR, ntype, temp, op1);
1423 return fold_convert (type, temp);
1435 /* Like fold_negate_expr, but return a NEGATE_EXPR tree if T cannot be
1436 negated in a simpler way. Also allow for T to be NULL_TREE, in which case
1437 return NULL_TREE. */
1440 negate_expr (tree t)
1447 type = TREE_TYPE (t);
1448 STRIP_SIGN_NOPS (t);
1450 tem = fold_negate_expr (t);
1452 tem = build1 (NEGATE_EXPR, TREE_TYPE (t), t);
1453 return fold_convert (type, tem);
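/* Illustrative sketch, not part of the original file: negate_expr
   folds the negation of a constant directly instead of wrapping it in
   a NEGATE_EXPR.  */
#if 0
static void
negate_expr_example (void)
{
  tree neg = negate_expr (build_int_cst (integer_type_node, 5));
  gcc_assert (TREE_CODE (neg) == INTEGER_CST
	      && TREE_INT_CST_LOW (neg) == (unsigned HOST_WIDE_INT) -5);
}
#endif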
1456 /* Split a tree IN into constant, literal and variable parts that could be
1457 combined with CODE to make IN. "constant" means an expression with
1458 TREE_CONSTANT but that isn't an actual constant. CODE must be a
1459 commutative arithmetic operation. Store the constant part into *CONP,
1460 the literal in *LITP and return the variable part. If a part isn't
1461 present, set it to null. If the tree does not decompose in this way,
1462 return the entire tree as the variable part and the other parts as null.
1464 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR. In that
1465 case, we negate an operand that was subtracted, except if it is a
1466 literal, for which we use *MINUS_LITP instead.
1468 If NEGATE_P is true, we are negating all of IN, again except a literal
1469 for which we use *MINUS_LITP instead.
1471 If IN is itself a literal or constant, return it as appropriate.
1473 Note that we do not guarantee that any of the three values will be the
1474 same type as IN, but they will have the same signedness and mode. */
1477 split_tree (tree in, enum tree_code code, tree *conp, tree *litp,
1478 tree *minus_litp, int negate_p)
1486 /* Strip any conversions that don't change the machine mode or signedness. */
1487 STRIP_SIGN_NOPS (in);
1489 if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST
1490 || TREE_CODE (in) == FIXED_CST)
1492 else if (TREE_CODE (in) == code
1493 || ((! FLOAT_TYPE_P (TREE_TYPE (in)) || flag_associative_math)
1494 && ! SAT_FIXED_POINT_TYPE_P (TREE_TYPE (in))
1495 /* We can associate addition and subtraction together (even
1496 though the C standard doesn't say so) for integers because
1497 the value is not affected. For reals, the value might be
1498 affected, so we can't. */
1499 && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
1500 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
1502 tree op0 = TREE_OPERAND (in, 0);
1503 tree op1 = TREE_OPERAND (in, 1);
1504 int neg1_p = TREE_CODE (in) == MINUS_EXPR;
1505 int neg_litp_p = 0, neg_conp_p = 0, neg_var_p = 0;
1507 /* First see if either of the operands is a literal, then a constant. */
1508 if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST
1509 || TREE_CODE (op0) == FIXED_CST)
1510 *litp = op0, op0 = 0;
1511 else if (TREE_CODE (op1) == INTEGER_CST || TREE_CODE (op1) == REAL_CST
1512 || TREE_CODE (op1) == FIXED_CST)
1513 *litp = op1, neg_litp_p = neg1_p, op1 = 0;
1515 if (op0 != 0 && TREE_CONSTANT (op0))
1516 *conp = op0, op0 = 0;
1517 else if (op1 != 0 && TREE_CONSTANT (op1))
1518 *conp = op1, neg_conp_p = neg1_p, op1 = 0;
1520 /* If we haven't dealt with either operand, this is not a case we can
1521 decompose. Otherwise, VAR is either of the ones remaining, if any. */
1522 if (op0 != 0 && op1 != 0)
1527 var = op1, neg_var_p = neg1_p;
1529 /* Now do any needed negations. */
1531 *minus_litp = *litp, *litp = 0;
1533 *conp = negate_expr (*conp);
1535 var = negate_expr (var);
1537 else if (TREE_CONSTANT (in))
1545 *minus_litp = *litp, *litp = 0;
1546 else if (*minus_litp)
1547 *litp = *minus_litp, *minus_litp = 0;
1548 *conp = negate_expr (*conp);
1549 var = negate_expr (var);
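/* Illustrative sketch, not part of the original file: splitting the
   PLUS_EXPR "x + 4" separates the variable part from the literal.
   VAR_X stands for some VAR_DECL of type int and is hypothetical.  */
#if 0
static void
split_tree_example (tree var_x)
{
  tree con, lit, minus_lit;
  tree in = build2 (PLUS_EXPR, integer_type_node, var_x,
		    build_int_cst (integer_type_node, 4));
  tree var = split_tree (in, PLUS_EXPR, &con, &lit, &minus_lit, 0);
  gcc_assert (var == var_x && lit != NULL_TREE
	      && TREE_INT_CST_LOW (lit) == 4);
}
#endif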
1555 /* Re-associate trees split by the above function. T1 and T2 are either
1556 expressions to associate or null. Return the new expression, if any. If
1557 we build an operation, do it in TYPE and with CODE. */
1560 associate_trees (tree t1, tree t2, enum tree_code code, tree type)
1567 /* If either input is CODE, a PLUS_EXPR, or a MINUS_EXPR, don't
1568 try to fold this since we will have infinite recursion. But do
1569 deal with any NEGATE_EXPRs. */
1570 if (TREE_CODE (t1) == code || TREE_CODE (t2) == code
1571 || TREE_CODE (t1) == MINUS_EXPR || TREE_CODE (t2) == MINUS_EXPR)
1573 if (code == PLUS_EXPR)
1575 if (TREE_CODE (t1) == NEGATE_EXPR)
1576 return build2 (MINUS_EXPR, type, fold_convert (type, t2),
1577 fold_convert (type, TREE_OPERAND (t1, 0)));
1578 else if (TREE_CODE (t2) == NEGATE_EXPR)
1579 return build2 (MINUS_EXPR, type, fold_convert (type, t1),
1580 fold_convert (type, TREE_OPERAND (t2, 0)));
1581 else if (integer_zerop (t2))
1582 return fold_convert (type, t1);
1584 else if (code == MINUS_EXPR)
1586 if (integer_zerop (t2))
1587 return fold_convert (type, t1);
1590 return build2 (code, type, fold_convert (type, t1),
1591 fold_convert (type, t2));
1594 return fold_build2 (code, type, fold_convert (type, t1),
1595 fold_convert (type, t2));
1598 /* Check whether TYPE1 and TYPE2 are equivalent integer types, suitable
1599 for use in int_const_binop, size_binop and size_diffop. */
1602 int_binop_types_match_p (enum tree_code code, const_tree type1, const_tree type2)
1604 if (TREE_CODE (type1) != INTEGER_TYPE && !POINTER_TYPE_P (type1))
1606 if (TREE_CODE (type2) != INTEGER_TYPE && !POINTER_TYPE_P (type2))
1621 return TYPE_UNSIGNED (type1) == TYPE_UNSIGNED (type2)
1622 && TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
1623 && TYPE_MODE (type1) == TYPE_MODE (type2);
1627 /* Combine two integer constants ARG1 and ARG2 under operation CODE
1628 to produce a new constant. Return NULL_TREE if we don't know how
1629 to evaluate CODE at compile-time.
1631 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1634 int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2, int notrunc)
1636 unsigned HOST_WIDE_INT int1l, int2l;
1637 HOST_WIDE_INT int1h, int2h;
1638 unsigned HOST_WIDE_INT low;
1640 unsigned HOST_WIDE_INT garbagel;
1641 HOST_WIDE_INT garbageh;
1643 tree type = TREE_TYPE (arg1);
1644 int uns = TYPE_UNSIGNED (type);
1646 = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
1649 int1l = TREE_INT_CST_LOW (arg1);
1650 int1h = TREE_INT_CST_HIGH (arg1);
1651 int2l = TREE_INT_CST_LOW (arg2);
1652 int2h = TREE_INT_CST_HIGH (arg2);
1657 low = int1l | int2l, hi = int1h | int2h;
1661 low = int1l ^ int2l, hi = int1h ^ int2h;
1665 low = int1l & int2l, hi = int1h & int2h;
1671 /* It's unclear from the C standard whether shifts can overflow.
1672 The following code ignores overflow; perhaps a C standard
1673 interpretation ruling is needed. */
1674 lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
1681 lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
1686 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
1690 neg_double (int2l, int2h, &low, &hi);
1691 add_double (int1l, int1h, low, hi, &low, &hi);
1692 overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h);
1696 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1699 case TRUNC_DIV_EXPR:
1700 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1701 case EXACT_DIV_EXPR:
1702 /* This is a shortcut for a common special case. */
1703 if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
1704 && !TREE_OVERFLOW (arg1)
1705 && !TREE_OVERFLOW (arg2)
1706 && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
1708 if (code == CEIL_DIV_EXPR)
1711 low = int1l / int2l, hi = 0;
1715 /* ... fall through ... */
1717 case ROUND_DIV_EXPR:
1718 if (int2h == 0 && int2l == 0)
1720 if (int2h == 0 && int2l == 1)
1722 low = int1l, hi = int1h;
1725 if (int1l == int2l && int1h == int2h
1726 && ! (int1l == 0 && int1h == 0))
1731 overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
1732 &low, &hi, &garbagel, &garbageh);
1735 case TRUNC_MOD_EXPR:
1736 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
1737 /* This is a shortcut for a common special case. */
1738 if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
1739 && !TREE_OVERFLOW (arg1)
1740 && !TREE_OVERFLOW (arg2)
1741 && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
1743 if (code == CEIL_MOD_EXPR)
1745 low = int1l % int2l, hi = 0;
1749 /* ... fall through ... */
1751 case ROUND_MOD_EXPR:
1752 if (int2h == 0 && int2l == 0)
1754 overflow = div_and_round_double (code, uns,
1755 int1l, int1h, int2l, int2h,
1756 &garbagel, &garbageh, &low, &hi);
1762 low = (((unsigned HOST_WIDE_INT) int1h
1763 < (unsigned HOST_WIDE_INT) int2h)
1764 || (((unsigned HOST_WIDE_INT) int1h
1765 == (unsigned HOST_WIDE_INT) int2h)
1768 low = (int1h < int2h
1769 || (int1h == int2h && int1l < int2l));
1771 if (low == (code == MIN_EXPR))
1772 low = int1l, hi = int1h;
1774 low = int2l, hi = int2h;
1783 t = build_int_cst_wide (TREE_TYPE (arg1), low, hi);
1785 /* Propagate overflow flags ourselves. */
1786 if (((!uns || is_sizetype) && overflow)
1787 | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
1790 TREE_OVERFLOW (t) = 1;
1794 t = force_fit_type_double (TREE_TYPE (arg1), low, hi, 1,
1795 ((!uns || is_sizetype) && overflow)
1796 | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
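/* Illustrative sketch, not part of the original file: int_const_binop
   evaluates 6 * 7 at compile time without overflow.  */
#if 0
static void
int_const_binop_example (void)
{
  tree prod = int_const_binop (MULT_EXPR,
			       build_int_cst (integer_type_node, 6),
			       build_int_cst (integer_type_node, 7), 0);
  gcc_assert (TREE_INT_CST_LOW (prod) == 42 && !TREE_OVERFLOW (prod));
}
#endif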
1801 /* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
1802 constant. We assume ARG1 and ARG2 have the same data type, or at least
1803 are the same kind of constant and the same machine mode. Return zero if
1804 combining the constants is not allowed in the current operating mode.
1806 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1809 const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
1811 /* Sanity check for the recursive cases. */
1818 if (TREE_CODE (arg1) == INTEGER_CST)
1819 return int_const_binop (code, arg1, arg2, notrunc);
1821 if (TREE_CODE (arg1) == REAL_CST)
1823 enum machine_mode mode;
1826 REAL_VALUE_TYPE value;
1827 REAL_VALUE_TYPE result;
1831 /* The following codes are handled by real_arithmetic. */
1846 d1 = TREE_REAL_CST (arg1);
1847 d2 = TREE_REAL_CST (arg2);
1849 type = TREE_TYPE (arg1);
1850 mode = TYPE_MODE (type);
1852 /* Don't perform operation if we honor signaling NaNs and
1853 either operand is a NaN. */
1854 if (HONOR_SNANS (mode)
1855 && (REAL_VALUE_ISNAN (d1) || REAL_VALUE_ISNAN (d2)))
1858 /* Don't perform operation if it would raise a division
1859 by zero exception. */
1860 if (code == RDIV_EXPR
1861 && REAL_VALUES_EQUAL (d2, dconst0)
1862 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1865 /* If either operand is a NaN, just return it. Otherwise, set up
1866 for floating-point trap; we return an overflow. */
1867 if (REAL_VALUE_ISNAN (d1))
1869 else if (REAL_VALUE_ISNAN (d2))
1872 inexact = real_arithmetic (&value, code, &d1, &d2);
1873 real_convert (&result, mode, &value);
1875 /* Don't constant fold this floating point operation if
1876 the result has overflowed and flag_trapping_math is set. */
1877 if (flag_trapping_math
1878 && MODE_HAS_INFINITIES (mode)
1879 && REAL_VALUE_ISINF (result)
1880 && !REAL_VALUE_ISINF (d1)
1881 && !REAL_VALUE_ISINF (d2))
1884 /* Don't constant fold this floating point operation if the
1885 result may depend upon the run-time rounding mode and
1886 flag_rounding_math is set, or if GCC's software emulation
1887 is unable to accurately represent the result. */
1888 if ((flag_rounding_math
1889 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
1890 && (inexact || !real_identical (&result, &value)))
1893 t = build_real (type, result);
1895 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
1899 if (TREE_CODE (arg1) == FIXED_CST)
1901 FIXED_VALUE_TYPE f1;
1902 FIXED_VALUE_TYPE f2;
1903 FIXED_VALUE_TYPE result;
1908 /* The following codes are handled by fixed_arithmetic. */
1914 case TRUNC_DIV_EXPR:
1915 f2 = TREE_FIXED_CST (arg2);
1920 f2.data.high = TREE_INT_CST_HIGH (arg2);
1921 f2.data.low = TREE_INT_CST_LOW (arg2);
1929 f1 = TREE_FIXED_CST (arg1);
1930 type = TREE_TYPE (arg1);
1931 sat_p = TYPE_SATURATING (type);
1932 overflow_p = fixed_arithmetic (&result, code, &f1, &f2, sat_p);
1933 t = build_fixed (type, result);
1934 /* Propagate overflow flags. */
1935 if (overflow_p | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
1936 TREE_OVERFLOW (t) = 1;
1940 if (TREE_CODE (arg1) == COMPLEX_CST)
1942 tree type = TREE_TYPE (arg1);
1943 tree r1 = TREE_REALPART (arg1);
1944 tree i1 = TREE_IMAGPART (arg1);
1945 tree r2 = TREE_REALPART (arg2);
1946 tree i2 = TREE_IMAGPART (arg2);
1953 real = const_binop (code, r1, r2, notrunc);
1954 imag = const_binop (code, i1, i2, notrunc);
1958 real = const_binop (MINUS_EXPR,
1959 const_binop (MULT_EXPR, r1, r2, notrunc),
1960 const_binop (MULT_EXPR, i1, i2, notrunc),
1962 imag = const_binop (PLUS_EXPR,
1963 const_binop (MULT_EXPR, r1, i2, notrunc),
1964 const_binop (MULT_EXPR, i1, r2, notrunc),
1971 = const_binop (PLUS_EXPR,
1972 const_binop (MULT_EXPR, r2, r2, notrunc),
1973 const_binop (MULT_EXPR, i2, i2, notrunc),
1976 = const_binop (PLUS_EXPR,
1977 const_binop (MULT_EXPR, r1, r2, notrunc),
1978 const_binop (MULT_EXPR, i1, i2, notrunc),
1981 = const_binop (MINUS_EXPR,
1982 const_binop (MULT_EXPR, i1, r2, notrunc),
1983 const_binop (MULT_EXPR, r1, i2, notrunc),
1986 if (INTEGRAL_TYPE_P (TREE_TYPE (r1)))
1987 code = TRUNC_DIV_EXPR;
1989 real = const_binop (code, t1, magsquared, notrunc);
1990 imag = const_binop (code, t2, magsquared, notrunc);
1999 return build_complex (type, real, imag);
2002 if (TREE_CODE (arg1) == VECTOR_CST)
2004 tree type = TREE_TYPE (arg1);
2005 int count = TYPE_VECTOR_SUBPARTS (type), i;
2006 tree elements1, elements2, list = NULL_TREE;
2008 if (TREE_CODE (arg2) != VECTOR_CST)
2011 elements1 = TREE_VECTOR_CST_ELTS (arg1);
2012 elements2 = TREE_VECTOR_CST_ELTS (arg2);
2014 for (i = 0; i < count; i++)
2016 tree elem1, elem2, elem;
2018 /* The trailing elements can be empty and should be treated as 0. */
2020 elem1 = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
2023 elem1 = TREE_VALUE (elements1);
2024 elements1 = TREE_CHAIN (elements1);
2028 elem2 = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
2031 elem2 = TREE_VALUE (elements2);
2032 elements2 = TREE_CHAIN (elements2);
2035 elem = const_binop (code, elem1, elem2, notrunc);
2037 /* It is possible that const_binop cannot handle the given
2038 code and returns NULL_TREE. */
2039 if (elem == NULL_TREE)
2042 list = tree_cons (NULL_TREE, elem, list);
2044 return build_vector (type, nreverse (list));
2049 /* Create a size type INT_CST node with NUMBER sign extended. KIND
2050 indicates which particular sizetype to create. */
2053 size_int_kind (HOST_WIDE_INT number, enum size_type_kind kind)
2055 return build_int_cst (sizetype_tab[(int) kind], number);
2058 /* Combine operands OP1 and OP2 with arithmetic operation CODE. CODE
2059 is a tree code. The type of the result is taken from the operands.
2060 Both must be equivalent integer types, ala int_binop_types_match_p.
2061 If the operands are constant, so is the result. */
2064 size_binop (enum tree_code code, tree arg0, tree arg1)
2066 tree type = TREE_TYPE (arg0);
2068 if (arg0 == error_mark_node || arg1 == error_mark_node)
2069 return error_mark_node;
2071 gcc_assert (int_binop_types_match_p (code, TREE_TYPE (arg0),
2074 /* Handle the special case of two integer constants faster. */
2075 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
2077 /* And some specific cases even faster than that. */
2078 if (code == PLUS_EXPR)
2080 if (integer_zerop (arg0) && !TREE_OVERFLOW (arg0))
2082 if (integer_zerop (arg1) && !TREE_OVERFLOW (arg1))
2085 else if (code == MINUS_EXPR)
2087 if (integer_zerop (arg1) && !TREE_OVERFLOW (arg1))
2090 else if (code == MULT_EXPR)
2092 if (integer_onep (arg0) && !TREE_OVERFLOW (arg0))
2096 /* Handle general case of two integer constants. */
2097 return int_const_binop (code, arg0, arg1, 0);
2100 return fold_build2 (code, type, arg0, arg1);
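/* Illustrative sketch, not part of the original file: size_binop
   folds constant sizetype arithmetic immediately; size_int wraps
   size_int_kind from above.  */
#if 0
static void
size_binop_example (void)
{
  tree sum = size_binop (PLUS_EXPR, size_int (4), size_int (8));
  gcc_assert (TREE_CODE (sum) == INTEGER_CST
	      && TREE_INT_CST_LOW (sum) == 12);
}
#endif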
2103 /* Given two values, either both of sizetype or both of bitsizetype,
2104 compute the difference between the two values. Return the value
2105 in signed type corresponding to the type of the operands. */
2108 size_diffop (tree arg0, tree arg1)
2110 tree type = TREE_TYPE (arg0);
2113 gcc_assert (int_binop_types_match_p (MINUS_EXPR, TREE_TYPE (arg0),
2116 /* If the type is already signed, just do the simple thing. */
2117 if (!TYPE_UNSIGNED (type))
2118 return size_binop (MINUS_EXPR, arg0, arg1);
2120 if (type == sizetype)
2122 else if (type == bitsizetype)
2123 ctype = sbitsizetype;
2125 ctype = signed_type_for (type);
2127 /* If either operand is not a constant, do the conversions to the signed
2128 type and subtract. The hardware will do the right thing with any
2129 overflow in the subtraction. */
2130 if (TREE_CODE (arg0) != INTEGER_CST || TREE_CODE (arg1) != INTEGER_CST)
2131 return size_binop (MINUS_EXPR, fold_convert (ctype, arg0),
2132 fold_convert (ctype, arg1));
2134 /* If ARG0 is larger than ARG1, subtract and return the result in CTYPE.
2135 Otherwise, subtract the other way, convert to CTYPE (we know that can't
2136 overflow) and negate (which can't either). Special-case a result
2137 of zero while we're here. */
2138 if (tree_int_cst_equal (arg0, arg1))
2139 return build_int_cst (ctype, 0);
2140 else if (tree_int_cst_lt (arg1, arg0))
2141 return fold_convert (ctype, size_binop (MINUS_EXPR, arg0, arg1));
2143 return size_binop (MINUS_EXPR, build_int_cst (ctype, 0),
2144 fold_convert (ctype, size_binop (MINUS_EXPR,
2148 /* A subroutine of fold_convert_const handling conversions of an
2149 INTEGER_CST to another integer type. */
2152 fold_convert_const_int_from_int (tree type, const_tree arg1)
2156 /* Given an integer constant, make new constant with new type,
2157 appropriately sign-extended or truncated. */
2158 t = force_fit_type_double (type, TREE_INT_CST_LOW (arg1),
2159 TREE_INT_CST_HIGH (arg1),
2160 /* Don't set the overflow when
2161 converting from a pointer, */
2162 !POINTER_TYPE_P (TREE_TYPE (arg1))
2163 /* or to a sizetype with same signedness
2164 and the precision is unchanged.
2165 ??? sizetype is always sign-extended,
2166 but its signedness depends on the
2167 frontend. Thus we see spurious overflows
2168 here if we do not check this. */
2169 && !((TYPE_PRECISION (TREE_TYPE (arg1))
2170 == TYPE_PRECISION (type))
2171 && (TYPE_UNSIGNED (TREE_TYPE (arg1))
2172 == TYPE_UNSIGNED (type))
2173 && ((TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE
2174 && TYPE_IS_SIZETYPE (TREE_TYPE (arg1)))
2175 || (TREE_CODE (type) == INTEGER_TYPE
2176 && TYPE_IS_SIZETYPE (type)))),
2177 (TREE_INT_CST_HIGH (arg1) < 0
2178 && (TYPE_UNSIGNED (type)
2179 < TYPE_UNSIGNED (TREE_TYPE (arg1))))
2180 | TREE_OVERFLOW (arg1));
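/* Illustrative sketch, not part of the original file: narrowing 300
   to a signed 8-bit type keeps 300 mod 256 == 44 and flags the value
   change as overflow.  Assumes an 8-bit signed_char_type_node.  */
#if 0
static void
convert_int_example (void)
{
  tree t = fold_convert_const_int_from_int
    (signed_char_type_node, build_int_cst (integer_type_node, 300));
  gcc_assert (TREE_INT_CST_LOW (t) == 44 && TREE_OVERFLOW (t));
}
#endif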
2185 /* A subroutine of fold_convert_const handling conversions of a REAL_CST
2186 to an integer type. */
2189 fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg1)
2194 /* The following code implements the floating point to integer
2195 conversion rules required by the Java Language Specification,
2196 that IEEE NaNs are mapped to zero and values that overflow
2197 the target precision saturate, i.e. values greater than
2198 INT_MAX are mapped to INT_MAX, and values less than INT_MIN
2199 are mapped to INT_MIN. These semantics are allowed by the
2200 C and C++ standards that simply state that the behavior of
2201 FP-to-integer conversion is unspecified upon overflow. */
2203 HOST_WIDE_INT high, low;
2205 REAL_VALUE_TYPE x = TREE_REAL_CST (arg1);
2209 case FIX_TRUNC_EXPR:
2210 real_trunc (&r, VOIDmode, &x);
2217 /* If R is NaN, return zero and show we have an overflow. */
2218 if (REAL_VALUE_ISNAN (r))
2225 /* See if R is less than the lower bound or greater than the
2226 upper bound. */
2230 tree lt = TYPE_MIN_VALUE (type);
2231 REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt);
2232 if (REAL_VALUES_LESS (r, l))
2235 high = TREE_INT_CST_HIGH (lt);
2236 low = TREE_INT_CST_LOW (lt);
2242 tree ut = TYPE_MAX_VALUE (type);
2245 REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut);
2246 if (REAL_VALUES_LESS (u, r))
2249 high = TREE_INT_CST_HIGH (ut);
2250 low = TREE_INT_CST_LOW (ut);
2256 REAL_VALUE_TO_INT (&low, &high, r);
2258 t = force_fit_type_double (type, low, high, -1,
2259 overflow | TREE_OVERFLOW (arg1));
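/* Editor's illustrative sketch of the saturating semantics described
   above, in plain C rather than GCC's REAL_VALUE_TYPE machinery; it
   assumes <limits.h>, and relies on INT_MIN and INT_MAX being exactly
   representable in double.  Not GCC code.  */

static int
example_saturating_fp_to_int (double r)
{
  if (r != r)
    return 0;                 /* NaN maps to zero (overflow is flagged).  */
  if (r < (double) INT_MIN)
    return INT_MIN;           /* Saturate at the lower bound.  */
  if (r > (double) INT_MAX)
    return INT_MAX;           /* Saturate at the upper bound.  */
  return (int) r;             /* In range: truncate toward zero.  */
}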
2263 /* A subroutine of fold_convert_const handling conversions of a
2264 FIXED_CST to an integer type. */
2267 fold_convert_const_int_from_fixed (tree type, const_tree arg1)
2270 double_int temp, temp_trunc;
2273 /* Right shift FIXED_CST to temp by fbit. */
2274 temp = TREE_FIXED_CST (arg1).data;
2275 mode = TREE_FIXED_CST (arg1).mode;
2276 if (GET_MODE_FBIT (mode) < 2 * HOST_BITS_PER_WIDE_INT)
2278 lshift_double (temp.low, temp.high,
2279 - GET_MODE_FBIT (mode), 2 * HOST_BITS_PER_WIDE_INT,
2280 &temp.low, &temp.high, SIGNED_FIXED_POINT_MODE_P (mode));
2282 /* Left shift temp to temp_trunc by fbit. */
2283 lshift_double (temp.low, temp.high,
2284 GET_MODE_FBIT (mode), 2 * HOST_BITS_PER_WIDE_INT,
2285 &temp_trunc.low, &temp_trunc.high,
2286 SIGNED_FIXED_POINT_MODE_P (mode));
2293 temp_trunc.high = 0;
2296 /* If FIXED_CST is negative, we need to round the value toward 0
2297 by adding 1 to TEMP when the discarded fractional bits are nonzero. */
2298 if (SIGNED_FIXED_POINT_MODE_P (mode) && temp_trunc.high < 0
2299 && !double_int_equal_p (TREE_FIXED_CST (arg1).data, temp_trunc))
2304 temp = double_int_add (temp, one);
2307 /* Given a fixed-point constant, make new constant with new type,
2308 appropriately sign-extended or truncated. */
2309 t = force_fit_type_double (type, temp.low, temp.high, -1,
2311 && (TYPE_UNSIGNED (type)
2312 < TYPE_UNSIGNED (TREE_TYPE (arg1))))
2313 | TREE_OVERFLOW (arg1));
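/* Editor's sketch (not GCC code) of the rounding rule above for a signed
   fixed-point value with FBITS fractional bits: the arithmetic right
   shift floors, so a negative value whose discarded fractional bits are
   nonzero gets 1 added back to round toward zero.  Assumes >> on a
   negative operand is an arithmetic shift, as on GCC's targets.  */

static long long
example_fixed_to_int (long long fixed, int fbits)
{
  long long frac_mask = (1LL << fbits) - 1;
  long long truncated = fixed >> fbits;   /* Floors negative values.  */
  if (fixed < 0 && (fixed & frac_mask) != 0)
    truncated += 1;                       /* Round toward zero instead.  */
  return truncated;
}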
2318 /* A subroutine of fold_convert_const handling conversions of a REAL_CST
2319 to another floating point type. */
2322 fold_convert_const_real_from_real (tree type, const_tree arg1)
2324 REAL_VALUE_TYPE value;
2327 real_convert (&value, TYPE_MODE (type), &TREE_REAL_CST (arg1));
2328 t = build_real (type, value);
2330 /* If converting an infinity or NAN to a representation that doesn't
2331 have one, set the overflow bit so that we can produce some kind of
2332 error message at the appropriate point if necessary. It's not the
2333 most user-friendly message, but it's better than nothing. */
2334 if (REAL_VALUE_ISINF (TREE_REAL_CST (arg1))
2335 && !MODE_HAS_INFINITIES (TYPE_MODE (type)))
2336 TREE_OVERFLOW (t) = 1;
2337 else if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1))
2338 && !MODE_HAS_NANS (TYPE_MODE (type)))
2339 TREE_OVERFLOW (t) = 1;
2340 /* Regular overflow, conversion produced an infinity in a mode that
2341 can't represent them. */
2342 else if (!MODE_HAS_INFINITIES (TYPE_MODE (type))
2343 && REAL_VALUE_ISINF (value)
2344 && !REAL_VALUE_ISINF (TREE_REAL_CST (arg1)))
2345 TREE_OVERFLOW (t) = 1;
2347 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
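/* Editor's sketch of the "regular overflow" test above in plain C terms
   (assumes <math.h> for isinf): a narrowing conversion overflowed
   exactly when the result is infinite although the source was finite.  */

static int
example_conversion_overflowed (double source, float converted)
{
  return isinf (converted) && !isinf (source);
}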
2351 /* A subroutine of fold_convert_const handling conversions of a FIXED_CST
2352 to a floating point type. */
2355 fold_convert_const_real_from_fixed (tree type, const_tree arg1)
2357 REAL_VALUE_TYPE value;
2360 real_convert_from_fixed (&value, TYPE_MODE (type), &TREE_FIXED_CST (arg1));
2361 t = build_real (type, value);
2363 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
2367 /* A subroutine of fold_convert_const handling conversions of a FIXED_CST
2368 to another fixed-point type. */
2371 fold_convert_const_fixed_from_fixed (tree type, const_tree arg1)
2373 FIXED_VALUE_TYPE value;
2377 overflow_p = fixed_convert (&value, TYPE_MODE (type), &TREE_FIXED_CST (arg1),
2378 TYPE_SATURATING (type));
2379 t = build_fixed (type, value);
2381 /* Propagate overflow flags. */
2382 if (overflow_p | TREE_OVERFLOW (arg1))
2383 TREE_OVERFLOW (t) = 1;
2387 /* A subroutine of fold_convert_const handling conversions of an INTEGER_CST
2388 to a fixed-point type. */
2391 fold_convert_const_fixed_from_int (tree type, const_tree arg1)
2393 FIXED_VALUE_TYPE value;
2397 overflow_p = fixed_convert_from_int (&value, TYPE_MODE (type),
2398 TREE_INT_CST (arg1),
2399 TYPE_UNSIGNED (TREE_TYPE (arg1)),
2400 TYPE_SATURATING (type));
2401 t = build_fixed (type, value);
2403 /* Propagate overflow flags. */
2404 if (overflow_p | TREE_OVERFLOW (arg1))
2405 TREE_OVERFLOW (t) = 1;
2409 /* A subroutine of fold_convert_const handling conversions of a REAL_CST
2410 to a fixed-point type. */
2413 fold_convert_const_fixed_from_real (tree type, const_tree arg1)
2415 FIXED_VALUE_TYPE value;
2419 overflow_p = fixed_convert_from_real (&value, TYPE_MODE (type),
2420 &TREE_REAL_CST (arg1),
2421 TYPE_SATURATING (type));
2422 t = build_fixed (type, value);
2424 /* Propagate overflow flags. */
2425 if (overflow_p | TREE_OVERFLOW (arg1))
2426 TREE_OVERFLOW (t) = 1;
2430 /* Attempt to fold type conversion operation CODE of expression ARG1 to
2431 type TYPE. If no simplification can be done return NULL_TREE. */
2434 fold_convert_const (enum tree_code code, tree type, tree arg1)
2436 if (TREE_TYPE (arg1) == type)
2439 if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type)
2440 || TREE_CODE (type) == OFFSET_TYPE)
2442 if (TREE_CODE (arg1) == INTEGER_CST)
2443 return fold_convert_const_int_from_int (type, arg1);
2444 else if (TREE_CODE (arg1) == REAL_CST)
2445 return fold_convert_const_int_from_real (code, type, arg1);
2446 else if (TREE_CODE (arg1) == FIXED_CST)
2447 return fold_convert_const_int_from_fixed (type, arg1);
2449 else if (TREE_CODE (type) == REAL_TYPE)
2451 if (TREE_CODE (arg1) == INTEGER_CST)
2452 return build_real_from_int_cst (type, arg1);
2453 else if (TREE_CODE (arg1) == REAL_CST)
2454 return fold_convert_const_real_from_real (type, arg1);
2455 else if (TREE_CODE (arg1) == FIXED_CST)
2456 return fold_convert_const_real_from_fixed (type, arg1);
2458 else if (TREE_CODE (type) == FIXED_POINT_TYPE)
2460 if (TREE_CODE (arg1) == FIXED_CST)
2461 return fold_convert_const_fixed_from_fixed (type, arg1);
2462 else if (TREE_CODE (arg1) == INTEGER_CST)
2463 return fold_convert_const_fixed_from_int (type, arg1);
2464 else if (TREE_CODE (arg1) == REAL_CST)
2465 return fold_convert_const_fixed_from_real (type, arg1);
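/* Editor's summary of the dispatch in fold_convert_const above, by
   target type class and constant kind; cells name the
   fold_convert_const_* subroutines, except where noted:

                          INTEGER_CST      REAL_CST         FIXED_CST
     integral/pointer     int_from_int     int_from_real    int_from_fixed
     REAL_TYPE            (build_real_     real_from_real   real_from_fixed
                           from_int_cst)
     FIXED_POINT_TYPE     fixed_from_int   fixed_from_real  fixed_from_fixed

   Any other combination falls through, and NULL_TREE is returned.  */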
2470 /* Construct a vector of zero elements of vector type TYPE. */
2473 build_zero_vector (tree type)
2478 elem = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
2479 units = TYPE_VECTOR_SUBPARTS (type);
2482 for (i = 0; i < units; i++)
2483 list = tree_cons (NULL_TREE, elem, list);
2484 return build_vector (type, list);
2487 /* Returns true if ARG is convertible to TYPE using a NOP_EXPR. */
2490 fold_convertible_p (const_tree type, const_tree arg)
2492 tree orig = TREE_TYPE (arg);
2497 if (TREE_CODE (arg) == ERROR_MARK
2498 || TREE_CODE (type) == ERROR_MARK
2499 || TREE_CODE (orig) == ERROR_MARK)
2502 if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
2505 switch (TREE_CODE (type))
2507 case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
2508 case POINTER_TYPE: case REFERENCE_TYPE:
2510 if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
2511 || TREE_CODE (orig) == OFFSET_TYPE)
2513 return (TREE_CODE (orig) == VECTOR_TYPE
2514 && tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
2517 case FIXED_POINT_TYPE:
2521 return TREE_CODE (type) == TREE_CODE (orig);
2528 /* Convert expression ARG to type TYPE. Used by the middle-end for
2529 simple conversions in preference to calling the front-end's convert. */
2532 fold_convert (tree type, tree arg)
2534 tree orig = TREE_TYPE (arg);
2540 if (TREE_CODE (arg) == ERROR_MARK
2541 || TREE_CODE (type) == ERROR_MARK
2542 || TREE_CODE (orig) == ERROR_MARK)
2543 return error_mark_node;
2545 if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
2546 return fold_build1 (NOP_EXPR, type, arg);
2548 switch (TREE_CODE (type))
2550 case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
2551 case POINTER_TYPE: case REFERENCE_TYPE:
2553 if (TREE_CODE (arg) == INTEGER_CST)
2555 tem = fold_convert_const (NOP_EXPR, type, arg);
2556 if (tem != NULL_TREE)
2559 if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
2560 || TREE_CODE (orig) == OFFSET_TYPE)
2561 return fold_build1 (NOP_EXPR, type, arg);
2562 if (TREE_CODE (orig) == COMPLEX_TYPE)
2564 tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2565 return fold_convert (type, tem);
2567 gcc_assert (TREE_CODE (orig) == VECTOR_TYPE
2568 && tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
2569 return fold_build1 (NOP_EXPR, type, arg);
2572 if (TREE_CODE (arg) == INTEGER_CST)
2574 tem = fold_convert_const (FLOAT_EXPR, type, arg);
2575 if (tem != NULL_TREE)
2578 else if (TREE_CODE (arg) == REAL_CST)
2580 tem = fold_convert_const (NOP_EXPR, type, arg);
2581 if (tem != NULL_TREE)
2584 else if (TREE_CODE (arg) == FIXED_CST)
2586 tem = fold_convert_const (FIXED_CONVERT_EXPR, type, arg);
2587 if (tem != NULL_TREE)
2591 switch (TREE_CODE (orig))
2594 case BOOLEAN_TYPE: case ENUMERAL_TYPE:
2595 case POINTER_TYPE: case REFERENCE_TYPE:
2596 return fold_build1 (FLOAT_EXPR, type, arg);
2599 return fold_build1 (NOP_EXPR, type, arg);
2601 case FIXED_POINT_TYPE:
2602 return fold_build1 (FIXED_CONVERT_EXPR, type, arg);
2605 tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2606 return fold_convert (type, tem);
2612 case FIXED_POINT_TYPE:
2613 if (TREE_CODE (arg) == FIXED_CST || TREE_CODE (arg) == INTEGER_CST
2614 || TREE_CODE (arg) == REAL_CST)
2616 tem = fold_convert_const (FIXED_CONVERT_EXPR, type, arg);
2617 if (tem != NULL_TREE)
2621 switch (TREE_CODE (orig))
2623 case FIXED_POINT_TYPE:
2628 return fold_build1 (FIXED_CONVERT_EXPR, type, arg);
2631 tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2632 return fold_convert (type, tem);
2639 switch (TREE_CODE (orig))
2642 case BOOLEAN_TYPE: case ENUMERAL_TYPE:
2643 case POINTER_TYPE: case REFERENCE_TYPE:
2645 case FIXED_POINT_TYPE:
2646 return fold_build2 (COMPLEX_EXPR, type,
2647 fold_convert (TREE_TYPE (type), arg),
2648 fold_convert (TREE_TYPE (type),
2649 integer_zero_node));
2654 if (TREE_CODE (arg) == COMPLEX_EXPR)
2656 rpart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 0));
2657 ipart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 1));
2658 return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
2661 arg = save_expr (arg);
2662 rpart = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2663 ipart = fold_build1 (IMAGPART_EXPR, TREE_TYPE (orig), arg);
2664 rpart = fold_convert (TREE_TYPE (type), rpart);
2665 ipart = fold_convert (TREE_TYPE (type), ipart);
2666 return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
2674 if (integer_zerop (arg))
2675 return build_zero_vector (type);
2676 gcc_assert (tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
2677 gcc_assert (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
2678 || TREE_CODE (orig) == VECTOR_TYPE);
2679 return fold_build1 (VIEW_CONVERT_EXPR, type, arg);
2682 tem = fold_ignored_result (arg);
2683 if (TREE_CODE (tem) == MODIFY_EXPR)
2685 return fold_build1 (NOP_EXPR, type, tem);
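/* Editor's note on a typical middle-end call shape for fold_convert:

     tree t = fold_convert (size_type_node, expr);

   Constant operands resolve through fold_convert_const above; anything
   else is wrapped in the appropriate NOP_EXPR, FLOAT_EXPR,
   FIXED_CONVERT_EXPR, VIEW_CONVERT_EXPR or COMPLEX_EXPR.  */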
2692 /* Return false if expr can be assumed not to be an lvalue, true otherwise. */
2696 maybe_lvalue_p (const_tree x)
2698 /* We only need to wrap lvalue tree codes. */
2699 switch (TREE_CODE (x))
2710 case ALIGN_INDIRECT_REF:
2711 case MISALIGNED_INDIRECT_REF:
2713 case ARRAY_RANGE_REF:
2719 case PREINCREMENT_EXPR:
2720 case PREDECREMENT_EXPR:
2722 case TRY_CATCH_EXPR:
2723 case WITH_CLEANUP_EXPR:
2734 /* Assume the worst for front-end tree codes. */
2735 if ((int)TREE_CODE (x) >= NUM_TREE_CODES)
2743 /* Return an expr equal to X but certainly not valid as an lvalue. */
2748 /* While we are in GIMPLE, NON_LVALUE_EXPR doesn't mean anything to
2753 if (! maybe_lvalue_p (x))
2755 return build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
2758 /* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
2759 Zero means allow extended lvalues. */
2761 int pedantic_lvalues;
2763 /* When pedantic, return an expr equal to X but certainly not valid as a
2764 pedantic lvalue. Otherwise, return X. */
2767 pedantic_non_lvalue (tree x)
2769 if (pedantic_lvalues)
2770 return non_lvalue (x);
2775 /* Given a tree comparison code, return the code that is the logical inverse
2776 of the given code. It is not safe to do this for floating-point
2777 comparisons, except for NE_EXPR and EQ_EXPR, so we also receive
2778 HONOR_NANS: if reversing the comparison is unsafe, return ERROR_MARK. */
2781 invert_tree_comparison (enum tree_code code, bool honor_nans)
2783 if (honor_nans && flag_trapping_math)
2793 return honor_nans ? UNLE_EXPR : LE_EXPR;
2795 return honor_nans ? UNLT_EXPR : LT_EXPR;
2797 return honor_nans ? UNGE_EXPR : GE_EXPR;
2799 return honor_nans ? UNGT_EXPR : GT_EXPR;
2813 return UNORDERED_EXPR;
2814 case UNORDERED_EXPR:
2815 return ORDERED_EXPR;
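/* Editor's worked example of the NaN rule above, in plain C (not GCC
   code): when either operand is NaN, a < b and a >= b are both false,
   so the logical inverse of LT_EXPR is UNGE_EXPR, not GE_EXPR.  */

static int
example_inverse_of_lt (double a, double b)
{
  return !(a < b);   /* True for NaN operands too: UNGE, not GE.  */
}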
2821 /* Similar, but return the comparison that results if the operands are
2822 swapped. This is safe for floating-point. */
2825 swap_tree_comparison (enum tree_code code)
2832 case UNORDERED_EXPR:
2858 /* Convert a comparison tree code from an enum tree_code representation
2859 into a compcode bit-based encoding. This function is the inverse of
2860 compcode_to_comparison. */
2862 static enum comparison_code
2863 comparison_to_compcode (enum tree_code code)
2880 return COMPCODE_ORD;
2881 case UNORDERED_EXPR:
2882 return COMPCODE_UNORD;
2884 return COMPCODE_UNLT;
2886 return COMPCODE_UNEQ;
2888 return COMPCODE_UNLE;
2890 return COMPCODE_UNGT;
2892 return COMPCODE_LTGT;
2894 return COMPCODE_UNGE;
2900 /* Convert a compcode bit-based encoding of a comparison operator back
2901 to GCC's enum tree_code representation. This function is the
2902 inverse of comparison_to_compcode. */
2904 static enum tree_code
2905 compcode_to_comparison (enum comparison_code code)
2922 return ORDERED_EXPR;
2923 case COMPCODE_UNORD:
2924 return UNORDERED_EXPR;
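/* Editor's illustration of the encoding round-tripped by the two
   functions above: the bits stand for LT = 1, EQ = 2, GT = 4 and
   UNORD = 8, so COMPCODE_LE is LT|EQ = 3, COMPCODE_NE is LT|GT|UNORD
   = 13, and combining two comparisons on the same operands is plain
   bit arithmetic (a hypothetical stand-alone helper):  */

static int
example_combine_compcodes (int is_and, int lcompcode, int rcompcode)
{
  /* AND intersects the admitted outcomes, OR unions them; e.g.
     LE (3) & GE (6) == EQ (2), and LT (1) | EQ (2) == LE (3).  */
  return is_and ? (lcompcode & rcompcode) : (lcompcode | rcompcode);
}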
2942 /* Return a tree for the comparison which is the combination of
2943 doing the AND or OR (depending on CODE) of the two operations LCODE
2944 and RCODE on the identical operands LL_ARG and LR_ARG. Take into account
2945 the possibility of trapping if the mode has NaNs, and return NULL_TREE
2946 if this makes the transformation invalid. */
2949 combine_comparisons (enum tree_code code, enum tree_code lcode,
2950 enum tree_code rcode, tree truth_type,
2951 tree ll_arg, tree lr_arg)
2953 bool honor_nans = HONOR_NANS (TYPE_MODE (TREE_TYPE (ll_arg)));
2954 enum comparison_code lcompcode = comparison_to_compcode (lcode);
2955 enum comparison_code rcompcode = comparison_to_compcode (rcode);
2960 case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR:
2961 compcode = lcompcode & rcompcode;
2964 case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR:
2965 compcode = lcompcode | rcompcode;
2974 /* Eliminate unordered comparisons, as well as LTGT and ORD
2975 which are not used unless the mode has NaNs. */
2976 compcode &= ~COMPCODE_UNORD;
2977 if (compcode == COMPCODE_LTGT)
2978 compcode = COMPCODE_NE;
2979 else if (compcode == COMPCODE_ORD)
2980 compcode = COMPCODE_TRUE;
2982 else if (flag_trapping_math)
2984 /* Check that the original operation and the optimized ones will trap
2985 under the same condition. */
2986 bool ltrap = (lcompcode & COMPCODE_UNORD) == 0
2987 && (lcompcode != COMPCODE_EQ)
2988 && (lcompcode != COMPCODE_ORD);
2989 bool rtrap = (rcompcode & COMPCODE_UNORD) == 0
2990 && (rcompcode != COMPCODE_EQ)
2991 && (rcompcode != COMPCODE_ORD);
2992 bool trap = (compcode & COMPCODE_UNORD) == 0
2993 && (compcode != COMPCODE_EQ)
2994 && (compcode != COMPCODE_ORD);
2996 /* In a short-circuited boolean expression the LHS might be
2997 such that the RHS, if evaluated, will never trap. For
2998 example, in ORD (x, y) && (x < y), we evaluate the RHS only
2999 if neither x nor y is NaN. (This is a mixed blessing: for
3000 example, the expression above will never trap, hence
3001 optimizing it to x < y would be invalid). */
3002 if ((code == TRUTH_ORIF_EXPR && (lcompcode & COMPCODE_UNORD))
3003 || (code == TRUTH_ANDIF_EXPR && !(lcompcode & COMPCODE_UNORD)))
3006 /* If the comparison was short-circuited, and only the RHS
3007 trapped, we may now generate a spurious trap. */
3009 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
3012 /* If we changed the conditions that cause a trap, we lose. */
3013 if ((ltrap || rtrap) != trap)
3017 if (compcode == COMPCODE_TRUE)
3018 return constant_boolean_node (true, truth_type);
3019 else if (compcode == COMPCODE_FALSE)
3020 return constant_boolean_node (false, truth_type);
3023 enum tree_code tcode;
3025 tcode = compcode_to_comparison ((enum comparison_code) compcode);
3026 return fold_build2 (tcode, truth_type, ll_arg, lr_arg);
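/* Editor's worked instance of the trap analysis above, using the bit
   values of enum comparison_code: for TRUTH_ANDIF_EXPR of ORD (7) and
   LT (1), the combined compcode is 7 & 1 = 1 (LT); but the left-hand
   ORD short-circuits NaN operands away, so the original expression
   never traps while a bare LT would, and the (ltrap || rtrap) != trap
   check therefore makes the function return NULL_TREE.  */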
3030 /* Return nonzero if two operands (typically of the same tree node)
3031 are necessarily equal. If either argument has side-effects this
3032 function returns zero. FLAGS modifies behavior as follows:
3034 If OEP_ONLY_CONST is set, only return nonzero for constants.
3035 This function tests whether the operands are indistinguishable;
3036 it does not test whether they are equal using C's == operation.
3037 The distinction is important for IEEE floating point, because
3038 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
3039 (2) two NaNs may be indistinguishable, but NaN!=NaN.
3041 If OEP_ONLY_CONST is unset, a VAR_DECL is considered equal to itself
3042 even though it may hold multiple values during a function.
3043 This is because a GCC tree node guarantees that nothing else is
3044 executed between the evaluation of its "operands" (which may often
3045 be evaluated in arbitrary order). Hence if the operands themselves
3046 don't side-effect, the VAR_DECLs, PARM_DECLs etc... must hold the
3047 same value in each operand/subexpression. Hence leaving OEP_ONLY_CONST
3048 unset means assuming isochronic (or instantaneous) tree equivalence.
3049 Unless comparing arbitrary expression trees, such as from different
3050 statements, this flag can usually be left unset.
3052 If OEP_PURE_SAME is set, then pure functions with identical arguments
3053 are considered the same. It is used when the caller has other ways
3054 to ensure that global memory is unchanged in between. */
3057 operand_equal_p (const_tree arg0, const_tree arg1, unsigned int flags)
3059 /* If either is ERROR_MARK, they aren't equal. */
3060 if (TREE_CODE (arg0) == ERROR_MARK || TREE_CODE (arg1) == ERROR_MARK)
3063 /* Check equality of integer constants before bailing out due to
3064 precision differences. */
3065 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
3066 return tree_int_cst_equal (arg0, arg1);
3068 /* If the two types don't have the same signedness, then we can't consider
3069 them equal. We must check this before the STRIP_NOPS calls
3070 because they may change the signedness of the arguments. As pointers
3071 strictly don't have a signedness, require either two pointers or
3072 two non-pointers as well. */
3073 if (TYPE_UNSIGNED (TREE_TYPE (arg0)) != TYPE_UNSIGNED (TREE_TYPE (arg1))
3074 || POINTER_TYPE_P (TREE_TYPE (arg0)) != POINTER_TYPE_P (TREE_TYPE (arg1)))
3077 /* If the two types don't have the same precision, it is not safe to strip NOPs. */
3079 if (TYPE_PRECISION (TREE_TYPE (arg0)) != TYPE_PRECISION (TREE_TYPE (arg1)))
3085 /* In case both args are comparisons but with different comparison
3086 code, try to swap the comparison operands of one arg to produce
3087 a match and compare that variant. */
3088 if (TREE_CODE (arg0) != TREE_CODE (arg1)
3089 && COMPARISON_CLASS_P (arg0)
3090 && COMPARISON_CLASS_P (arg1))
3092 enum tree_code swap_code = swap_tree_comparison (TREE_CODE (arg1));
3094 if (TREE_CODE (arg0) == swap_code)
3095 return operand_equal_p (TREE_OPERAND (arg0, 0),
3096 TREE_OPERAND (arg1, 1), flags)
3097 && operand_equal_p (TREE_OPERAND (arg0, 1),
3098 TREE_OPERAND (arg1, 0), flags);
3101 if (TREE_CODE (arg0) != TREE_CODE (arg1)
3102 /* This is needed for conversions and for COMPONENT_REF.
3103 Might as well play it safe and always test this. */
3104 || TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK
3105 || TREE_CODE (TREE_TYPE (arg1)) == ERROR_MARK
3106 || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
3109 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
3110 We don't care about side effects in that case because the SAVE_EXPR
3111 takes care of that for us. In all other cases, two expressions are
3112 equal if they have no side effects. If we have two identical
3113 expressions with side effects that should be treated the same due
3114 to the only side effects being identical SAVE_EXPR's, that will
3115 be detected in the recursive calls below. */
3116 if (arg0 == arg1 && ! (flags & OEP_ONLY_CONST)
3117 && (TREE_CODE (arg0) == SAVE_EXPR
3118 || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
3121 /* Next handle constant cases, those for which we can return 1 even
3122 if ONLY_CONST is set. */
3123 if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1))
3124 switch (TREE_CODE (arg0))
3127 return tree_int_cst_equal (arg0, arg1);
3130 return FIXED_VALUES_IDENTICAL (TREE_FIXED_CST (arg0),
3131 TREE_FIXED_CST (arg1));
3134 if (REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0),
3135 TREE_REAL_CST (arg1)))
3139 if (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0))))
3141 /* If we do not distinguish between signed and unsigned zero,
3142 consider them equal. */
3143 if (real_zerop (arg0) && real_zerop (arg1))
3152 v1 = TREE_VECTOR_CST_ELTS (arg0);
3153 v2 = TREE_VECTOR_CST_ELTS (arg1);
3156 if (!operand_equal_p (TREE_VALUE (v1), TREE_VALUE (v2),
3159 v1 = TREE_CHAIN (v1);
3160 v2 = TREE_CHAIN (v2);
3167 return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1),
3169 && operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1),
3173 return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1)
3174 && ! memcmp (TREE_STRING_POINTER (arg0),
3175 TREE_STRING_POINTER (arg1),
3176 TREE_STRING_LENGTH (arg0)));
3179 return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0),
3185 if (flags & OEP_ONLY_CONST)
3188 /* Define macros to test an operand from arg0 and arg1 for equality and a
3189 variant that allows null and views null as being different from any
3190 non-null value. In the latter case, if either is null, both
3191 must be; otherwise, do the normal comparison. */
3192 #define OP_SAME(N) operand_equal_p (TREE_OPERAND (arg0, N), \
3193 TREE_OPERAND (arg1, N), flags)
3195 #define OP_SAME_WITH_NULL(N) \
3196 ((!TREE_OPERAND (arg0, N) || !TREE_OPERAND (arg1, N)) \
3197 ? TREE_OPERAND (arg0, N) == TREE_OPERAND (arg1, N) : OP_SAME (N))
3199 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
3202 /* Two conversions are equal only if signedness and modes match. */
3203 switch (TREE_CODE (arg0))
3206 case FIX_TRUNC_EXPR:
3207 if (TYPE_UNSIGNED (TREE_TYPE (arg0))
3208 != TYPE_UNSIGNED (TREE_TYPE (arg1)))
3218 case tcc_comparison:
3220 if (OP_SAME (0) && OP_SAME (1))
3223 /* For commutative ops, allow the other order. */
3224 return (commutative_tree_code (TREE_CODE (arg0))
3225 && operand_equal_p (TREE_OPERAND (arg0, 0),
3226 TREE_OPERAND (arg1, 1), flags)
3227 && operand_equal_p (TREE_OPERAND (arg0, 1),
3228 TREE_OPERAND (arg1, 0), flags));
3231 /* If either of the pointer (or reference) expressions we are
3232 dereferencing contain a side effect, these cannot be equal. */
3233 if (TREE_SIDE_EFFECTS (arg0)
3234 || TREE_SIDE_EFFECTS (arg1))
3237 switch (TREE_CODE (arg0))
3240 case ALIGN_INDIRECT_REF:
3241 case MISALIGNED_INDIRECT_REF:
3247 case ARRAY_RANGE_REF:
3248 /* Operands 2 and 3 may be null.
3249 Compare the array index by value first if it is constant, as the
3250 indexes may have different types but the same value here. */
3251 return (OP_SAME (0)
3252 && (tree_int_cst_equal (TREE_OPERAND (arg0, 1),
3253 TREE_OPERAND (arg1, 1))
3254 || OP_SAME (1))
3255 && OP_SAME_WITH_NULL (2)
3256 && OP_SAME_WITH_NULL (3));
3259 /* Handle operand 2 the same as for ARRAY_REF. Operand 0
3260 may be NULL when we're called to compare MEM_EXPRs. */
3261 return OP_SAME_WITH_NULL (0)
3263 && OP_SAME_WITH_NULL (2);
3266 return OP_SAME (0) && OP_SAME (1) && OP_SAME (2);
3272 case tcc_expression:
3273 switch (TREE_CODE (arg0))
3276 case TRUTH_NOT_EXPR:
3279 case TRUTH_ANDIF_EXPR:
3280 case TRUTH_ORIF_EXPR:
3281 return OP_SAME (0) && OP_SAME (1);
3283 case TRUTH_AND_EXPR:
3285 case TRUTH_XOR_EXPR:
3286 if (OP_SAME (0) && OP_SAME (1))
3289 /* Otherwise take into account this is a commutative operation. */
3290 return (operand_equal_p (TREE_OPERAND (arg0, 0),
3291 TREE_OPERAND (arg1, 1), flags)
3292 && operand_equal_p (TREE_OPERAND (arg0, 1),
3293 TREE_OPERAND (arg1, 0), flags));
3296 return OP_SAME (0) && OP_SAME (1) && OP_SAME (2);
3303 switch (TREE_CODE (arg0))
3306 /* If the CALL_EXPRs call different functions, then they
3307 clearly cannot be equal. */
3308 if (! operand_equal_p (CALL_EXPR_FN (arg0), CALL_EXPR_FN (arg1),
3313 unsigned int cef = call_expr_flags (arg0);
3314 if (flags & OEP_PURE_SAME)
3315 cef &= ECF_CONST | ECF_PURE;
3322 /* Now see if all the arguments are the same. */
3324 const_call_expr_arg_iterator iter0, iter1;
3326 for (a0 = first_const_call_expr_arg (arg0, &iter0),
3327 a1 = first_const_call_expr_arg (arg1, &iter1);
3329 a0 = next_const_call_expr_arg (&iter0),
3330 a1 = next_const_call_expr_arg (&iter1))
3331 if (! operand_equal_p (a0, a1, flags))
3334 /* If we get here and both argument lists are exhausted
3335 then the CALL_EXPRs are equal. */
3336 return ! (a0 || a1);
3342 case tcc_declaration:
3343 /* Consider __builtin_sqrt equal to sqrt. */
3344 return (TREE_CODE (arg0) == FUNCTION_DECL
3345 && DECL_BUILT_IN (arg0) && DECL_BUILT_IN (arg1)
3346 && DECL_BUILT_IN_CLASS (arg0) == DECL_BUILT_IN_CLASS (arg1)
3347 && DECL_FUNCTION_CODE (arg0) == DECL_FUNCTION_CODE (arg1));
3354 #undef OP_SAME_WITH_NULL
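/* Editor's stand-alone sketch (assumes <math.h> for NAN and signbit) of
   the IEEE distinction drawn in the comment before operand_equal_p:
   values can compare equal with == yet be distinguishable, and one NaN
   is indistinguishable from itself yet compares unequal.  */

static int
example_ieee_distinctions (void)
{
  double pz = 0.0, nz = -0.0, qnan = NAN;
  int equal_but_distinct = pz == nz && signbit (pz) != signbit (nz);
  int same_but_unequal = qnan != qnan;
  return equal_but_distinct && same_but_unequal;
}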
3357 /* Similar to operand_equal_p, but see if ARG0 might have been made by
3358 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
3360 When in doubt, return 0. */
3363 operand_equal_for_comparison_p (tree arg0, tree arg1, tree other)
3365 int unsignedp1, unsignedpo;
3366 tree primarg0, primarg1, primother;
3367 unsigned int correct_width;
3369 if (operand_equal_p (arg0, arg1, 0))
3372 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
3373 || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
3376 /* Discard any conversions that don't change the modes of ARG0 and ARG1
3377 and see if the inner values are the same. This removes any
3378 signedness comparison, which doesn't matter here. */
3379 primarg0 = arg0, primarg1 = arg1;
3380 STRIP_NOPS (primarg0);
3381 STRIP_NOPS (primarg1);
3382 if (operand_equal_p (primarg0, primarg1, 0))
3385 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
3386 actual comparison operand, ARG0.
3388 First throw away any conversions to wider types
3389 already present in the operands. */
3391 primarg1 = get_narrower (arg1, &unsignedp1);
3392 primother = get_narrower (other, &unsignedpo);
3394 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
3395 if (unsignedp1 == unsignedpo
3396 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
3397 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
3399 tree type = TREE_TYPE (arg0);
3401 /* Make sure shorter operand is extended the right way
3402 to match the longer operand. */
3403 primarg1 = fold_convert (signed_or_unsigned_type_for
3404 (unsignedp1, TREE_TYPE (primarg1)), primarg1);
3406 if (operand_equal_p (arg0, fold_convert (type, primarg1), 0))
3413 /* See if ARG is an expression that is either a comparison or is performing
3414 arithmetic on comparisons. The comparisons must only be comparing
3415 two different values, which will be stored in *CVAL1 and *CVAL2; if
3416 they are nonzero it means that some operands have already been found.
3417 No variables may be used anywhere else in the expression except in the
3418 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
3419 the expression and save_expr needs to be called with CVAL1 and CVAL2.
3421 If this is true, return 1. Otherwise, return zero. */
3424 twoval_comparison_p (tree arg, tree *cval1, tree *cval2, int *save_p)
3426 enum tree_code code = TREE_CODE (arg);
3427 enum tree_code_class tclass = TREE_CODE_CLASS (code);
3429 /* We can handle some of the tcc_expression cases here. */
3430 if (tclass == tcc_expression && code == TRUTH_NOT_EXPR)
3432 else if (tclass == tcc_expression
3433 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
3434 || code == COMPOUND_EXPR))
3435 tclass = tcc_binary;
3437 else if (tclass == tcc_expression && code == SAVE_EXPR
3438 && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg, 0)))
3440 /* If we've already found a CVAL1 or CVAL2, this expression is
3441 too complex to handle. */
3442 if (*cval1 || *cval2)
3452 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
3455 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
3456 && twoval_comparison_p (TREE_OPERAND (arg, 1),
3457 cval1, cval2, save_p));
3462 case tcc_expression:
3463 if (code == COND_EXPR)
3464 return (twoval_comparison_p (TREE_OPERAND (arg, 0),
3465 cval1, cval2, save_p)
3466 && twoval_comparison_p (TREE_OPERAND (arg, 1),
3467 cval1, cval2, save_p)
3468 && twoval_comparison_p (TREE_OPERAND (arg, 2),
3469 cval1, cval2, save_p));
3472 case tcc_comparison:
3473 /* First see if we can handle the first operand, then the second. For
3474 the second operand, we know *CVAL1 can't be zero. It must be that
3475 one side of the comparison is each of the values; test for the
3476 case where this isn't true by failing if the two operands
3479 if (operand_equal_p (TREE_OPERAND (arg, 0),
3480 TREE_OPERAND (arg, 1), 0))
3484 *cval1 = TREE_OPERAND (arg, 0);
3485 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
3487 else if (*cval2 == 0)
3488 *cval2 = TREE_OPERAND (arg, 0);
3489 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
3494 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
3496 else if (*cval2 == 0)
3497 *cval2 = TREE_OPERAND (arg, 1);
3498 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
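/* Editor's illustration of what twoval_comparison_p accepts: in an
   expression such as
     (x < y) | !(x == y)
   arithmetic is performed only on comparisons whose operands are the
   same two values, so *CVAL1 and *CVAL2 become X and Y.  Any expression
   that compares a third value, or that uses X or Y outside a
   comparison, makes the function return 0.  */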
3510 /* ARG is a tree that is known to contain just arithmetic operations and
3511 comparisons. Evaluate the operations in the tree substituting NEW0 for
3512 any occurrence of OLD0 as an operand of a comparison and likewise for