1 /* Fold a constant sub-tree into a single node for C-compiler
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
23 /*@@ This file should be rewritten to use an arbitrary precision
24 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
25 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
26 @@ The routines that translate from the ap rep should
27 @@ warn if precision etc. is lost.
28 @@ This would also make life easier when this technology is used
29 @@ for cross-compilers. */
31 /* The entry points in this file are fold, size_int_wide, size_binop
32 and force_fit_type_double.
34 fold takes a tree as argument and returns a simplified tree.
36 size_binop takes a tree code for an arithmetic operation
37 and two operands that are trees, and produces a tree for the
38 result, assuming the type comes from `sizetype'.
40 size_int takes an integer value, and creates a tree constant
41 with type from `sizetype'.
43 force_fit_type_double takes a constant, an overflowable flag and a
44 prior overflow indicator. It forces the value to fit the type and
45 sets TREE_OVERFLOW.
47 Note: Since the folders get called on non-gimple code as well as
48 gimple code, we need to handle GIMPLE tuples as well as their
49 corresponding tree equivalents. */
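/* For illustration only (not part of the original sources), a typical
   middle-end use of the size_int/size_binop entry points might look
   like:

     tree four   = size_int (4);
     tree eight  = size_int (8);
     tree twelve = size_binop (PLUS_EXPR, four, eight);

   The variable names are made up for this sketch; fold itself is
   normally reached through wrappers such as fold_build2.  */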
53 #include "coretypes.h"
65 #include "langhooks.h"
68 /* Non-zero if we are folding constants inside an initializer; zero
69 otherwise. */
70 int folding_initializer = 0;
72 /* The following constants represent a bit-based encoding of GCC's
73 comparison operators. This encoding simplifies transformations
74 on relational comparison operators, such as AND and OR. */
75 enum comparison_code {
94 static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
95 static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
96 static bool negate_mathfn_p (enum built_in_function);
97 static bool negate_expr_p (tree);
98 static tree negate_expr (tree);
99 static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int);
100 static tree associate_trees (tree, tree, enum tree_code, tree);
101 static tree const_binop (enum tree_code, tree, tree, int);
102 static enum comparison_code comparison_to_compcode (enum tree_code);
103 static enum tree_code compcode_to_comparison (enum comparison_code);
104 static tree combine_comparisons (enum tree_code, enum tree_code,
105 enum tree_code, tree, tree, tree);
106 static int truth_value_p (enum tree_code);
107 static int operand_equal_for_comparison_p (tree, tree, tree);
108 static int twoval_comparison_p (tree, tree *, tree *, int *);
109 static tree eval_subst (tree, tree, tree, tree, tree);
110 static tree pedantic_omit_one_operand (tree, tree, tree);
111 static tree distribute_bit_expr (enum tree_code, tree, tree, tree);
112 static tree make_bit_field_ref (tree, tree, int, int, int);
113 static tree optimize_bit_field_compare (enum tree_code, tree, tree, tree);
114 static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
115 enum machine_mode *, int *, int *,
117 static int all_ones_mask_p (tree, int);
118 static tree sign_bit_p (tree, tree);
119 static int simple_operand_p (tree);
120 static tree range_binop (enum tree_code, tree, tree, int, tree, int);
121 static tree range_predecessor (tree);
122 static tree range_successor (tree);
123 static tree make_range (tree, int *, tree *, tree *, bool *);
124 static tree build_range_check (tree, tree, int, tree, tree);
125 static int merge_ranges (int *, tree *, tree *, int, tree, tree, int, tree,
127 static tree fold_range_test (enum tree_code, tree, tree, tree);
128 static tree fold_cond_expr_with_comparison (tree, tree, tree, tree);
129 static tree unextend (tree, int, int, tree);
130 static tree fold_truthop (enum tree_code, tree, tree, tree);
131 static tree optimize_minmax_comparison (enum tree_code, tree, tree, tree);
132 static tree extract_muldiv (tree, tree, enum tree_code, tree, bool *);
133 static tree extract_muldiv_1 (tree, tree, enum tree_code, tree, bool *);
134 static int multiple_of_p (tree, tree, tree);
135 static tree fold_binary_op_with_conditional_arg (enum tree_code, tree,
138 static bool fold_real_zero_addition_p (tree, tree, int);
139 static tree fold_mathfn_compare (enum built_in_function, enum tree_code,
141 static tree fold_inf_compare (enum tree_code, tree, tree, tree);
142 static tree fold_div_compare (enum tree_code, tree, tree, tree);
143 static bool reorder_operands_p (tree, tree);
144 static tree fold_negate_const (tree, tree);
145 static tree fold_not_const (tree, tree);
146 static tree fold_relational_const (enum tree_code, tree, tree, tree);
147 static int native_encode_expr (tree, unsigned char *, int);
148 static tree native_interpret_expr (tree, unsigned char *, int);
151 /* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
152 overflow. Suppose A, B and SUM have the same respective signs as A1, B1,
153 and SUM1. Then this yields nonzero if overflow occurred during the
154 addition.
156 Overflow occurs if A and B have the same sign, but A and SUM differ in
157 sign. Use `^' to test whether signs differ, and `< 0' to isolate the
158 sign. */
159 #define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
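/* Worked example for OVERFLOW_SUM_SIGN (illustrative, using 8-bit
   values for brevity): a = 0x70 and b = 0x30 are both positive, and
   their sum 0xa0 is negative.  a ^ b = 0x40, so ~(a ^ b) = 0xbf has
   the sign bit set (the operands agree in sign); a ^ sum = 0xd0 also
   has the sign bit set (the sum disagrees).  Their AND, 0x90, is
   negative, so the macro reports overflow.  */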
161 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
162 We do that by representing the two-word integer in 4 words, with only
163 HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
164 number. The value of the word is LOWPART + HIGHPART * BASE. */
166 #define LOWPART(x) \
167 ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
168 #define HIGHPART(x) \
169 ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
170 #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
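/* Example (illustrative): on a host where HOST_BITS_PER_WIDE_INT is 32,
   BASE is 1 << 16 = 0x10000, and for x = 0x12345678 we have
   LOWPART (x) = 0x5678 and HIGHPART (x) = 0x1234, so
   LOWPART (x) + HIGHPART (x) * BASE reconstructs x.  */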
172 /* Unpack a two-word integer into 4 words.
173 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
174 WORDS points to the array of HOST_WIDE_INTs. */
177 encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
179 words[0] = LOWPART (low);
180 words[1] = HIGHPART (low);
181 words[2] = LOWPART (hi);
182 words[3] = HIGHPART (hi);
185 /* Pack an array of 4 words into a two-word integer.
186 WORDS points to the array of words.
187 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
190 decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
193 *low = words[0] + words[1] * BASE;
194 *hi = words[2] + words[3] * BASE;
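/* Round-trip sketch (illustrative): after

     encode (words, low, hi);
     decode (words, &low2, &hi2);

   low2 == low and hi2 == hi; the four half-words are simply a
   positional base-BASE representation of the two-word integer.  */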
197 /* Force the double-word integer L1, H1 to be within the range of the
198 integer type TYPE. Stores the properly truncated and sign-extended
199 double-word integer in *LV, *HV. Returns true if the operation
200 overflows, that is, argument and result are different. */
203 fit_double_type (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
204 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, tree type)
206 unsigned HOST_WIDE_INT low0 = l1;
207 HOST_WIDE_INT high0 = h1;
209 int sign_extended_type;
211 if (POINTER_TYPE_P (type)
212 || TREE_CODE (type) == OFFSET_TYPE)
215 prec = TYPE_PRECISION (type);
217 /* Size types *are* sign extended. */
218 sign_extended_type = (!TYPE_UNSIGNED (type)
219 || (TREE_CODE (type) == INTEGER_TYPE
220 && TYPE_IS_SIZETYPE (type)));
222 /* First clear all bits that are beyond the type's precision. */
223 if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
225 else if (prec > HOST_BITS_PER_WIDE_INT)
226 h1 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
230 if (prec < HOST_BITS_PER_WIDE_INT)
231 l1 &= ~((HOST_WIDE_INT) (-1) << prec);
234 /* Then do sign extension if necessary. */
235 if (!sign_extended_type)
236 /* No sign extension */;
237 else if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
238 /* Correct width already. */;
239 else if (prec > HOST_BITS_PER_WIDE_INT)
241 /* Sign extend top half? */
242 if (h1 & ((unsigned HOST_WIDE_INT)1
243 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
244 h1 |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
246 else if (prec == HOST_BITS_PER_WIDE_INT)
248 if ((HOST_WIDE_INT)l1 < 0)
253 /* Sign extend bottom half? */
254 if (l1 & ((unsigned HOST_WIDE_INT)1 << (prec - 1)))
257 l1 |= (HOST_WIDE_INT)(-1) << prec;
264 /* If the value didn't fit, signal overflow. */
265 return l1 != low0 || h1 != high0;
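/* Example (illustrative): for a signed 8-bit TYPE with L1 = 0x1ff and
   H1 = 0, the bits above bit 7 are first cleared, leaving 0xff; sign
   extension then turns that into -1 (all bits set).  Since argument
   and result differ, the function reports overflow.  */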
268 /* We force the double-int HIGH:LOW to the range of the type TYPE by
269 sign or zero extending it.
270 OVERFLOWABLE indicates if we are interested
271 in overflow of the value, when >0 we are only interested in signed
272 overflow, for <0 we are interested in any overflow. OVERFLOWED
273 indicates whether overflow has already occurred. We force
275 the value to be within range of TYPE (by setting to 0 or 1 all
276 the bits outside the type's range). We set TREE_OVERFLOW if
277 OVERFLOWED is nonzero,
278 or OVERFLOWABLE is >0 and signed overflow occurs,
279 or OVERFLOWABLE is <0 and any overflow occurs.
280 We return a new tree node for the extended double-int. The node
281 is shared if no overflow flags are set. */
284 force_fit_type_double (tree type, unsigned HOST_WIDE_INT low,
285 HOST_WIDE_INT high, int overflowable,
288 int sign_extended_type;
291 /* Size types *are* sign extended. */
292 sign_extended_type = (!TYPE_UNSIGNED (type)
293 || (TREE_CODE (type) == INTEGER_TYPE
294 && TYPE_IS_SIZETYPE (type)));
296 overflow = fit_double_type (low, high, &low, &high, type);
298 /* If we need to set overflow flags, return a new unshared node. */
299 if (overflowed || overflow)
303 || (overflowable > 0 && sign_extended_type))
305 tree t = make_node (INTEGER_CST);
306 TREE_INT_CST_LOW (t) = low;
307 TREE_INT_CST_HIGH (t) = high;
308 TREE_TYPE (t) = type;
309 TREE_OVERFLOW (t) = 1;
314 /* Else build a shared node. */
315 return build_int_cst_wide (type, low, high);
318 /* Add two doubleword integers with doubleword result.
319 Return nonzero if the operation overflows according to UNSIGNED_P.
320 Each argument is given as two `HOST_WIDE_INT' pieces.
321 One argument is L1 and H1; the other, L2 and H2.
322 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
325 add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
326 unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
327 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
330 unsigned HOST_WIDE_INT l;
334 h = h1 + h2 + (l < l1);
340 return (unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1;
342 return OVERFLOW_SUM_SIGN (h1, h2, h);
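/* Example (illustrative): adding the doubleword "all ones" to 1 gives
   l = 0 with a carry into h, which also wraps to 0.  Viewed unsigned,
   h < h1 holds and overflow is reported; viewed signed, the same bits
   mean -1 + 1 = 0 and OVERFLOW_SUM_SIGN correctly reports none.  */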
345 /* Negate a doubleword integer with doubleword result.
346 Return nonzero if the operation overflows, assuming it's signed.
347 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
348 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
351 neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
352 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
358 return (*hv & h1) < 0;
368 /* Multiply two doubleword integers with doubleword result.
369 Return nonzero if the operation overflows according to UNSIGNED_P.
370 Each argument is given as two `HOST_WIDE_INT' pieces.
371 One argument is L1 and H1; the other, L2 and H2.
372 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
375 mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
376 unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
377 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
380 HOST_WIDE_INT arg1[4];
381 HOST_WIDE_INT arg2[4];
382 HOST_WIDE_INT prod[4 * 2];
383 unsigned HOST_WIDE_INT carry;
385 unsigned HOST_WIDE_INT toplow, neglow;
386 HOST_WIDE_INT tophigh, neghigh;
388 encode (arg1, l1, h1);
389 encode (arg2, l2, h2);
391 memset (prod, 0, sizeof prod);
393 for (i = 0; i < 4; i++)
396 for (j = 0; j < 4; j++)
399 /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
400 carry += arg1[i] * arg2[j];
401 /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
403 prod[k] = LOWPART (carry);
404 carry = HIGHPART (carry);
409 decode (prod, lv, hv);
410 decode (prod + 4, &toplow, &tophigh);
412 /* Unsigned overflow is immediate. */
414 return (toplow | tophigh) != 0;
416 /* Check for signed overflow by calculating the signed representation of the
417 top half of the result; it should agree with the low half's sign bit. */
420 neg_double (l2, h2, &neglow, &neghigh);
421 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
425 neg_double (l1, h1, &neglow, &neghigh);
426 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
428 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
431 /* Shift the doubleword integer in L1, H1 left by COUNT places
432 keeping only PREC bits of result.
433 Shift right if COUNT is negative.
434 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
435 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
438 lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
439 HOST_WIDE_INT count, unsigned int prec,
440 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith)
442 unsigned HOST_WIDE_INT signmask;
446 rshift_double (l1, h1, -count, prec, lv, hv, arith);
450 if (SHIFT_COUNT_TRUNCATED)
453 if (count >= 2 * HOST_BITS_PER_WIDE_INT)
455 /* Shifting by the host word size is undefined according to the
456 ANSI standard, so we must handle this as a special case. */
460 else if (count >= HOST_BITS_PER_WIDE_INT)
462 *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
467 *hv = (((unsigned HOST_WIDE_INT) h1 << count)
468 | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
472 /* Sign extend all bits that are beyond the precision. */
474 signmask = -((prec > HOST_BITS_PER_WIDE_INT
475 ? ((unsigned HOST_WIDE_INT) *hv
476 >> (prec - HOST_BITS_PER_WIDE_INT - 1))
477 : (*lv >> (prec - 1))) & 1);
479 if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
481 else if (prec >= HOST_BITS_PER_WIDE_INT)
483 *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
484 *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
489 *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
490 *lv |= signmask << prec;
494 /* Shift the doubleword integer in L1, H1 right by COUNT places
495 keeping only PREC bits of result. COUNT must be positive.
496 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
497 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
500 rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
501 HOST_WIDE_INT count, unsigned int prec,
502 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
505 unsigned HOST_WIDE_INT signmask;
508 ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
511 if (SHIFT_COUNT_TRUNCATED)
514 if (count >= 2 * HOST_BITS_PER_WIDE_INT)
516 /* Shifting by the host word size is undefined according to the
517 ANSI standard, so we must handle this as a special case. */
521 else if (count >= HOST_BITS_PER_WIDE_INT)
524 *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
528 *hv = (unsigned HOST_WIDE_INT) h1 >> count;
530 | ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
533 /* Zero / sign extend all bits that are beyond the precision. */
535 if (count >= (HOST_WIDE_INT)prec)
540 else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
542 else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
544 *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
545 *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
550 *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
551 *lv |= signmask << (prec - count);
555 /* Rotate the doubleword integer in L1, H1 left by COUNT places
556 keeping only PREC bits of result.
557 Rotate right if COUNT is negative.
558 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
561 lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
562 HOST_WIDE_INT count, unsigned int prec,
563 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
565 unsigned HOST_WIDE_INT s1l, s2l;
566 HOST_WIDE_INT s1h, s2h;
572 lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
573 rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
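/* Example (illustrative) with prec = 8: rotating 0x81 left by one gives
   0x02 from the left shift (after truncation to 8 bits) and 0x01 from
   the logical right shift by prec - count = 7; OR-ing the two pieces
   yields 0x03, the expected rotation.  */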
578 /* Rotate the doubleword integer in L1, H1 right by COUNT places
579 keeping only PREC bits of result. COUNT must be positive.
580 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
583 rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
584 HOST_WIDE_INT count, unsigned int prec,
585 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
587 unsigned HOST_WIDE_INT s1l, s2l;
588 HOST_WIDE_INT s1h, s2h;
594 rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
595 lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
600 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
601 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
602 CODE is a tree code for a kind of division, one of
603 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
604 or EXACT_DIV_EXPR.
605 It controls how the quotient is rounded to an integer.
606 Return nonzero if the operation overflows.
607 UNS nonzero says do unsigned division. */
610 div_and_round_double (enum tree_code code, int uns,
611 unsigned HOST_WIDE_INT lnum_orig, /* num == numerator == dividend */
612 HOST_WIDE_INT hnum_orig,
613 unsigned HOST_WIDE_INT lden_orig, /* den == denominator == divisor */
614 HOST_WIDE_INT hden_orig,
615 unsigned HOST_WIDE_INT *lquo,
616 HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
620 HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
621 HOST_WIDE_INT den[4], quo[4];
623 unsigned HOST_WIDE_INT work;
624 unsigned HOST_WIDE_INT carry = 0;
625 unsigned HOST_WIDE_INT lnum = lnum_orig;
626 HOST_WIDE_INT hnum = hnum_orig;
627 unsigned HOST_WIDE_INT lden = lden_orig;
628 HOST_WIDE_INT hden = hden_orig;
631 if (hden == 0 && lden == 0)
632 overflow = 1, lden = 1;
634 /* Calculate quotient sign and convert operands to unsigned. */
640 /* (minimum integer) / (-1) is the only overflow case. */
641 if (neg_double (lnum, hnum, &lnum, &hnum)
642 && ((HOST_WIDE_INT) lden & hden) == -1)
648 neg_double (lden, hden, &lden, &hden);
652 if (hnum == 0 && hden == 0)
653 { /* single precision */
655 /* This unsigned division rounds toward zero. */
661 { /* trivial case: dividend < divisor */
662 /* hden != 0 already checked. */
669 memset (quo, 0, sizeof quo);
671 memset (num, 0, sizeof num); /* to zero the extra scaling element */
672 memset (den, 0, sizeof den);
674 encode (num, lnum, hnum);
675 encode (den, lden, hden);
677 /* Special code for when the divisor < BASE. */
678 if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
680 /* hnum != 0 already checked. */
681 for (i = 4 - 1; i >= 0; i--)
683 work = num[i] + carry * BASE;
684 quo[i] = work / lden;
690 /* Full double precision division,
691 with thanks to Don Knuth's "Seminumerical Algorithms". */
692 int num_hi_sig, den_hi_sig;
693 unsigned HOST_WIDE_INT quo_est, scale;
695 /* Find the highest nonzero divisor digit. */
696 for (i = 4 - 1;; i--)
703 /* Ensure that the first digit of the divisor is at least BASE/2.
704 This is required by the quotient digit estimation algorithm. */
706 scale = BASE / (den[den_hi_sig] + 1);
708 { /* scale divisor and dividend */
710 for (i = 0; i <= 4 - 1; i++)
712 work = (num[i] * scale) + carry;
713 num[i] = LOWPART (work);
714 carry = HIGHPART (work);
719 for (i = 0; i <= 4 - 1; i++)
721 work = (den[i] * scale) + carry;
722 den[i] = LOWPART (work);
723 carry = HIGHPART (work);
724 if (den[i] != 0) den_hi_sig = i;
731 for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
733 /* Guess the next quotient digit, quo_est, by dividing the first
734 two remaining dividend digits by the high order quotient digit.
735 quo_est is never low and is at most 2 high. */
736 unsigned HOST_WIDE_INT tmp;
738 num_hi_sig = i + den_hi_sig + 1;
739 work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
740 if (num[num_hi_sig] != den[den_hi_sig])
741 quo_est = work / den[den_hi_sig];
745 /* Refine quo_est so it's usually correct, and at most one high. */
746 tmp = work - quo_est * den[den_hi_sig];
748 && (den[den_hi_sig - 1] * quo_est
749 > (tmp * BASE + num[num_hi_sig - 2])))
752 /* Try QUO_EST as the quotient digit, by multiplying the
753 divisor by QUO_EST and subtracting from the remaining dividend.
754 Keep in mind that QUO_EST is the (I - 1)st digit. */
757 for (j = 0; j <= den_hi_sig; j++)
759 work = quo_est * den[j] + carry;
760 carry = HIGHPART (work);
761 work = num[i + j] - LOWPART (work);
762 num[i + j] = LOWPART (work);
763 carry += HIGHPART (work) != 0;
766 /* If quo_est was high by one, then num[i] went negative and
767 we need to correct things. */
768 if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
771 carry = 0; /* add divisor back in */
772 for (j = 0; j <= den_hi_sig; j++)
774 work = num[i + j] + den[j] + carry;
775 carry = HIGHPART (work);
776 num[i + j] = LOWPART (work);
779 num[num_hi_sig] += carry;
782 /* Store the quotient digit. */
787 decode (quo, lquo, hquo);
790 /* If result is negative, make it so. */
792 neg_double (*lquo, *hquo, lquo, hquo);
794 /* Compute trial remainder: rem = num - (quo * den) */
795 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
796 neg_double (*lrem, *hrem, lrem, hrem);
797 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
802 case TRUNC_MOD_EXPR: /* round toward zero */
803 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
807 case FLOOR_MOD_EXPR: /* round toward negative infinity */
808 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
811 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
819 case CEIL_MOD_EXPR: /* round toward positive infinity */
820 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
822 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
830 case ROUND_MOD_EXPR: /* round to closest integer */
832 unsigned HOST_WIDE_INT labs_rem = *lrem;
833 HOST_WIDE_INT habs_rem = *hrem;
834 unsigned HOST_WIDE_INT labs_den = lden, ltwice;
835 HOST_WIDE_INT habs_den = hden, htwice;
837 /* Get absolute values. */
839 neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
841 neg_double (lden, hden, &labs_den, &habs_den);
843 /* If (2 * abs (rem) >= abs (den)) */
844 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
845 labs_rem, habs_rem, <wice, &htwice);
847 if (((unsigned HOST_WIDE_INT) habs_den
848 < (unsigned HOST_WIDE_INT) htwice)
849 || (((unsigned HOST_WIDE_INT) habs_den
850 == (unsigned HOST_WIDE_INT) htwice)
851 && (labs_den < ltwice)))
855 add_double (*lquo, *hquo,
856 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
859 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
871 /* Compute true remainder: rem = num - (quo * den) */
872 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
873 neg_double (*lrem, *hrem, lrem, hrem);
874 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
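/* Example (illustrative): dividing -7 by 2 under the various codes:

     TRUNC_DIV_EXPR   quotient -3, remainder -1  (toward zero)
     FLOOR_DIV_EXPR   quotient -4, remainder  1  (toward negative infinity)
     CEIL_DIV_EXPR    quotient -3, remainder -1  (toward positive infinity)
     ROUND_DIV_EXPR   quotient -4, remainder  1  (nearest, ties away from zero)

   In every case num == quo * den + rem holds.  */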
878 /* If ARG2 divides ARG1 with zero remainder, carries out the division
879 of type CODE and returns the quotient.
880 Otherwise returns NULL_TREE. */
883 div_if_zero_remainder (enum tree_code code, tree arg1, tree arg2)
885 unsigned HOST_WIDE_INT int1l, int2l;
886 HOST_WIDE_INT int1h, int2h;
887 unsigned HOST_WIDE_INT quol, reml;
888 HOST_WIDE_INT quoh, remh;
889 tree type = TREE_TYPE (arg1);
890 int uns = TYPE_UNSIGNED (type);
892 int1l = TREE_INT_CST_LOW (arg1);
893 int1h = TREE_INT_CST_HIGH (arg1);
894 int2l = TREE_INT_CST_LOW (arg2);
895 int2h = TREE_INT_CST_HIGH (arg2);
897 div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
898 &quol, &quoh, &reml, &remh);
899 if (remh != 0 || reml != 0)
902 return build_int_cst_wide (type, quol, quoh);
905 /* This is non-zero if we should defer warnings about undefined
906 overflow. This facility exists because these warnings are a
907 special case. The code to estimate loop iterations does not want
908 to issue any warnings, since it works with expressions which do not
909 occur in user code. Various bits of cleanup code call fold(), but
910 only use the result if it has certain characteristics (e.g., is a
911 constant); that code only wants to issue a warning if the result is
912 used.
914 static int fold_deferring_overflow_warnings;
916 /* If a warning about undefined overflow is deferred, this is the
917 warning. Note that this may cause us to turn two warnings into
918 one, but that is fine since it is sufficient to only give one
919 warning per expression. */
921 static const char* fold_deferred_overflow_warning;
923 /* If a warning about undefined overflow is deferred, this is the
924 level at which the warning should be emitted. */
926 static enum warn_strict_overflow_code fold_deferred_overflow_code;
928 /* Start deferring overflow warnings. We could use a stack here to
929 permit nested calls, but at present it is not necessary. */
932 fold_defer_overflow_warnings (void)
934 ++fold_deferring_overflow_warnings;
937 /* Stop deferring overflow warnings. If there is a pending warning,
938 and ISSUE is true, then issue the warning if appropriate. STMT is
939 the statement with which the warning should be associated (used for
940 location information); STMT may be NULL. CODE is the level of the
941 warning--a warn_strict_overflow_code value. This function will use
942 the smaller of CODE and the deferred code when deciding whether to
943 issue the warning. CODE may be zero to mean to always use the
944 deferred code. */
947 fold_undefer_overflow_warnings (bool issue, tree stmt, int code)
952 gcc_assert (fold_deferring_overflow_warnings > 0);
953 --fold_deferring_overflow_warnings;
954 if (fold_deferring_overflow_warnings > 0)
956 if (fold_deferred_overflow_warning != NULL
958 && code < (int) fold_deferred_overflow_code)
959 fold_deferred_overflow_code = code;
963 warnmsg = fold_deferred_overflow_warning;
964 fold_deferred_overflow_warning = NULL;
966 if (!issue || warnmsg == NULL)
969 /* Use the smallest code level when deciding to issue the
970 warning. */
971 if (code == 0 || code > (int) fold_deferred_overflow_code)
972 code = fold_deferred_overflow_code;
974 if (!issue_strict_overflow_warning (code))
977 if (stmt == NULL_TREE || !expr_has_location (stmt))
978 locus = input_location;
980 locus = expr_location (stmt);
981 warning (OPT_Wstrict_overflow, "%H%s", &locus, warnmsg);
984 /* Stop deferring overflow warnings, ignoring any deferred
985 warnings. */
988 fold_undefer_and_ignore_overflow_warnings (void)
990 fold_undefer_overflow_warnings (false, NULL_TREE, 0);
993 /* Whether we are deferring overflow warnings. */
996 fold_deferring_overflow_warnings_p (void)
998 return fold_deferring_overflow_warnings > 0;
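/* Usage sketch (illustrative; result_is_used and stmt stand for
   caller-specific state):

     fold_defer_overflow_warnings ();
     tem = fold (expr);
     fold_undefer_overflow_warnings (result_is_used, stmt, 0);

   Passing 0 for CODE means the deferred warning's own level is used.  */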
1001 /* This is called when we fold something based on the fact that signed
1002 overflow is undefined. */
1005 fold_overflow_warning (const char* gmsgid, enum warn_strict_overflow_code wc)
1007 gcc_assert (!flag_wrapv && !flag_trapv);
1008 if (fold_deferring_overflow_warnings > 0)
1010 if (fold_deferred_overflow_warning == NULL
1011 || wc < fold_deferred_overflow_code)
1013 fold_deferred_overflow_warning = gmsgid;
1014 fold_deferred_overflow_code = wc;
1017 else if (issue_strict_overflow_warning (wc))
1018 warning (OPT_Wstrict_overflow, gmsgid);
1021 /* Return true if the built-in mathematical function specified by CODE
1022 is odd, i.e. -f(x) == f(-x). */
1025 negate_mathfn_p (enum built_in_function code)
1029 CASE_FLT_FN (BUILT_IN_ASIN):
1030 CASE_FLT_FN (BUILT_IN_ASINH):
1031 CASE_FLT_FN (BUILT_IN_ATAN):
1032 CASE_FLT_FN (BUILT_IN_ATANH):
1033 CASE_FLT_FN (BUILT_IN_CASIN):
1034 CASE_FLT_FN (BUILT_IN_CASINH):
1035 CASE_FLT_FN (BUILT_IN_CATAN):
1036 CASE_FLT_FN (BUILT_IN_CATANH):
1037 CASE_FLT_FN (BUILT_IN_CBRT):
1038 CASE_FLT_FN (BUILT_IN_CPROJ):
1039 CASE_FLT_FN (BUILT_IN_CSIN):
1040 CASE_FLT_FN (BUILT_IN_CSINH):
1041 CASE_FLT_FN (BUILT_IN_CTAN):
1042 CASE_FLT_FN (BUILT_IN_CTANH):
1043 CASE_FLT_FN (BUILT_IN_ERF):
1044 CASE_FLT_FN (BUILT_IN_LLROUND):
1045 CASE_FLT_FN (BUILT_IN_LROUND):
1046 CASE_FLT_FN (BUILT_IN_ROUND):
1047 CASE_FLT_FN (BUILT_IN_SIN):
1048 CASE_FLT_FN (BUILT_IN_SINH):
1049 CASE_FLT_FN (BUILT_IN_TAN):
1050 CASE_FLT_FN (BUILT_IN_TANH):
1051 CASE_FLT_FN (BUILT_IN_TRUNC):
1054 CASE_FLT_FN (BUILT_IN_LLRINT):
1055 CASE_FLT_FN (BUILT_IN_LRINT):
1056 CASE_FLT_FN (BUILT_IN_NEARBYINT):
1057 CASE_FLT_FN (BUILT_IN_RINT):
1058 return !flag_rounding_math;
1066 /* Check whether we may negate an integer constant T without causing
1067 overflow. */
1070 may_negate_without_overflow_p (tree t)
1072 unsigned HOST_WIDE_INT val;
1076 gcc_assert (TREE_CODE (t) == INTEGER_CST);
1078 type = TREE_TYPE (t);
1079 if (TYPE_UNSIGNED (type))
1082 prec = TYPE_PRECISION (type);
1083 if (prec > HOST_BITS_PER_WIDE_INT)
1085 if (TREE_INT_CST_LOW (t) != 0)
1087 prec -= HOST_BITS_PER_WIDE_INT;
1088 val = TREE_INT_CST_HIGH (t);
1091 val = TREE_INT_CST_LOW (t);
1092 if (prec < HOST_BITS_PER_WIDE_INT)
1093 val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
1094 return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1));
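/* Example (illustrative): for a signed 8-bit type the only constant
   whose negation overflows is -128.  Its low bits are 0x80, which
   equals (unsigned HOST_WIDE_INT) 1 << (prec - 1), so the comparison
   above returns false for it and true for every other constant.  */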
1097 /* Determine whether an expression T can be cheaply negated using
1098 the function negate_expr without introducing undefined overflow. */
1101 negate_expr_p (tree t)
1108 type = TREE_TYPE (t);
1110 STRIP_SIGN_NOPS (t);
1111 switch (TREE_CODE (t))
1114 if (TYPE_OVERFLOW_WRAPS (type))
1117 /* Check that -CST will not overflow type. */
1118 return may_negate_without_overflow_p (t);
1120 return (INTEGRAL_TYPE_P (type)
1121 && TYPE_OVERFLOW_WRAPS (type));
1128 return negate_expr_p (TREE_REALPART (t))
1129 && negate_expr_p (TREE_IMAGPART (t));
1132 return negate_expr_p (TREE_OPERAND (t, 0))
1133 && negate_expr_p (TREE_OPERAND (t, 1));
1136 return negate_expr_p (TREE_OPERAND (t, 0));
1139 if (HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
1140 || HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
1142 /* -(A + B) -> (-B) - A. */
1143 if (negate_expr_p (TREE_OPERAND (t, 1))
1144 && reorder_operands_p (TREE_OPERAND (t, 0),
1145 TREE_OPERAND (t, 1)))
1147 /* -(A + B) -> (-A) - B. */
1148 return negate_expr_p (TREE_OPERAND (t, 0));
1151 /* We can't turn -(A-B) into B-A when we honor signed zeros. */
1152 return !HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
1153 && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
1154 && reorder_operands_p (TREE_OPERAND (t, 0),
1155 TREE_OPERAND (t, 1));
1158 if (TYPE_UNSIGNED (TREE_TYPE (t)))
1164 if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
1165 return negate_expr_p (TREE_OPERAND (t, 1))
1166 || negate_expr_p (TREE_OPERAND (t, 0));
1169 case TRUNC_DIV_EXPR:
1170 case ROUND_DIV_EXPR:
1171 case FLOOR_DIV_EXPR:
1173 case EXACT_DIV_EXPR:
1174 /* In general we can't negate A / B, because if A is INT_MIN and
1175 B is 1, we may turn this into INT_MIN / -1 which is undefined
1176 and actually traps on some architectures. But if overflow is
1177 undefined, we can negate, because - (INT_MIN / 1) is an
1178 overflow.
1179 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
1180 && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t)))
1182 return negate_expr_p (TREE_OPERAND (t, 1))
1183 || negate_expr_p (TREE_OPERAND (t, 0));
1186 /* Negate -((double)float) as (double)(-float). */
1187 if (TREE_CODE (type) == REAL_TYPE)
1189 tree tem = strip_float_extensions (t);
1191 return negate_expr_p (tem);
1196 /* Negate -f(x) as f(-x). */
1197 if (negate_mathfn_p (builtin_mathfn_code (t)))
1198 return negate_expr_p (CALL_EXPR_ARG (t, 0));
1202 /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */
1203 if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
1205 tree op1 = TREE_OPERAND (t, 1);
1206 if (TREE_INT_CST_HIGH (op1) == 0
1207 && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
1208 == TREE_INT_CST_LOW (op1))
1219 /* Given T, an expression, return a folded tree for -T or NULL_TREE, if no
1220 simplification is possible.
1221 If negate_expr_p would return true for T, NULL_TREE will never be
1222 returned. */
1225 fold_negate_expr (tree t)
1227 tree type = TREE_TYPE (t);
1230 switch (TREE_CODE (t))
1232 /* Convert - (~A) to A + 1. */
1234 if (INTEGRAL_TYPE_P (type))
1235 return fold_build2 (PLUS_EXPR, type, TREE_OPERAND (t, 0),
1236 build_int_cst (type, 1));
1240 tem = fold_negate_const (t, type);
1241 if (TREE_OVERFLOW (tem) == TREE_OVERFLOW (t)
1242 || !TYPE_OVERFLOW_TRAPS (type))
1247 tem = fold_negate_const (t, type);
1248 /* Two's complement FP formats, such as c4x, may overflow. */
1249 if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
1255 tree rpart = negate_expr (TREE_REALPART (t));
1256 tree ipart = negate_expr (TREE_IMAGPART (t));
1258 if ((TREE_CODE (rpart) == REAL_CST
1259 && TREE_CODE (ipart) == REAL_CST)
1260 || (TREE_CODE (rpart) == INTEGER_CST
1261 && TREE_CODE (ipart) == INTEGER_CST))
1262 return build_complex (type, rpart, ipart);
1267 if (negate_expr_p (t))
1268 return fold_build2 (COMPLEX_EXPR, type,
1269 fold_negate_expr (TREE_OPERAND (t, 0)),
1270 fold_negate_expr (TREE_OPERAND (t, 1)));
1274 if (negate_expr_p (t))
1275 return fold_build1 (CONJ_EXPR, type,
1276 fold_negate_expr (TREE_OPERAND (t, 0)));
1280 return TREE_OPERAND (t, 0);
1283 if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
1284 && !HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
1286 /* -(A + B) -> (-B) - A. */
1287 if (negate_expr_p (TREE_OPERAND (t, 1))
1288 && reorder_operands_p (TREE_OPERAND (t, 0),
1289 TREE_OPERAND (t, 1)))
1291 tem = negate_expr (TREE_OPERAND (t, 1));
1292 return fold_build2 (MINUS_EXPR, type,
1293 tem, TREE_OPERAND (t, 0));
1296 /* -(A + B) -> (-A) - B. */
1297 if (negate_expr_p (TREE_OPERAND (t, 0)))
1299 tem = negate_expr (TREE_OPERAND (t, 0));
1300 return fold_build2 (MINUS_EXPR, type,
1301 tem, TREE_OPERAND (t, 1));
1307 /* - (A - B) -> B - A */
1308 if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
1309 && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
1310 && reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1)))
1311 return fold_build2 (MINUS_EXPR, type,
1312 TREE_OPERAND (t, 1), TREE_OPERAND (t, 0));
1316 if (TYPE_UNSIGNED (type))
1322 if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type)))
1324 tem = TREE_OPERAND (t, 1);
1325 if (negate_expr_p (tem))
1326 return fold_build2 (TREE_CODE (t), type,
1327 TREE_OPERAND (t, 0), negate_expr (tem));
1328 tem = TREE_OPERAND (t, 0);
1329 if (negate_expr_p (tem))
1330 return fold_build2 (TREE_CODE (t), type,
1331 negate_expr (tem), TREE_OPERAND (t, 1));
1335 case TRUNC_DIV_EXPR:
1336 case ROUND_DIV_EXPR:
1337 case FLOOR_DIV_EXPR:
1339 case EXACT_DIV_EXPR:
1340 /* In general we can't negate A / B, because if A is INT_MIN and
1341 B is 1, we may turn this into INT_MIN / -1 which is undefined
1342 and actually traps on some architectures. But if overflow is
1343 undefined, we can negate, because - (INT_MIN / 1) is an
1344 overflow.
1345 if (!INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_UNDEFINED (type))
1347 const char * const warnmsg = G_("assuming signed overflow does not "
1348 "occur when negating a division");
1349 tem = TREE_OPERAND (t, 1);
1350 if (negate_expr_p (tem))
1352 if (INTEGRAL_TYPE_P (type)
1353 && (TREE_CODE (tem) != INTEGER_CST
1354 || integer_onep (tem)))
1355 fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
1356 return fold_build2 (TREE_CODE (t), type,
1357 TREE_OPERAND (t, 0), negate_expr (tem));
1359 tem = TREE_OPERAND (t, 0);
1360 if (negate_expr_p (tem))
1362 if (INTEGRAL_TYPE_P (type)
1363 && (TREE_CODE (tem) != INTEGER_CST
1364 || tree_int_cst_equal (tem, TYPE_MIN_VALUE (type))))
1365 fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
1366 return fold_build2 (TREE_CODE (t), type,
1367 negate_expr (tem), TREE_OPERAND (t, 1));
1373 /* Convert -((double)float) into (double)(-float). */
1374 if (TREE_CODE (type) == REAL_TYPE)
1376 tem = strip_float_extensions (t);
1377 if (tem != t && negate_expr_p (tem))
1378 return negate_expr (tem);
1383 /* Negate -f(x) as f(-x). */
1384 if (negate_mathfn_p (builtin_mathfn_code (t))
1385 && negate_expr_p (CALL_EXPR_ARG (t, 0)))
1389 fndecl = get_callee_fndecl (t);
1390 arg = negate_expr (CALL_EXPR_ARG (t, 0));
1391 return build_call_expr (fndecl, 1, arg);
1396 /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */
1397 if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
1399 tree op1 = TREE_OPERAND (t, 1);
1400 if (TREE_INT_CST_HIGH (op1) == 0
1401 && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
1402 == TREE_INT_CST_LOW (op1))
1404 tree ntype = TYPE_UNSIGNED (type)
1405 ? lang_hooks.types.signed_type (type)
1406 : lang_hooks.types.unsigned_type (type);
1407 tree temp = fold_convert (ntype, TREE_OPERAND (t, 0));
1408 temp = fold_build2 (RSHIFT_EXPR, ntype, temp, op1);
1409 return fold_convert (type, temp);
1421 /* Like fold_negate_expr, but return a NEGATE_EXPR tree if T cannot be
1422 negated in a simpler way. Also allow for T to be NULL_TREE, in which case
1423 return NULL_TREE. */
1426 negate_expr (tree t)
1433 type = TREE_TYPE (t);
1434 STRIP_SIGN_NOPS (t);
1436 tem = fold_negate_expr (t);
1438 tem = build1 (NEGATE_EXPR, TREE_TYPE (t), t);
1439 return fold_convert (type, tem);
1442 /* Split a tree IN into constant, literal and variable parts that could be
1443 combined with CODE to make IN. "constant" means an expression with
1444 TREE_CONSTANT but that isn't an actual constant. CODE must be a
1445 commutative arithmetic operation. Store the constant part into *CONP,
1446 the literal in *LITP and return the variable part. If a part isn't
1447 present, set it to null. If the tree does not decompose in this way,
1448 return the entire tree as the variable part and the other parts as null.
1450 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR. In that
1451 case, we negate an operand that was subtracted, except if it is a
1452 literal, for which we use *MINUS_LITP instead.
1454 If NEGATE_P is true, we are negating all of IN, again except a literal
1455 for which we use *MINUS_LITP instead.
1457 If IN is itself a literal or constant, return it as appropriate.
1459 Note that we do not guarantee that any of the three values will be the
1460 same type as IN, but they will have the same signedness and mode. */
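/* Example (illustrative): splitting A + 3 with CODE == PLUS_EXPR sets
   *LITP to 3 and returns A; splitting A - 3 sets *MINUS_LITP to 3
   instead, recording that the literal was subtracted.  */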
1463 split_tree (tree in, enum tree_code code, tree *conp, tree *litp,
1464 tree *minus_litp, int negate_p)
1472 /* Strip any conversions that don't change the machine mode or signedness. */
1473 STRIP_SIGN_NOPS (in);
1475 if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST)
1477 else if (TREE_CODE (in) == code
1478 || (! FLOAT_TYPE_P (TREE_TYPE (in))
1479 /* We can associate addition and subtraction together (even
1480 though the C standard doesn't say so) for integers because
1481 the value is not affected. For reals, the value might be
1482 affected, so we can't. */
1483 && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
1484 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
1486 tree op0 = TREE_OPERAND (in, 0);
1487 tree op1 = TREE_OPERAND (in, 1);
1488 int neg1_p = TREE_CODE (in) == MINUS_EXPR;
1489 int neg_litp_p = 0, neg_conp_p = 0, neg_var_p = 0;
1491 /* First see if either of the operands is a literal, then a constant. */
1492 if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST)
1493 *litp = op0, op0 = 0;
1494 else if (TREE_CODE (op1) == INTEGER_CST || TREE_CODE (op1) == REAL_CST)
1495 *litp = op1, neg_litp_p = neg1_p, op1 = 0;
1497 if (op0 != 0 && TREE_CONSTANT (op0))
1498 *conp = op0, op0 = 0;
1499 else if (op1 != 0 && TREE_CONSTANT (op1))
1500 *conp = op1, neg_conp_p = neg1_p, op1 = 0;
1502 /* If we haven't dealt with either operand, this is not a case we can
1503 decompose. Otherwise, VAR is either of the ones remaining, if any. */
1504 if (op0 != 0 && op1 != 0)
1509 var = op1, neg_var_p = neg1_p;
1511 /* Now do any needed negations. */
1513 *minus_litp = *litp, *litp = 0;
1515 *conp = negate_expr (*conp);
1517 var = negate_expr (var);
1519 else if (TREE_CONSTANT (in))
1527 *minus_litp = *litp, *litp = 0;
1528 else if (*minus_litp)
1529 *litp = *minus_litp, *minus_litp = 0;
1530 *conp = negate_expr (*conp);
1531 var = negate_expr (var);
1537 /* Re-associate trees split by the above function. T1 and T2 are either
1538 expressions to associate or null. Return the new expression, if any. If
1539 we build an operation, do it in TYPE and with CODE. */
1542 associate_trees (tree t1, tree t2, enum tree_code code, tree type)
1549 /* If either input is CODE, a PLUS_EXPR, or a MINUS_EXPR, don't
1550 try to fold this since we will have infinite recursion. But do
1551 deal with any NEGATE_EXPRs. */
1552 if (TREE_CODE (t1) == code || TREE_CODE (t2) == code
1553 || TREE_CODE (t1) == MINUS_EXPR || TREE_CODE (t2) == MINUS_EXPR)
1555 if (code == PLUS_EXPR)
1557 if (TREE_CODE (t1) == NEGATE_EXPR)
1558 return build2 (MINUS_EXPR, type, fold_convert (type, t2),
1559 fold_convert (type, TREE_OPERAND (t1, 0)));
1560 else if (TREE_CODE (t2) == NEGATE_EXPR)
1561 return build2 (MINUS_EXPR, type, fold_convert (type, t1),
1562 fold_convert (type, TREE_OPERAND (t2, 0)));
1563 else if (integer_zerop (t2))
1564 return fold_convert (type, t1);
1566 else if (code == MINUS_EXPR)
1568 if (integer_zerop (t2))
1569 return fold_convert (type, t1);
1572 return build2 (code, type, fold_convert (type, t1),
1573 fold_convert (type, t2));
1576 return fold_build2 (code, type, fold_convert (type, t1),
1577 fold_convert (type, t2));
1580 /* Check whether TYPE1 and TYPE2 are equivalent integer types, suitable
1581 for use in int_const_binop, size_binop and size_diffop. */
1584 int_binop_types_match_p (enum tree_code code, tree type1, tree type2)
1586 if (TREE_CODE (type1) != INTEGER_TYPE && !POINTER_TYPE_P (type1))
1588 if (TREE_CODE (type2) != INTEGER_TYPE && !POINTER_TYPE_P (type2))
1603 return TYPE_UNSIGNED (type1) == TYPE_UNSIGNED (type2)
1604 && TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
1605 && TYPE_MODE (type1) == TYPE_MODE (type2);
1609 /* Combine two integer constants ARG1 and ARG2 under operation CODE
1610 to produce a new constant. Return NULL_TREE if we don't know how
1611 to evaluate CODE at compile-time.
1613 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1616 int_const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
1618 unsigned HOST_WIDE_INT int1l, int2l;
1619 HOST_WIDE_INT int1h, int2h;
1620 unsigned HOST_WIDE_INT low;
1622 unsigned HOST_WIDE_INT garbagel;
1623 HOST_WIDE_INT garbageh;
1625 tree type = TREE_TYPE (arg1);
1626 int uns = TYPE_UNSIGNED (type);
1628 = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
1631 int1l = TREE_INT_CST_LOW (arg1);
1632 int1h = TREE_INT_CST_HIGH (arg1);
1633 int2l = TREE_INT_CST_LOW (arg2);
1634 int2h = TREE_INT_CST_HIGH (arg2);
1639 low = int1l | int2l, hi = int1h | int2h;
1643 low = int1l ^ int2l, hi = int1h ^ int2h;
1647 low = int1l & int2l, hi = int1h & int2h;
1653 /* It's unclear from the C standard whether shifts can overflow.
1654 The following code ignores overflow; perhaps a C standard
1655 interpretation ruling is needed. */
1656 lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
1663 lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
1668 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
1672 neg_double (int2l, int2h, &low, &hi);
1673 add_double (int1l, int1h, low, hi, &low, &hi);
1674 overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h);
1678 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1681 case TRUNC_DIV_EXPR:
1682 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1683 case EXACT_DIV_EXPR:
1684 /* This is a shortcut for a common special case. */
1685 if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
1686 && !TREE_OVERFLOW (arg1)
1687 && !TREE_OVERFLOW (arg2)
1688 && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
1690 if (code == CEIL_DIV_EXPR)
1693 low = int1l / int2l, hi = 0;
1697 /* ... fall through ... */
1699 case ROUND_DIV_EXPR:
1700 if (int2h == 0 && int2l == 0)
1702 if (int2h == 0 && int2l == 1)
1704 low = int1l, hi = int1h;
1707 if (int1l == int2l && int1h == int2h
1708 && ! (int1l == 0 && int1h == 0))
1713 overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
1714 &low, &hi, &garbagel, &garbageh);
1717 case TRUNC_MOD_EXPR:
1718 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
1719 /* This is a shortcut for a common special case. */
1720 if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
1721 && !TREE_OVERFLOW (arg1)
1722 && !TREE_OVERFLOW (arg2)
1723 && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
1725 if (code == CEIL_MOD_EXPR)
1727 low = int1l % int2l, hi = 0;
1731 /* ... fall through ... */
1733 case ROUND_MOD_EXPR:
1734 if (int2h == 0 && int2l == 0)
1736 overflow = div_and_round_double (code, uns,
1737 int1l, int1h, int2l, int2h,
1738 &garbagel, &garbageh, &low, &hi);
1744 low = (((unsigned HOST_WIDE_INT) int1h
1745 < (unsigned HOST_WIDE_INT) int2h)
1746 || (((unsigned HOST_WIDE_INT) int1h
1747 == (unsigned HOST_WIDE_INT) int2h)
1750 low = (int1h < int2h
1751 || (int1h == int2h && int1l < int2l));
1753 if (low == (code == MIN_EXPR))
1754 low = int1l, hi = int1h;
1756 low = int2l, hi = int2h;
1765 t = build_int_cst_wide (TREE_TYPE (arg1), low, hi);
1767 /* Propagate overflow flags ourselves. */
1768 if (((!uns || is_sizetype) && overflow)
1769 | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
1772 TREE_OVERFLOW (t) = 1;
1776 t = force_fit_type_double (TREE_TYPE (arg1), low, hi, 1,
1777 ((!uns || is_sizetype) && overflow)
1778 | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
1783 /* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
1784 constant. We assume ARG1 and ARG2 have the same data type, or at least
1785 are the same kind of constant and the same machine mode. Return zero if
1786 combining the constants is not allowed in the current operating mode.
1788 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1791 const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
1793 /* Sanity check for the recursive cases. */
1800 if (TREE_CODE (arg1) == INTEGER_CST)
1801 return int_const_binop (code, arg1, arg2, notrunc);
1803 if (TREE_CODE (arg1) == REAL_CST)
1805 enum machine_mode mode;
1808 REAL_VALUE_TYPE value;
1809 REAL_VALUE_TYPE result;
1813 /* The following codes are handled by real_arithmetic. */
1828 d1 = TREE_REAL_CST (arg1);
1829 d2 = TREE_REAL_CST (arg2);
1831 type = TREE_TYPE (arg1);
1832 mode = TYPE_MODE (type);
1834 /* Don't perform operation if we honor signaling NaNs and
1835 either operand is a NaN. */
1836 if (HONOR_SNANS (mode)
1837 && (REAL_VALUE_ISNAN (d1) || REAL_VALUE_ISNAN (d2)))
1840 /* Don't perform operation if it would raise a division
1841 by zero exception. */
1842 if (code == RDIV_EXPR
1843 && REAL_VALUES_EQUAL (d2, dconst0)
1844 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1847 /* If either operand is a NaN, just return it. Otherwise, set up
1848 for floating-point trap; we return an overflow. */
1849 if (REAL_VALUE_ISNAN (d1))
1851 else if (REAL_VALUE_ISNAN (d2))
1854 inexact = real_arithmetic (&value, code, &d1, &d2);
1855 real_convert (&result, mode, &value);
1857 /* Don't constant fold this floating point operation if
1858 the result has overflowed and flag_trapping_math. */
1859 if (flag_trapping_math
1860 && MODE_HAS_INFINITIES (mode)
1861 && REAL_VALUE_ISINF (result)
1862 && !REAL_VALUE_ISINF (d1)
1863 && !REAL_VALUE_ISINF (d2))
1866 /* Don't constant fold this floating point operation if the
1867 result may depend upon the run-time rounding mode and
1868 flag_rounding_math is set, or if GCC's software emulation
1869 is unable to accurately represent the result. */
1870 if ((flag_rounding_math
1871 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
1872 && !flag_unsafe_math_optimizations))
1873 && (inexact || !real_identical (&result, &value)))
1876 t = build_real (type, result);
1878 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
1882 if (TREE_CODE (arg1) == COMPLEX_CST)
1884 tree type = TREE_TYPE (arg1);
1885 tree r1 = TREE_REALPART (arg1);
1886 tree i1 = TREE_IMAGPART (arg1);
1887 tree r2 = TREE_REALPART (arg2);
1888 tree i2 = TREE_IMAGPART (arg2);
1895 real = const_binop (code, r1, r2, notrunc);
1896 imag = const_binop (code, i1, i2, notrunc);
1900 real = const_binop (MINUS_EXPR,
1901 const_binop (MULT_EXPR, r1, r2, notrunc),
1902 const_binop (MULT_EXPR, i1, i2, notrunc),
1904 imag = const_binop (PLUS_EXPR,
1905 const_binop (MULT_EXPR, r1, i2, notrunc),
1906 const_binop (MULT_EXPR, i1, r2, notrunc),
1913 = const_binop (PLUS_EXPR,
1914 const_binop (MULT_EXPR, r2, r2, notrunc),
1915 const_binop (MULT_EXPR, i2, i2, notrunc),
1918 = const_binop (PLUS_EXPR,
1919 const_binop (MULT_EXPR, r1, r2, notrunc),
1920 const_binop (MULT_EXPR, i1, i2, notrunc),
1923 = const_binop (MINUS_EXPR,
1924 const_binop (MULT_EXPR, i1, r2, notrunc),
1925 const_binop (MULT_EXPR, r1, i2, notrunc),
1928 if (INTEGRAL_TYPE_P (TREE_TYPE (r1)))
1929 code = TRUNC_DIV_EXPR;
1931 real = const_binop (code, t1, magsquared, notrunc);
1932 imag = const_binop (code, t2, magsquared, notrunc);
1941 return build_complex (type, real, imag);
1947 /* Create a size type INT_CST node with NUMBER sign extended. KIND
1948 indicates which particular sizetype to create. */
1951 size_int_kind (HOST_WIDE_INT number, enum size_type_kind kind)
1953 return build_int_cst (sizetype_tab[(int) kind], number);
1956 /* Combine operands OP1 and OP2 with arithmetic operation CODE. CODE
1957 is a tree code. The type of the result is taken from the operands.
1958 Both must be equivalent integer types, per int_binop_types_match_p.
1959 If the operands are constant, so is the result. */
1962 size_binop (enum tree_code code, tree arg0, tree arg1)
1964 tree type = TREE_TYPE (arg0);
1966 if (arg0 == error_mark_node || arg1 == error_mark_node)
1967 return error_mark_node;
1969 gcc_assert (int_binop_types_match_p (code, TREE_TYPE (arg0),
1972 /* Handle the special case of two integer constants faster. */
1973 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1975 /* And some specific cases even faster than that. */
1976 if (code == PLUS_EXPR)
1978 if (integer_zerop (arg0) && !TREE_OVERFLOW (arg0))
1980 if (integer_zerop (arg1) && !TREE_OVERFLOW (arg1))
1983 else if (code == MINUS_EXPR)
1985 if (integer_zerop (arg1) && !TREE_OVERFLOW (arg1))
1988 else if (code == MULT_EXPR)
1990 if (integer_onep (arg0) && !TREE_OVERFLOW (arg0))
1994 /* Handle general case of two integer constants. */
1995 return int_const_binop (code, arg0, arg1, 0);
1998 return fold_build2 (code, type, arg0, arg1);
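/* Typical use (illustrative; n and elem_type are made-up names):

     tree bytes = size_binop (MULT_EXPR, size_int (n),
                              TYPE_SIZE_UNIT (elem_type));

   Both operands are of sizetype, so the result is too, and it is
   constant whenever both inputs are.  */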
2001 /* Given two values, either both of sizetype or both of bitsizetype,
2002 compute the difference between the two values. Return the value
2003 in a signed type corresponding to the type of the operands. */
2006 size_diffop (tree arg0, tree arg1)
2008 tree type = TREE_TYPE (arg0);
2011 gcc_assert (int_binop_types_match_p (MINUS_EXPR, TREE_TYPE (arg0),
2014 /* If the type is already signed, just do the simple thing. */
2015 if (!TYPE_UNSIGNED (type))
2016 return size_binop (MINUS_EXPR, arg0, arg1);
2018 if (type == sizetype)
2019 ctype = ssizetype;
2020 else if (type == bitsizetype)
2021 ctype = sbitsizetype;
2023 ctype = lang_hooks.types.signed_type (type);
2025 /* If either operand is not a constant, do the conversions to the signed
2026 type and subtract. The hardware will do the right thing with any
2027 overflow in the subtraction. */
2028 if (TREE_CODE (arg0) != INTEGER_CST || TREE_CODE (arg1) != INTEGER_CST)
2029 return size_binop (MINUS_EXPR, fold_convert (ctype, arg0),
2030 fold_convert (ctype, arg1));
2032 /* If ARG0 is larger than ARG1, subtract and return the result in CTYPE.
2033 Otherwise, subtract the other way, convert to CTYPE (we know that can't
2034 overflow) and negate (which can't either). Special-case a result
2035 of zero while we're here. */
2036 if (tree_int_cst_equal (arg0, arg1))
2037 return build_int_cst (ctype, 0);
2038 else if (tree_int_cst_lt (arg1, arg0))
2039 return fold_convert (ctype, size_binop (MINUS_EXPR, arg0, arg1));
2041 return size_binop (MINUS_EXPR, build_int_cst (ctype, 0),
2042 fold_convert (ctype, size_binop (MINUS_EXPR,
2046 /* A subroutine of fold_convert_const handling conversions of an
2047 INTEGER_CST to another integer type. */
2050 fold_convert_const_int_from_int (tree type, tree arg1)
2054 /* Given an integer constant, make new constant with new type,
2055 appropriately sign-extended or truncated. */
2056 t = force_fit_type_double (type, TREE_INT_CST_LOW (arg1),
2057 TREE_INT_CST_HIGH (arg1),
2058 /* Don't set the overflow when
2059 converting a pointer */
2060 !POINTER_TYPE_P (TREE_TYPE (arg1)),
2061 (TREE_INT_CST_HIGH (arg1) < 0
2062 && (TYPE_UNSIGNED (type)
2063 < TYPE_UNSIGNED (TREE_TYPE (arg1))))
2064 | TREE_OVERFLOW (arg1));
2069 /* A subroutine of fold_convert_const handling conversions of a REAL_CST
2070 to an integer type. */
2073 fold_convert_const_int_from_real (enum tree_code code, tree type, tree arg1)
2078 /* The following code implements the floating point to integer
2079 conversion rules required by the Java Language Specification,
2080 that IEEE NaNs are mapped to zero and values that overflow
2081 the target precision saturate, i.e. values greater than
2082 INT_MAX are mapped to INT_MAX, and values less than INT_MIN
2083 are mapped to INT_MIN. These semantics are allowed by the
2084 C and C++ standards that simply state that the behavior of
2085 FP-to-integer conversion is unspecified upon overflow. */
2087 HOST_WIDE_INT high, low;
2089 REAL_VALUE_TYPE x = TREE_REAL_CST (arg1);
2093 case FIX_TRUNC_EXPR:
2094 real_trunc (&r, VOIDmode, &x);
2101 /* If R is NaN, return zero and show we have an overflow. */
2102 if (REAL_VALUE_ISNAN (r))
2109 /* See if R is less than the lower bound or greater than the
2110 upper bound. */
2114 tree lt = TYPE_MIN_VALUE (type);
2115 REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt);
2116 if (REAL_VALUES_LESS (r, l))
2119 high = TREE_INT_CST_HIGH (lt);
2120 low = TREE_INT_CST_LOW (lt);
2126 tree ut = TYPE_MAX_VALUE (type);
2129 REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut);
2130 if (REAL_VALUES_LESS (u, r))
2133 high = TREE_INT_CST_HIGH (ut);
2134 low = TREE_INT_CST_LOW (ut);
2140 REAL_VALUE_TO_INT (&low, &high, r);
2142 t = force_fit_type_double (type, low, high, -1,
2143 overflow | TREE_OVERFLOW (arg1));
2147 /* A subroutine of fold_convert_const handling conversions of a REAL_CST
2148 to another floating point type. */
2151 fold_convert_const_real_from_real (tree type, tree arg1)
2153 REAL_VALUE_TYPE value;
2156 real_convert (&value, TYPE_MODE (type), &TREE_REAL_CST (arg1));
2157 t = build_real (type, value);
2159 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
2163 /* Attempt to fold type conversion operation CODE of expression ARG1 to
2164 type TYPE. If no simplification can be done return NULL_TREE. */
2167 fold_convert_const (enum tree_code code, tree type, tree arg1)
2169 if (TREE_TYPE (arg1) == type)
2172 if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))
2174 if (TREE_CODE (arg1) == INTEGER_CST)
2175 return fold_convert_const_int_from_int (type, arg1);
2176 else if (TREE_CODE (arg1) == REAL_CST)
2177 return fold_convert_const_int_from_real (code, type, arg1);
2179 else if (TREE_CODE (type) == REAL_TYPE)
2181 if (TREE_CODE (arg1) == INTEGER_CST)
2182 return build_real_from_int_cst (type, arg1);
2183 if (TREE_CODE (arg1) == REAL_CST)
2184 return fold_convert_const_real_from_real (type, arg1);
2189 /* Construct a vector of zero elements of vector type TYPE. */
2192 build_zero_vector (tree type)
2197 elem = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
2198 units = TYPE_VECTOR_SUBPARTS (type);
2201 for (i = 0; i < units; i++)
2202 list = tree_cons (NULL_TREE, elem, list);
2203 return build_vector (type, list);
2206 /* Convert expression ARG to type TYPE. Used by the middle-end for
2207 simple conversions in preference to calling the front-end's convert. */
fold_convert (tree type, tree arg)
tree orig = TREE_TYPE (arg);
if (TREE_CODE (arg) == ERROR_MARK
|| TREE_CODE (type) == ERROR_MARK
|| TREE_CODE (orig) == ERROR_MARK)
return error_mark_node;
if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig)
|| lang_hooks.types_compatible_p (TYPE_MAIN_VARIANT (type),
TYPE_MAIN_VARIANT (orig)))
return fold_build1 (NOP_EXPR, type, arg);
switch (TREE_CODE (type))
case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
case POINTER_TYPE: case REFERENCE_TYPE:
if (TREE_CODE (arg) == INTEGER_CST)
tem = fold_convert_const (NOP_EXPR, type, arg);
if (tem != NULL_TREE)
if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
|| TREE_CODE (orig) == OFFSET_TYPE)
return fold_build1 (NOP_EXPR, type, arg);
if (TREE_CODE (orig) == COMPLEX_TYPE)
tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
return fold_convert (type, tem);
gcc_assert (TREE_CODE (orig) == VECTOR_TYPE
&& tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
return fold_build1 (NOP_EXPR, type, arg);
if (TREE_CODE (arg) == INTEGER_CST)
tem = fold_convert_const (FLOAT_EXPR, type, arg);
if (tem != NULL_TREE)
else if (TREE_CODE (arg) == REAL_CST)
tem = fold_convert_const (NOP_EXPR, type, arg);
if (tem != NULL_TREE)
switch (TREE_CODE (orig))
case BOOLEAN_TYPE: case ENUMERAL_TYPE:
case POINTER_TYPE: case REFERENCE_TYPE:
return fold_build1 (FLOAT_EXPR, type, arg);
return fold_build1 (NOP_EXPR, type, arg);
tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
return fold_convert (type, tem);
switch (TREE_CODE (orig))
case BOOLEAN_TYPE: case ENUMERAL_TYPE:
case POINTER_TYPE: case REFERENCE_TYPE:
return build2 (COMPLEX_EXPR, type,
fold_convert (TREE_TYPE (type), arg),
fold_convert (TREE_TYPE (type), integer_zero_node));
if (TREE_CODE (arg) == COMPLEX_EXPR)
rpart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 0));
ipart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 1));
return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
arg = save_expr (arg);
rpart = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
ipart = fold_build1 (IMAGPART_EXPR, TREE_TYPE (orig), arg);
rpart = fold_convert (TREE_TYPE (type), rpart);
ipart = fold_convert (TREE_TYPE (type), ipart);
return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
if (integer_zerop (arg))
return build_zero_vector (type);
gcc_assert (tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
gcc_assert (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
|| TREE_CODE (orig) == VECTOR_TYPE);
return fold_build1 (VIEW_CONVERT_EXPR, type, arg);
tem = fold_ignored_result (arg);
if (TREE_CODE (tem) == GIMPLE_MODIFY_STMT)
return fold_build1 (NOP_EXPR, type, tem);
/* Return false if expr can be assumed not to be an lvalue, true
otherwise. */
maybe_lvalue_p (tree x)
/* We only need to wrap lvalue tree codes. */
switch (TREE_CODE (x))
case ALIGN_INDIRECT_REF:
case MISALIGNED_INDIRECT_REF:
case ARRAY_RANGE_REF:
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
case TRY_CATCH_EXPR:
case WITH_CLEANUP_EXPR:
case GIMPLE_MODIFY_STMT:
/* Assume the worst for front-end tree codes. */
if ((int) TREE_CODE (x) >= NUM_TREE_CODES)
/* Return an expr equal to X but certainly not valid as an lvalue. */
/* While we are in GIMPLE, NON_LVALUE_EXPR doesn't mean anything to
us. */
if (! maybe_lvalue_p (x))
return build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
Zero means allow extended lvalues. */
int pedantic_lvalues;
/* When pedantic, return an expr equal to X but certainly not valid as a
pedantic lvalue. Otherwise, return X. */
pedantic_non_lvalue (tree x)
if (pedantic_lvalues)
return non_lvalue (x);
/* Given a tree comparison code, return the code that is the logical inverse
of the given code. It is not safe to do this for floating-point
comparisons, except for NE_EXPR and EQ_EXPR, so we receive a HONOR_NANS
flag as well: if reversing the comparison is unsafe, return ERROR_MARK. */
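/* For example (illustrative only): the inverse of a < b is a >= b when
   NaNs cannot occur, but UNGE_EXPR (unordered or greater-equal) when they
   can; and with -ftrapping-math the NaN-honoring inversion is refused
   (ERROR_MARK returned) because it could change which operations trap.  */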
invert_tree_comparison (enum tree_code code, bool honor_nans)
if (honor_nans && flag_trapping_math)
return honor_nans ? UNLE_EXPR : LE_EXPR;
return honor_nans ? UNLT_EXPR : LT_EXPR;
return honor_nans ? UNGE_EXPR : GE_EXPR;
return honor_nans ? UNGT_EXPR : GT_EXPR;
return UNORDERED_EXPR;
case UNORDERED_EXPR:
return ORDERED_EXPR;
/* Similar, but return the comparison that results if the operands are
swapped. This is safe for floating-point. */
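/* For example, a < b and b > a are the same test, so LT_EXPR swaps to
   GT_EXPR; symmetric codes such as EQ_EXPR and UNORDERED_EXPR swap to
   themselves.  */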
swap_tree_comparison (enum tree_code code)
case UNORDERED_EXPR:
/* Convert a comparison tree code from an enum tree_code representation
into a compcode bit-based encoding. This function is the inverse of
compcode_to_comparison. */
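/* A sketch of the encoding (one bit each for "less", "equal", "greater"
   and "unordered"): COMPCODE_LE is COMPCODE_LT | COMPCODE_EQ, and
   COMPCODE_UNGE is COMPCODE_UNORD | COMPCODE_GT | COMPCODE_EQ.  This is
   what lets combine_comparisons below merge two comparisons with plain
   bitwise AND/OR.  */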
static enum comparison_code
comparison_to_compcode (enum tree_code code)
return COMPCODE_ORD;
case UNORDERED_EXPR:
return COMPCODE_UNORD;
return COMPCODE_UNLT;
return COMPCODE_UNEQ;
return COMPCODE_UNLE;
return COMPCODE_UNGT;
return COMPCODE_LTGT;
return COMPCODE_UNGE;
/* Convert a compcode bit-based encoding of a comparison operator back
to GCC's enum tree_code representation. This function is the
inverse of comparison_to_compcode. */
static enum tree_code
compcode_to_comparison (enum comparison_code code)
return ORDERED_EXPR;
case COMPCODE_UNORD:
return UNORDERED_EXPR;
/* Return a tree for the comparison which is the combination of
doing the AND or OR (depending on CODE) of the two operations LCODE
and RCODE on the identical operands LL_ARG and LR_ARG. Take into account
the possibility of trapping if the mode has NaNs, and return NULL_TREE
if this makes the transformation invalid. */
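/* For example (illustrative only): combining (a < b) || (a == b) computes
   COMPCODE_LT | COMPCODE_EQ == COMPCODE_LE, so the result is the single
   comparison a <= b; combining (a < b) && (a > b) yields COMPCODE_FALSE
   and folds to constant false.  */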
combine_comparisons (enum tree_code code, enum tree_code lcode,
enum tree_code rcode, tree truth_type,
tree ll_arg, tree lr_arg)
bool honor_nans = HONOR_NANS (TYPE_MODE (TREE_TYPE (ll_arg)));
enum comparison_code lcompcode = comparison_to_compcode (lcode);
enum comparison_code rcompcode = comparison_to_compcode (rcode);
enum comparison_code compcode;
case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR:
compcode = lcompcode & rcompcode;
case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR:
compcode = lcompcode | rcompcode;
/* Eliminate unordered comparisons, as well as LTGT and ORD
which are not used unless the mode has NaNs. */
compcode &= ~COMPCODE_UNORD;
if (compcode == COMPCODE_LTGT)
compcode = COMPCODE_NE;
else if (compcode == COMPCODE_ORD)
compcode = COMPCODE_TRUE;
else if (flag_trapping_math)
/* Check that the original operation and the optimized ones will trap
under the same condition. */
bool ltrap = (lcompcode & COMPCODE_UNORD) == 0
&& (lcompcode != COMPCODE_EQ)
&& (lcompcode != COMPCODE_ORD);
bool rtrap = (rcompcode & COMPCODE_UNORD) == 0
&& (rcompcode != COMPCODE_EQ)
&& (rcompcode != COMPCODE_ORD);
bool trap = (compcode & COMPCODE_UNORD) == 0
&& (compcode != COMPCODE_EQ)
&& (compcode != COMPCODE_ORD);
/* In a short-circuited boolean expression the LHS might be
such that the RHS, if evaluated, will never trap. For
example, in ORD (x, y) && (x < y), we evaluate the RHS only
if neither x nor y is NaN. (This is a mixed blessing: for
example, the expression above will never trap, hence
optimizing it to x < y would be invalid). */
if ((code == TRUTH_ORIF_EXPR && (lcompcode & COMPCODE_UNORD))
|| (code == TRUTH_ANDIF_EXPR && !(lcompcode & COMPCODE_UNORD)))
/* If the comparison was short-circuited, and only the RHS
trapped, we may now generate a spurious trap. */
&& (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
/* If we changed the conditions that cause a trap, we lose. */
if ((ltrap || rtrap) != trap)
if (compcode == COMPCODE_TRUE)
return constant_boolean_node (true, truth_type);
else if (compcode == COMPCODE_FALSE)
return constant_boolean_node (false, truth_type);
return fold_build2 (compcode_to_comparison (compcode),
truth_type, ll_arg, lr_arg);
/* Return nonzero if CODE is a tree code that represents a truth value. */
truth_value_p (enum tree_code code)
return (TREE_CODE_CLASS (code) == tcc_comparison
|| code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
|| code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
|| code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
/* Return nonzero if two operands (typically of the same tree node)
are necessarily equal. If either argument has side-effects this
function returns zero. FLAGS modifies behavior as follows:
If OEP_ONLY_CONST is set, only return nonzero for constants.
This function tests whether the operands are indistinguishable;
it does not test whether they are equal using C's == operation.
The distinction is important for IEEE floating point, because
(1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
(2) two NaNs may be indistinguishable, but NaN!=NaN.
If OEP_ONLY_CONST is unset, a VAR_DECL is considered equal to itself
even though it may hold multiple values during a function.
This is because a GCC tree node guarantees that nothing else is
executed between the evaluation of its "operands" (which may often
be evaluated in arbitrary order). Hence if the operands themselves
don't side-effect, the VAR_DECLs, PARM_DECLs etc... must hold the
same value in each operand/subexpression. Hence leaving OEP_ONLY_CONST
unset means assuming isochronic (or instantaneous) tree equivalence.
Unless comparing arbitrary expression trees, such as from different
statements, this flag can usually be left unset.
If OEP_PURE_SAME is set, then pure functions with identical arguments
are considered the same. It is used when the caller has other ways
to ensure that global memory is unchanged in between. */
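/* For example (illustrative only): a + b and b + a compare equal here
   because PLUS_EXPR is commutative, while the REAL_CSTs -0.0 and 0.0
   compare equal only when the target need not honor signed zeros.  */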
operand_equal_p (tree arg0, tree arg1, unsigned int flags)
/* If either is ERROR_MARK, they aren't equal. */
if (TREE_CODE (arg0) == ERROR_MARK || TREE_CODE (arg1) == ERROR_MARK)
/* If both types don't have the same signedness, then we can't consider
them equal. We must check this before the STRIP_NOPS calls
because they may change the signedness of the arguments. */
if (TYPE_UNSIGNED (TREE_TYPE (arg0)) != TYPE_UNSIGNED (TREE_TYPE (arg1)))
/* If both types don't have the same precision, then it is not safe
to strip NOPs. */
if (TYPE_PRECISION (TREE_TYPE (arg0)) != TYPE_PRECISION (TREE_TYPE (arg1)))
/* In case both args are comparisons but with different comparison
code, try to swap the comparison operands of one arg to produce
a match and compare that variant. */
if (TREE_CODE (arg0) != TREE_CODE (arg1)
&& COMPARISON_CLASS_P (arg0)
&& COMPARISON_CLASS_P (arg1))
enum tree_code swap_code = swap_tree_comparison (TREE_CODE (arg1));
if (TREE_CODE (arg0) == swap_code)
return operand_equal_p (TREE_OPERAND (arg0, 0),
TREE_OPERAND (arg1, 1), flags)
&& operand_equal_p (TREE_OPERAND (arg0, 1),
TREE_OPERAND (arg1, 0), flags);
if (TREE_CODE (arg0) != TREE_CODE (arg1)
/* This is needed for conversions and for COMPONENT_REF.
Might as well play it safe and always test this. */
|| TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK
|| TREE_CODE (TREE_TYPE (arg1)) == ERROR_MARK
|| TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
/* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
We don't care about side effects in that case because the SAVE_EXPR
takes care of that for us. In all other cases, two expressions are
equal if they have no side effects. If we have two identical
expressions with side effects that should be treated the same due
to the only side effects being identical SAVE_EXPR's, that will
be detected in the recursive calls below. */
if (arg0 == arg1 && ! (flags & OEP_ONLY_CONST)
&& (TREE_CODE (arg0) == SAVE_EXPR
|| (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
/* Next handle constant cases, those for which we can return 1 even
if ONLY_CONST is set. */
if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1))
switch (TREE_CODE (arg0))
return tree_int_cst_equal (arg0, arg1);
if (REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0),
TREE_REAL_CST (arg1)))
if (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0))))
/* If we do not distinguish between signed and unsigned zero,
consider them equal. */
if (real_zerop (arg0) && real_zerop (arg1))
v1 = TREE_VECTOR_CST_ELTS (arg0);
v2 = TREE_VECTOR_CST_ELTS (arg1);
if (!operand_equal_p (TREE_VALUE (v1), TREE_VALUE (v2),
v1 = TREE_CHAIN (v1);
v2 = TREE_CHAIN (v2);
return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1),
&& operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1),
return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1)
&& ! memcmp (TREE_STRING_POINTER (arg0),
TREE_STRING_POINTER (arg1),
TREE_STRING_LENGTH (arg0)));
return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0),
if (flags & OEP_ONLY_CONST)
/* Define macros to test an operand from arg0 and arg1 for equality and a
variant that allows null and views null as being different from any
non-null value. In the latter case, if either is null, then both
must be; otherwise, do the normal comparison. */
#define OP_SAME(N) operand_equal_p (TREE_OPERAND (arg0, N), \
TREE_OPERAND (arg1, N), flags)
#define OP_SAME_WITH_NULL(N) \
((!TREE_OPERAND (arg0, N) || !TREE_OPERAND (arg1, N)) \
? TREE_OPERAND (arg0, N) == TREE_OPERAND (arg1, N) : OP_SAME (N))
switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
/* Two conversions are equal only if signedness and modes match. */
switch (TREE_CODE (arg0))
case FIX_TRUNC_EXPR:
if (TYPE_UNSIGNED (TREE_TYPE (arg0))
!= TYPE_UNSIGNED (TREE_TYPE (arg1)))
case tcc_comparison:
if (OP_SAME (0) && OP_SAME (1))
/* For commutative ops, allow the other order. */
return (commutative_tree_code (TREE_CODE (arg0))
&& operand_equal_p (TREE_OPERAND (arg0, 0),
TREE_OPERAND (arg1, 1), flags)
&& operand_equal_p (TREE_OPERAND (arg0, 1),
TREE_OPERAND (arg1, 0), flags));
/* If either of the pointer (or reference) expressions we are
dereferencing contains a side effect, these cannot be equal. */
if (TREE_SIDE_EFFECTS (arg0)
|| TREE_SIDE_EFFECTS (arg1))
switch (TREE_CODE (arg0))
case ALIGN_INDIRECT_REF:
case MISALIGNED_INDIRECT_REF:
case ARRAY_RANGE_REF:
/* Operands 2 and 3 may be null. */
&& OP_SAME_WITH_NULL (2)
&& OP_SAME_WITH_NULL (3));
/* Handle operand 2 the same as for ARRAY_REF. Operand 0
may be NULL when we're called to compare MEM_EXPRs. */
return OP_SAME_WITH_NULL (0)
&& OP_SAME_WITH_NULL (2);
return OP_SAME (0) && OP_SAME (1) && OP_SAME (2);
case tcc_expression:
switch (TREE_CODE (arg0))
case TRUTH_NOT_EXPR:
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
return OP_SAME (0) && OP_SAME (1);
case TRUTH_AND_EXPR:
case TRUTH_XOR_EXPR:
if (OP_SAME (0) && OP_SAME (1))
/* Otherwise take into account that this is a commutative operation. */
return (operand_equal_p (TREE_OPERAND (arg0, 0),
TREE_OPERAND (arg1, 1), flags)
&& operand_equal_p (TREE_OPERAND (arg0, 1),
TREE_OPERAND (arg1, 0), flags));
switch (TREE_CODE (arg0))
/* If the CALL_EXPRs call different functions, then they
clearly cannot be equal. */
if (! operand_equal_p (CALL_EXPR_FN (arg0), CALL_EXPR_FN (arg1),
unsigned int cef = call_expr_flags (arg0);
if (flags & OEP_PURE_SAME)
cef &= ECF_CONST | ECF_PURE;
/* Now see if all the arguments are the same. */
call_expr_arg_iterator iter0, iter1;
for (a0 = first_call_expr_arg (arg0, &iter0),
a1 = first_call_expr_arg (arg1, &iter1);
a0 = next_call_expr_arg (&iter0),
a1 = next_call_expr_arg (&iter1))
if (! operand_equal_p (a0, a1, flags))
/* If we get here and both argument lists are exhausted
then the CALL_EXPRs are equal. */
return ! (a0 || a1);
case tcc_declaration:
/* Consider __builtin_sqrt equal to sqrt. */
return (TREE_CODE (arg0) == FUNCTION_DECL
&& DECL_BUILT_IN (arg0) && DECL_BUILT_IN (arg1)
&& DECL_BUILT_IN_CLASS (arg0) == DECL_BUILT_IN_CLASS (arg1)
&& DECL_FUNCTION_CODE (arg0) == DECL_FUNCTION_CODE (arg1));
#undef OP_SAME_WITH_NULL
/* Similar to operand_equal_p, but see if ARG0 might have been made by
shorten_compare from ARG1 when ARG1 was being compared with OTHER.
When in doubt, return 0. */
operand_equal_for_comparison_p (tree arg0, tree arg1, tree other)
int unsignedp1, unsignedpo;
tree primarg0, primarg1, primother;
unsigned int correct_width;
if (operand_equal_p (arg0, arg1, 0))
if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
|| ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
/* Discard any conversions that don't change the modes of ARG0 and ARG1
and see if the inner values are the same. This removes any
signedness comparison, which doesn't matter here. */
primarg0 = arg0, primarg1 = arg1;
STRIP_NOPS (primarg0);
STRIP_NOPS (primarg1);
if (operand_equal_p (primarg0, primarg1, 0))
/* Duplicate what shorten_compare does to ARG1 and see if that gives the
actual comparison operand, ARG0.
First throw away any conversions to wider types
already present in the operands. */
primarg1 = get_narrower (arg1, &unsignedp1);
primother = get_narrower (other, &unsignedpo);
correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
if (unsignedp1 == unsignedpo
&& TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
&& TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
tree type = TREE_TYPE (arg0);
/* Make sure shorter operand is extended the right way
to match the longer operand. */
primarg1 = fold_convert (lang_hooks.types.signed_or_unsigned_type
(unsignedp1, TREE_TYPE (primarg1)), primarg1);
if (operand_equal_p (arg0, fold_convert (type, primarg1), 0))
/* See if ARG is an expression that is either a comparison or is performing
arithmetic on comparisons. The comparisons must only be comparing
two different values, which will be stored in *CVAL1 and *CVAL2; if
they are nonzero it means that some operands have already been found.
No variables may be used anywhere else in the expression except in the
comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
the expression and save_expr needs to be called with CVAL1 and CVAL2.
If this is true, return 1. Otherwise, return zero. */
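/* For example (illustrative only): for (x < y) & (y == x) this returns 1
   with *CVAL1 == x and *CVAL2 == y, whereas (x < y) & (z > 0) fails
   because a third value z takes part in a comparison.  */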
twoval_comparison_p (tree arg, tree *cval1, tree *cval2, int *save_p)
enum tree_code code = TREE_CODE (arg);
enum tree_code_class class = TREE_CODE_CLASS (code);
/* We can handle some of the tcc_expression cases here. */
if (class == tcc_expression && code == TRUTH_NOT_EXPR)
else if (class == tcc_expression
&& (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
|| code == COMPOUND_EXPR))
else if (class == tcc_expression && code == SAVE_EXPR
&& ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg, 0)))
/* If we've already found a CVAL1 or CVAL2, this expression is
too complex to handle. */
if (*cval1 || *cval2)
return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
&& twoval_comparison_p (TREE_OPERAND (arg, 1),
cval1, cval2, save_p));
case tcc_expression:
if (code == COND_EXPR)
return (twoval_comparison_p (TREE_OPERAND (arg, 0),
cval1, cval2, save_p)
&& twoval_comparison_p (TREE_OPERAND (arg, 1),
cval1, cval2, save_p)
&& twoval_comparison_p (TREE_OPERAND (arg, 2),
cval1, cval2, save_p));
case tcc_comparison:
/* First see if we can handle the first operand, then the second. For
the second operand, we know *CVAL1 can't be zero. It must be that
one side of the comparison is each of the values; test for the
case where this isn't true by failing if the two operands
are the same. */
if (operand_equal_p (TREE_OPERAND (arg, 0),
TREE_OPERAND (arg, 1), 0))
*cval1 = TREE_OPERAND (arg, 0);
else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
else if (*cval2 == 0)
*cval2 = TREE_OPERAND (arg, 0);
else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
else if (*cval2 == 0)
*cval2 = TREE_OPERAND (arg, 1);
else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
/* ARG is a tree that is known to contain just arithmetic operations and
comparisons. Evaluate the operations in the tree substituting NEW0 for
any occurrence of OLD0 as an operand of a comparison and likewise for
NEW1 and OLD1. */
eval_subst (tree arg, tree old0, tree new0, tree old1, tree new1)
tree type = TREE_TYPE (arg);
enum tree_code code = TREE_CODE (arg);
enum tree_code_class class = TREE_CODE_CLASS (code);
/* We can handle some of the tcc_expression cases here. */
if (class == tcc_expression && code == TRUTH_NOT_EXPR)
else if (class == tcc_expression
&& (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
return fold_build1 (code, type,
eval_subst (TREE_OPERAND (arg, 0),
old0, new0, old1, new1));
return fold_build2 (code, type,
eval_subst (TREE_OPERAND (arg, 0),
old0, new0, old1, new1),
eval_subst (TREE_OPERAND (arg, 1),
old0, new0, old1, new1));
case tcc_expression:
return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
return fold_build3 (code, type,
eval_subst (TREE_OPERAND (arg, 0),
old0, new0, old1, new1),
eval_subst (TREE_OPERAND (arg, 1),
old0, new0, old1, new1),
eval_subst (TREE_OPERAND (arg, 2),
old0, new0, old1, new1));
/* Fall through - ??? */
case tcc_comparison:
tree arg0 = TREE_OPERAND (arg, 0);
tree arg1 = TREE_OPERAND (arg, 1);
/* We need to check both for exact equality and tree equality. The
former will be true if the operand has a side-effect. In that
case, we know the operand occurred exactly once. */
if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
return fold_build2 (code, type, arg0, arg1);
/* Return a tree for the case when the result of an expression is RESULT
converted to TYPE and OMITTED was previously an operand of the expression
but is now not needed (e.g., we folded OMITTED * 0).
If OMITTED has side effects, we must evaluate it. Otherwise, just do
the conversion of RESULT to TYPE. */
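/* For example (illustrative only): when f () * 0 folds to 0, the call
   still has to be evaluated, so the result is COMPOUND_EXPR <f (), 0>;
   if the omitted operand has no side effects, the constant is simply
   returned wrapped as a non-lvalue.  */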
omit_one_operand (tree type, tree result, tree omitted)
tree t = fold_convert (type, result);
if (TREE_SIDE_EFFECTS (omitted))
return build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);
return non_lvalue (t);
/* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
pedantic_omit_one_operand (tree type, tree result, tree omitted)
tree t = fold_convert (type, result);
if (TREE_SIDE_EFFECTS (omitted))
return build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);
return pedantic_non_lvalue (t);
/* Return a tree for the case when the result of an expression is RESULT
converted to TYPE and OMITTED1 and OMITTED2 were previously operands
of the expression but are now not needed.
If OMITTED1 or OMITTED2 has side effects, they must be evaluated.
If both OMITTED1 and OMITTED2 have side effects, OMITTED1 is
evaluated before OMITTED2. Otherwise, if neither has side effects,
just do the conversion of RESULT to TYPE. */
omit_two_operands (tree type, tree result, tree omitted1, tree omitted2)
tree t = fold_convert (type, result);
if (TREE_SIDE_EFFECTS (omitted2))
t = build2 (COMPOUND_EXPR, type, omitted2, t);
if (TREE_SIDE_EFFECTS (omitted1))
t = build2 (COMPOUND_EXPR, type, omitted1, t);
return TREE_CODE (t) != COMPOUND_EXPR ? non_lvalue (t) : t;
/* Return a simplified tree node for the truth-negation of ARG. This
never alters ARG itself. We assume that ARG is an operation that
returns a truth value (0 or 1).
FIXME: one would think we would fold the result, but it causes
problems with the dominator optimizer. */
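/* For example (illustrative only): the negation of a < b becomes a >= b
   when that inversion is safe, and De Morgan's laws turn
   TRUTH_ANDIF_EXPR <a, b> into TRUTH_ORIF_EXPR <!a, !b> via the cases
   below.  */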
fold_truth_not_expr (tree arg)
tree type = TREE_TYPE (arg);
enum tree_code code = TREE_CODE (arg);
/* If this is a comparison, we can simply invert it, except for
floating-point non-equality comparisons, in which case we just
enclose a TRUTH_NOT_EXPR around what we have. */
if (TREE_CODE_CLASS (code) == tcc_comparison)
tree op_type = TREE_TYPE (TREE_OPERAND (arg, 0));
if (FLOAT_TYPE_P (op_type)
&& flag_trapping_math
&& code != ORDERED_EXPR && code != UNORDERED_EXPR
&& code != NE_EXPR && code != EQ_EXPR)
code = invert_tree_comparison (code,
HONOR_NANS (TYPE_MODE (op_type)));
if (code == ERROR_MARK)
return build2 (code, type,
TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
return constant_boolean_node (integer_zerop (arg), type);
case TRUTH_AND_EXPR:
return build2 (TRUTH_OR_EXPR, type,
invert_truthvalue (TREE_OPERAND (arg, 0)),
invert_truthvalue (TREE_OPERAND (arg, 1)));
return build2 (TRUTH_AND_EXPR, type,
invert_truthvalue (TREE_OPERAND (arg, 0)),
invert_truthvalue (TREE_OPERAND (arg, 1)));
case TRUTH_XOR_EXPR:
/* Here we can invert either operand. We invert the first operand
unless the second operand is a TRUTH_NOT_EXPR in which case our
result is the XOR of the first operand with the inside of the
negation of the second operand. */
if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
return build2 (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
return build2 (TRUTH_XOR_EXPR, type,
invert_truthvalue (TREE_OPERAND (arg, 0)),
TREE_OPERAND (arg, 1));
case TRUTH_ANDIF_EXPR:
return build2 (TRUTH_ORIF_EXPR, type,
invert_truthvalue (TREE_OPERAND (arg, 0)),
invert_truthvalue (TREE_OPERAND (arg, 1)));
case TRUTH_ORIF_EXPR:
return build2 (TRUTH_ANDIF_EXPR, type,
invert_truthvalue (TREE_OPERAND (arg, 0)),
invert_truthvalue (TREE_OPERAND (arg, 1)));
case TRUTH_NOT_EXPR:
return TREE_OPERAND (arg, 0);
tree arg1 = TREE_OPERAND (arg, 1);
tree arg2 = TREE_OPERAND (arg, 2);
/* A COND_EXPR may have a throw as one operand, which
then has void type. Just leave void operands as they are. */
return build3 (COND_EXPR, type, TREE_OPERAND (arg, 0),
VOID_TYPE_P (TREE_TYPE (arg1))
? arg1 : invert_truthvalue (arg1),
VOID_TYPE_P (TREE_TYPE (arg2))
? arg2 : invert_truthvalue (arg2));
return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
invert_truthvalue (TREE_OPERAND (arg, 1)));
case NON_LVALUE_EXPR:
return invert_truthvalue (TREE_OPERAND (arg, 0));
if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE)
return build1 (TRUTH_NOT_EXPR, type, arg);
return build1 (TREE_CODE (arg), type,
invert_truthvalue (TREE_OPERAND (arg, 0)));
if (!integer_onep (TREE_OPERAND (arg, 1)))
return build2 (EQ_EXPR, type, arg,
build_int_cst (type, 0));
return build1 (TRUTH_NOT_EXPR, type, arg);
case CLEANUP_POINT_EXPR:
return build1 (CLEANUP_POINT_EXPR, type,
invert_truthvalue (TREE_OPERAND (arg, 0)));
/* Return a simplified tree node for the truth-negation of ARG. This
never alters ARG itself. We assume that ARG is an operation that
returns a truth value (0 or 1).
FIXME: one would think we would fold the result, but it causes
problems with the dominator optimizer. */
invert_truthvalue (tree arg)
if (TREE_CODE (arg) == ERROR_MARK)
tem = fold_truth_not_expr (arg);
tem = build1 (TRUTH_NOT_EXPR, TREE_TYPE (arg), arg);
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
operands are another bit-wise operation with a common input. If so,
distribute the bit operations to save an operation and possibly two if
constants are involved. For example, convert
(A | B) & (A | C) into A | (B & C)
Further simplification will occur if B and C are constants.
If this optimization cannot be done, 0 will be returned. */
distribute_bit_expr (enum tree_code code, tree type, tree arg0, tree arg1)
if (TREE_CODE (arg0) != TREE_CODE (arg1)
|| TREE_CODE (arg0) == code
|| (TREE_CODE (arg0) != BIT_AND_EXPR
&& TREE_CODE (arg0) != BIT_IOR_EXPR))
if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
common = TREE_OPERAND (arg0, 0);
left = TREE_OPERAND (arg0, 1);
right = TREE_OPERAND (arg1, 1);
else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
common = TREE_OPERAND (arg0, 0);
left = TREE_OPERAND (arg0, 1);
right = TREE_OPERAND (arg1, 0);
else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
common = TREE_OPERAND (arg0, 1);
left = TREE_OPERAND (arg0, 0);
right = TREE_OPERAND (arg1, 1);
else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
common = TREE_OPERAND (arg0, 1);
left = TREE_OPERAND (arg0, 0);
right = TREE_OPERAND (arg1, 0);
return fold_build2 (TREE_CODE (arg0), type, common,
fold_build2 (code, type, left, right));
/* Knowing that ARG0 and ARG1 are both RDIV_EXPRs, simplify a binary operation
with code CODE. This optimization is unsafe. */
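/* For example (illustrative only): rewriting (A / C1) + (A / C2) as
   A * (1/C1 + 1/C2) evaluates the reciprocals at compile time, so the
   result may round differently than the two original divisions; hence
   callers may only use this under relaxed floating-point rules.  */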
distribute_real_division (enum tree_code code, tree type, tree arg0, tree arg1)
bool mul0 = TREE_CODE (arg0) == MULT_EXPR;
bool mul1 = TREE_CODE (arg1) == MULT_EXPR;
/* (A / C) +- (B / C) -> (A +- B) / C. */
&& operand_equal_p (TREE_OPERAND (arg0, 1),
TREE_OPERAND (arg1, 1), 0))
return fold_build2 (mul0 ? MULT_EXPR : RDIV_EXPR, type,
fold_build2 (code, type,
TREE_OPERAND (arg0, 0),
TREE_OPERAND (arg1, 0)),
TREE_OPERAND (arg0, 1));
/* (A / C1) +- (A / C2) -> A * (1 / C1 +- 1 / C2). */
if (operand_equal_p (TREE_OPERAND (arg0, 0),
TREE_OPERAND (arg1, 0), 0)
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == REAL_CST
&& TREE_CODE (TREE_OPERAND (arg1, 1)) == REAL_CST)
REAL_VALUE_TYPE r0, r1;
r0 = TREE_REAL_CST (TREE_OPERAND (arg0, 1));
r1 = TREE_REAL_CST (TREE_OPERAND (arg1, 1));
real_arithmetic (&r0, RDIV_EXPR, &dconst1, &r0);
real_arithmetic (&r1, RDIV_EXPR, &dconst1, &r1);
real_arithmetic (&r0, code, &r0, &r1);
return fold_build2 (MULT_EXPR, type,
TREE_OPERAND (arg0, 0),