1 /* Fold a constant sub-tree into a single node for the C compiler
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301, USA. */
22 /*@@ This file should be rewritten to use an arbitrary precision
23 @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
24 @@ Perhaps the routines could also be used for bc/dc, and made a lib.
25 @@ The routines that translate from the ap rep should
26 @@ warn if precision etc. is lost.
27 @@ This would also make life easier when this technology is used
28 @@ for cross-compilers. */
30 /* The entry points in this file are fold, size_int_wide, size_binop
31 and force_fit_type.
33 fold takes a tree as argument and returns a simplified tree.
35 size_binop takes a tree code for an arithmetic operation
36 and two operands that are trees, and produces a tree for the
37 result, assuming the type comes from `sizetype'.
39 size_int takes an integer value, and creates a tree constant
40 with type from `sizetype'.
42 force_fit_type takes a constant, an overflowable flag and prior
43 overflow indicators. It forces the value to fit the type and sets
44 TREE_OVERFLOW and TREE_CONSTANT_OVERFLOW as appropriate. */
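/* Illustrative usage sketch (editorial, not part of the original file):
   folding a constant size computation with the entry points described
   above.

     tree four = size_int (4);
     tree eight = size_int (8);
     tree sum = size_binop (PLUS_EXPR, four, eight);

   Assuming both operands fold to constants of the same size type, SUM
   is an INTEGER_CST of type `sizetype' with value 12. */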
48 #include "coretypes.h"
59 #include "langhooks.h"
62 /* The following constants represent a bit-based encoding of GCC's
63 comparison operators. This encoding simplifies transformations
64 on relational comparison operators, such as AND and OR. */
65 enum comparison_code {
84 static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
85 static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
86 static bool negate_mathfn_p (enum built_in_function);
87 static bool negate_expr_p (tree);
88 static tree negate_expr (tree);
89 static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int);
90 static tree associate_trees (tree, tree, enum tree_code, tree);
91 static tree const_binop (enum tree_code, tree, tree, int);
92 static enum comparison_code comparison_to_compcode (enum tree_code);
93 static enum tree_code compcode_to_comparison (enum comparison_code);
94 static tree combine_comparisons (enum tree_code, enum tree_code,
95 enum tree_code, tree, tree, tree);
96 static int truth_value_p (enum tree_code);
97 static int operand_equal_for_comparison_p (tree, tree, tree);
98 static int twoval_comparison_p (tree, tree *, tree *, int *);
99 static tree eval_subst (tree, tree, tree, tree, tree);
100 static tree pedantic_omit_one_operand (tree, tree, tree);
101 static tree distribute_bit_expr (enum tree_code, tree, tree, tree);
102 static tree make_bit_field_ref (tree, tree, int, int, int);
103 static tree optimize_bit_field_compare (enum tree_code, tree, tree, tree);
104 static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
105 enum machine_mode *, int *, int *,
107 static int all_ones_mask_p (tree, int);
108 static tree sign_bit_p (tree, tree);
109 static int simple_operand_p (tree);
110 static tree range_binop (enum tree_code, tree, tree, int, tree, int);
111 static tree make_range (tree, int *, tree *, tree *);
112 static tree build_range_check (tree, tree, int, tree, tree);
113 static int merge_ranges (int *, tree *, tree *, int, tree, tree, int, tree,
115 static tree fold_range_test (enum tree_code, tree, tree, tree);
116 static tree fold_cond_expr_with_comparison (tree, tree, tree, tree);
117 static tree unextend (tree, int, int, tree);
118 static tree fold_truthop (enum tree_code, tree, tree, tree);
119 static tree optimize_minmax_comparison (enum tree_code, tree, tree, tree);
120 static tree extract_muldiv (tree, tree, enum tree_code, tree);
121 static tree extract_muldiv_1 (tree, tree, enum tree_code, tree);
122 static int multiple_of_p (tree, tree, tree);
123 static tree fold_binary_op_with_conditional_arg (enum tree_code, tree,
126 static bool fold_real_zero_addition_p (tree, tree, int);
127 static tree fold_mathfn_compare (enum built_in_function, enum tree_code,
129 static tree fold_inf_compare (enum tree_code, tree, tree, tree);
130 static tree fold_div_compare (enum tree_code, tree, tree, tree);
131 static bool reorder_operands_p (tree, tree);
132 static tree fold_negate_const (tree, tree);
133 static tree fold_not_const (tree, tree);
134 static tree fold_relational_const (enum tree_code, tree, tree, tree);
136 /* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
137 overflow. Suppose A, B and SUM have the same respective signs as A1, B1,
138 and SUM1. Then this yields nonzero if overflow occurred during the
139 addition.
141 Overflow occurs if A and B have the same sign, but A and SUM differ in
142 sign. Use `^' to test whether signs differ, and `< 0' to isolate the
143 result. */
144 #define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
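/* Worked example (editorial note): with a 32-bit HOST_WIDE_INT, take
   a = 0x7fffffff, b = 0x00000001, so sum = 0x80000000 (INT_MAX + 1).
   Then ~(a ^ b) == 0x80000001 and (a ^ sum) == 0xffffffff; their AND
   is 0x80000001, which is negative, so OVERFLOW_SUM_SIGN correctly
   reports the overflow. */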
146 /* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
147 We do that by representing the two-word integer in 4 words, with only
148 HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
149 number. The value of the word is LOWPART + HIGHPART * BASE. */
152 ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
153 #define HIGHPART(x) \
154 ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
155 #define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
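/* Worked example (editorial note): with HOST_BITS_PER_WIDE_INT == 32,
   BASE == 0x10000, and encode () splits low = 0x89abcdef,
   hi = 0x01234567 into the four half-word digits
     { 0xcdef, 0x89ab, 0x4567, 0x0123 }
   since LOWPART (0x89abcdef) == 0xcdef and HIGHPART (0x89abcdef)
   == 0x89ab; decode () reverses this via LOWPART + HIGHPART * BASE. */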
157 /* Unpack a two-word integer into 4 words.
158 LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
159 WORDS points to the array of HOST_WIDE_INTs. */
162 encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
164 words[0] = LOWPART (low);
165 words[1] = HIGHPART (low);
166 words[2] = LOWPART (hi);
167 words[3] = HIGHPART (hi);
170 /* Pack an array of 4 words into a two-word integer.
171 WORDS points to the array of words.
172 The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces. */
175 decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
178 *low = words[0] + words[1] * BASE;
179 *hi = words[2] + words[3] * BASE;
182 /* T is an INT_CST node. OVERFLOWABLE indicates if we are interested
183 in overflow of the value, when >0 we are only interested in signed
184 overflow, for <0 we are interested in any overflow. OVERFLOWED
185 indicates whether overflow has already occurred. CONST_OVERFLOWED
186 indicates whether constant overflow has already occurred. We force
187 T's value to be within range of T's type (by setting to 0 or 1 all
188 the bits outside the type's range). We set TREE_OVERFLOW if
189 OVERFLOWED is nonzero,
190 or OVERFLOWABLE is >0 and signed overflow occurs,
191 or OVERFLOWABLE is <0 and any overflow occurs.
192 We set TREE_CONSTANT_OVERFLOW if
193 CONST_OVERFLOWED is nonzero,
194 or we set TREE_OVERFLOW.
195 We return either the original T, or a copy. */
197 tree
198 force_fit_type (tree t, int overflowable,
199 bool overflowed, bool overflowed_const)
201 unsigned HOST_WIDE_INT low;
204 int sign_extended_type;
206 gcc_assert (TREE_CODE (t) == INTEGER_CST);
208 low = TREE_INT_CST_LOW (t);
209 high = TREE_INT_CST_HIGH (t);
211 if (POINTER_TYPE_P (TREE_TYPE (t))
212 || TREE_CODE (TREE_TYPE (t)) == OFFSET_TYPE)
215 prec = TYPE_PRECISION (TREE_TYPE (t));
216 /* Size types *are* sign extended. */
217 sign_extended_type = (!TYPE_UNSIGNED (TREE_TYPE (t))
218 || (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE
219 && TYPE_IS_SIZETYPE (TREE_TYPE (t))));
221 /* First clear all bits that are beyond the type's precision. */
223 if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
225 else if (prec > HOST_BITS_PER_WIDE_INT)
226 high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
230 if (prec < HOST_BITS_PER_WIDE_INT)
231 low &= ~((HOST_WIDE_INT) (-1) << prec);
234 if (!sign_extended_type)
235 /* No sign extension */;
236 else if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
237 /* Correct width already. */;
238 else if (prec > HOST_BITS_PER_WIDE_INT)
240 /* Sign extend top half? */
241 if (high & ((unsigned HOST_WIDE_INT)1
242 << (prec - HOST_BITS_PER_WIDE_INT - 1)))
243 high |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
245 else if (prec == HOST_BITS_PER_WIDE_INT)
247 if ((HOST_WIDE_INT)low < 0)
252 /* Sign extend bottom half? */
253 if (low & ((unsigned HOST_WIDE_INT)1 << (prec - 1)))
256 low |= (HOST_WIDE_INT)(-1) << prec;
260 /* If the value changed, return a new node. */
261 if (overflowed || overflowed_const
262 || low != TREE_INT_CST_LOW (t) || high != TREE_INT_CST_HIGH (t))
264 t = build_int_cst_wide (TREE_TYPE (t), low, high);
268 || (overflowable > 0 && sign_extended_type))
271 TREE_OVERFLOW (t) = 1;
272 TREE_CONSTANT_OVERFLOW (t) = 1;
274 else if (overflowed_const)
277 TREE_CONSTANT_OVERFLOW (t) = 1;
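/* Editorial example of the clamping above: forcing the value 0x1ff to
   fit an 8-bit unsigned type masks it down to 0xff; in an 8-bit signed
   type the same bit pattern has the sign bit set, so LOW is extended
   with ones and HIGH becomes -1, yielding the constant -1. Overflow
   flags are then set only under the conditions listed before the
   function. */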
284 /* Add two doubleword integers with doubleword result.
285 Each argument is given as two `HOST_WIDE_INT' pieces.
286 One argument is L1 and H1; the other, L2 and H2.
287 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
290 add_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
291 unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
292 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
294 unsigned HOST_WIDE_INT l;
298 h = h1 + h2 + (l < l1);
302 return OVERFLOW_SUM_SIGN (h1, h2, h);
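/* Editorial example: adding l1 = ~0 (all ones), h1 = 0 to l2 = 1,
   h2 = 0 wraps the low word to l = 0; the carry is recovered by the
   (l < l1) test, so h = 0 + 0 + 1 = 1, i.e. the doubleword result is
   2**HOST_BITS_PER_WIDE_INT, as expected. */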
305 /* Negate a doubleword integer with doubleword result.
306 Return nonzero if the operation overflows, assuming it's signed.
307 The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
308 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
311 neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
312 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
318 return (*hv & h1) < 0;
328 /* Multiply two doubleword integers with doubleword result.
329 Return nonzero if the operation overflows, assuming it's signed.
330 Each argument is given as two `HOST_WIDE_INT' pieces.
331 One argument is L1 and H1; the other, L2 and H2.
332 The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV. */
335 mul_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
336 unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
337 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
339 HOST_WIDE_INT arg1[4];
340 HOST_WIDE_INT arg2[4];
341 HOST_WIDE_INT prod[4 * 2];
342 unsigned HOST_WIDE_INT carry;
344 unsigned HOST_WIDE_INT toplow, neglow;
345 HOST_WIDE_INT tophigh, neghigh;
347 encode (arg1, l1, h1);
348 encode (arg2, l2, h2);
350 memset (prod, 0, sizeof prod);
352 for (i = 0; i < 4; i++)
355 for (j = 0; j < 4; j++)
358 /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000. */
359 carry += arg1[i] * arg2[j];
360 /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF. */
362 prod[k] = LOWPART (carry);
363 carry = HIGHPART (carry);
368 decode (prod, lv, hv); /* This ignores prod[4] through prod[4*2-1] */
370 /* Check for overflow by calculating the top half of the answer in full;
371 it should agree with the low half's sign bit. */
372 decode (prod + 4, &toplow, &tophigh);
375 neg_double (l2, h2, &neglow, &neghigh);
376 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
380 neg_double (l1, h1, &neglow, &neghigh);
381 add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
383 return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
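/* Editorial note on the overflow test above: prod[] holds the full
   4-word product, and a signed doubleword result is exact only when
   the discarded top half is the sign extension of the low half: all
   zero bits for a nonnegative result (*hv >= 0), all one bits for a
   negative one. After the corrections for negative operands, any
   other pattern in toplow/tophigh signals overflow. */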
386 /* Shift the doubleword integer in L1, H1 left by COUNT places
387 keeping only PREC bits of result.
388 Shift right if COUNT is negative.
389 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
390 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
393 lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
394 HOST_WIDE_INT count, unsigned int prec,
395 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith)
397 unsigned HOST_WIDE_INT signmask;
401 rshift_double (l1, h1, -count, prec, lv, hv, arith);
405 if (SHIFT_COUNT_TRUNCATED)
408 if (count >= 2 * HOST_BITS_PER_WIDE_INT)
410 /* Shifting by the host word size is undefined according to the
411 ANSI standard, so we must handle this as a special case. */
415 else if (count >= HOST_BITS_PER_WIDE_INT)
417 *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
422 *hv = (((unsigned HOST_WIDE_INT) h1 << count)
423 | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
427 /* Sign extend all bits that are beyond the precision. */
429 signmask = -((prec > HOST_BITS_PER_WIDE_INT
430 ? ((unsigned HOST_WIDE_INT) *hv
431 >> (prec - HOST_BITS_PER_WIDE_INT - 1))
432 : (*lv >> (prec - 1))) & 1);
434 if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
436 else if (prec >= HOST_BITS_PER_WIDE_INT)
438 *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
439 *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
444 *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
445 *lv |= signmask << prec;
449 /* Shift the doubleword integer in L1, H1 right by COUNT places
450 keeping only PREC bits of result. COUNT must be positive.
451 ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
452 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
455 rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
456 HOST_WIDE_INT count, unsigned int prec,
457 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
460 unsigned HOST_WIDE_INT signmask;
463 ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
466 if (SHIFT_COUNT_TRUNCATED)
469 if (count >= 2 * HOST_BITS_PER_WIDE_INT)
471 /* Shifting by the host word size is undefined according to the
472 ANSI standard, so we must handle this as a special case. */
476 else if (count >= HOST_BITS_PER_WIDE_INT)
479 *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
483 *hv = (unsigned HOST_WIDE_INT) h1 >> count;
485 | ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
488 /* Zero / sign extend all bits that are beyond the precision. */
490 if (count >= (HOST_WIDE_INT)prec)
495 else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
497 else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
499 *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
500 *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
505 *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
506 *lv |= signmask << (prec - count);
510 /* Rotate the doubleword integer in L1, H1 left by COUNT places
511 keeping only PREC bits of result.
512 Rotate right if COUNT is negative.
513 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
516 lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
517 HOST_WIDE_INT count, unsigned int prec,
518 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
520 unsigned HOST_WIDE_INT s1l, s2l;
521 HOST_WIDE_INT s1h, s2h;
527 lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
528 rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
529 *lv = s1l | s2l;
530 *hv = s1h | s2h;
531 }
533 /* Rotate the doubleword integer in L1, H1 right by COUNT places
534 keeping only PREC bits of result. COUNT must be positive.
535 Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV. */
538 rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
539 HOST_WIDE_INT count, unsigned int prec,
540 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
542 unsigned HOST_WIDE_INT s1l, s2l;
543 HOST_WIDE_INT s1h, s2h;
549 rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
550 lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
551 *lv = s1l | s2l;
552 *hv = s1h | s2h;
553 }
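/* Editorial example: a rotate is the OR of the two opposite logical
   shifts computed above. With prec == 8, rotating 0xb3 right by 3
   gives (0xb3 >> 3) | (0xb3 << 5) masked to 8 bits, i.e.
   0x16 | 0x60 == 0x76. */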
555 /* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
556 for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
557 CODE is a tree code for a kind of division, one of
558 TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
559 or EXACT_DIV_EXPR.
560 It controls how the quotient is rounded to an integer.
561 Return nonzero if the operation overflows.
562 UNS nonzero says do unsigned division. */
565 div_and_round_double (enum tree_code code, int uns,
566 unsigned HOST_WIDE_INT lnum_orig, /* num == numerator == dividend */
567 HOST_WIDE_INT hnum_orig,
568 unsigned HOST_WIDE_INT lden_orig, /* den == denominator == divisor */
569 HOST_WIDE_INT hden_orig,
570 unsigned HOST_WIDE_INT *lquo,
571 HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
575 HOST_WIDE_INT num[4 + 1]; /* extra element for scaling. */
576 HOST_WIDE_INT den[4], quo[4];
578 unsigned HOST_WIDE_INT work;
579 unsigned HOST_WIDE_INT carry = 0;
580 unsigned HOST_WIDE_INT lnum = lnum_orig;
581 HOST_WIDE_INT hnum = hnum_orig;
582 unsigned HOST_WIDE_INT lden = lden_orig;
583 HOST_WIDE_INT hden = hden_orig;
586 if (hden == 0 && lden == 0)
587 overflow = 1, lden = 1;
589 /* Calculate quotient sign and convert operands to unsigned. */
595 /* (minimum integer) / (-1) is the only overflow case. */
596 if (neg_double (lnum, hnum, &lnum, &hnum)
597 && ((HOST_WIDE_INT) lden & hden) == -1)
603 neg_double (lden, hden, &lden, &hden);
607 if (hnum == 0 && hden == 0)
608 { /* single precision */
610 /* This unsigned division rounds toward zero. */
616 { /* trivial case: dividend < divisor */
617 /* hden != 0 already checked. */
624 memset (quo, 0, sizeof quo);
626 memset (num, 0, sizeof num); /* to zero the extra element used for scaling */
627 memset (den, 0, sizeof den);
629 encode (num, lnum, hnum);
630 encode (den, lden, hden);
632 /* Special code for when the divisor < BASE. */
633 if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
635 /* hnum != 0 already checked. */
636 for (i = 4 - 1; i >= 0; i--)
638 work = num[i] + carry * BASE;
639 quo[i] = work / lden;
645 /* Full double precision division,
646 with thanks to Don Knuth's "Seminumerical Algorithms". */
647 int num_hi_sig, den_hi_sig;
648 unsigned HOST_WIDE_INT quo_est, scale;
650 /* Find the highest nonzero divisor digit. */
651 for (i = 4 - 1;; i--)
658 /* Ensure that the first digit of the divisor is at least BASE/2.
659 This is required by the quotient digit estimation algorithm. */
661 scale = BASE / (den[den_hi_sig] + 1);
663 { /* scale divisor and dividend */
665 for (i = 0; i <= 4 - 1; i++)
667 work = (num[i] * scale) + carry;
668 num[i] = LOWPART (work);
669 carry = HIGHPART (work);
674 for (i = 0; i <= 4 - 1; i++)
676 work = (den[i] * scale) + carry;
677 den[i] = LOWPART (work);
678 carry = HIGHPART (work);
679 if (den[i] != 0) den_hi_sig = i;
686 for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
688 /* Guess the next quotient digit, quo_est, by dividing the first
689 two remaining dividend digits by the high order quotient digit.
690 quo_est is never low and is at most 2 high. */
691 unsigned HOST_WIDE_INT tmp;
693 num_hi_sig = i + den_hi_sig + 1;
694 work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
695 if (num[num_hi_sig] != den[den_hi_sig])
696 quo_est = work / den[den_hi_sig];
700 /* Refine quo_est so it's usually correct, and at most one high. */
701 tmp = work - quo_est * den[den_hi_sig];
703 && (den[den_hi_sig - 1] * quo_est
704 > (tmp * BASE + num[num_hi_sig - 2])))
707 /* Try QUO_EST as the quotient digit, by multiplying the
708 divisor by QUO_EST and subtracting from the remaining dividend.
709 Keep in mind that QUO_EST is the (I - 1)st digit. */
712 for (j = 0; j <= den_hi_sig; j++)
714 work = quo_est * den[j] + carry;
715 carry = HIGHPART (work);
716 work = num[i + j] - LOWPART (work);
717 num[i + j] = LOWPART (work);
718 carry += HIGHPART (work) != 0;
721 /* If quo_est was high by one, then num[i] went negative and
722 we need to correct things. */
723 if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
726 carry = 0; /* add divisor back in */
727 for (j = 0; j <= den_hi_sig; j++)
729 work = num[i + j] + den[j] + carry;
730 carry = HIGHPART (work);
731 num[i + j] = LOWPART (work);
734 num[num_hi_sig] += carry;
737 /* Store the quotient digit. */
742 decode (quo, lquo, hquo);
745 /* If result is negative, make it so. */
747 neg_double (*lquo, *hquo, lquo, hquo);
749 /* Compute trial remainder: rem = num - (quo * den) */
750 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
751 neg_double (*lrem, *hrem, lrem, hrem);
752 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
757 case TRUNC_MOD_EXPR: /* round toward zero */
758 case EXACT_DIV_EXPR: /* for this one, it shouldn't matter */
762 case FLOOR_MOD_EXPR: /* round toward negative infinity */
763 if (quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio < 0 && rem != 0 */
766 add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
774 case CEIL_MOD_EXPR: /* round toward positive infinity */
775 if (!quo_neg && (*lrem != 0 || *hrem != 0)) /* ratio > 0 && rem != 0 */
777 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
785 case ROUND_MOD_EXPR: /* round to closest integer */
787 unsigned HOST_WIDE_INT labs_rem = *lrem;
788 HOST_WIDE_INT habs_rem = *hrem;
789 unsigned HOST_WIDE_INT labs_den = lden, ltwice;
790 HOST_WIDE_INT habs_den = hden, htwice;
792 /* Get absolute values. */
794 neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
796 neg_double (lden, hden, &labs_den, &habs_den);
798 /* If (2 * abs (lrem) >= abs (lden)) */
799 mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
800 labs_rem, habs_rem, <wice, &htwice);
802 if (((unsigned HOST_WIDE_INT) habs_den
803 < (unsigned HOST_WIDE_INT) htwice)
804 || (((unsigned HOST_WIDE_INT) habs_den
805 == (unsigned HOST_WIDE_INT) htwice)
806 && (labs_den < ltwice)))
810 add_double (*lquo, *hquo,
811 (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
814 add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
826 /* Compute true remainder: rem = num - (quo * den) */
827 mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
828 neg_double (*lrem, *hrem, lrem, hrem);
829 add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
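/* Editorial example of the rounding adjustments above, for 7 / 2:
   TRUNC_DIV_EXPR keeps quo = 3, rem = 1 (round toward zero);
   FLOOR_DIV_EXPR also yields 3, since the quotient is not negative;
   CEIL_DIV_EXPR bumps the quotient to 4, giving rem = -1; and
   ROUND_DIV_EXPR yields 4 because 2 * |rem| >= |den|, i.e. ties
   round away from zero. */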
833 /* If ARG2 divides ARG1 with zero remainder, carries out the division
834 of type CODE and returns the quotient.
835 Otherwise returns NULL_TREE. */
838 div_if_zero_remainder (enum tree_code code, tree arg1, tree arg2)
840 unsigned HOST_WIDE_INT int1l, int2l;
841 HOST_WIDE_INT int1h, int2h;
842 unsigned HOST_WIDE_INT quol, reml;
843 HOST_WIDE_INT quoh, remh;
844 tree type = TREE_TYPE (arg1);
845 int uns = TYPE_UNSIGNED (type);
847 int1l = TREE_INT_CST_LOW (arg1);
848 int1h = TREE_INT_CST_HIGH (arg1);
849 int2l = TREE_INT_CST_LOW (arg2);
850 int2h = TREE_INT_CST_HIGH (arg2);
852 div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
853 &quol, &quoh, &reml, &remh);
854 if (remh != 0 || reml != 0)
857 return build_int_cst_wide (type, quol, quoh);
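/* Editorial usage sketch, with hypothetical INTEGER_CST trees TWELVE,
   FOUR and FIVE holding 12, 4 and 5:
     div_if_zero_remainder (TRUNC_DIV_EXPR, twelve, four)  => 3
     div_if_zero_remainder (TRUNC_DIV_EXPR, twelve, five)  => NULL_TREE
   because 12 % 5 != 0. */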
860 /* Return true if the built-in mathematical function specified by CODE
861 is odd, i.e. -f(x) == f(-x). */
864 negate_mathfn_p (enum built_in_function code)
868 CASE_FLT_FN (BUILT_IN_ASIN):
869 CASE_FLT_FN (BUILT_IN_ASINH):
870 CASE_FLT_FN (BUILT_IN_ATAN):
871 CASE_FLT_FN (BUILT_IN_ATANH):
872 CASE_FLT_FN (BUILT_IN_CBRT):
873 CASE_FLT_FN (BUILT_IN_SIN):
874 CASE_FLT_FN (BUILT_IN_SINH):
875 CASE_FLT_FN (BUILT_IN_TAN):
876 CASE_FLT_FN (BUILT_IN_TANH):
885 /* Check whether we may negate an integer constant T without causing
886 overflow. */
889 may_negate_without_overflow_p (tree t)
891 unsigned HOST_WIDE_INT val;
895 gcc_assert (TREE_CODE (t) == INTEGER_CST);
897 type = TREE_TYPE (t);
898 if (TYPE_UNSIGNED (type))
901 prec = TYPE_PRECISION (type);
902 if (prec > HOST_BITS_PER_WIDE_INT)
904 if (TREE_INT_CST_LOW (t) != 0)
906 prec -= HOST_BITS_PER_WIDE_INT;
907 val = TREE_INT_CST_HIGH (t);
910 val = TREE_INT_CST_LOW (t);
911 if (prec < HOST_BITS_PER_WIDE_INT)
912 val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
913 return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1));
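/* Editorial example: for a signed 8-bit type, PREC is 8 and the test
   above compares the low-order bits of T against 0x80. Negating the
   minimum value -128 would overflow (there is no +128), so the
   function returns false exactly for that value and true for every
   other constant. */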
916 /* Determine whether an expression T can be cheaply negated using
917 the function negate_expr. */
920 negate_expr_p (tree t)
927 type = TREE_TYPE (t);
930 switch (TREE_CODE (t))
933 if (TYPE_UNSIGNED (type) || ! flag_trapv)
936 /* Check that -CST will not overflow type. */
937 return may_negate_without_overflow_p (t);
939 return INTEGRAL_TYPE_P (type);
946 return negate_expr_p (TREE_REALPART (t))
947 && negate_expr_p (TREE_IMAGPART (t));
950 if (FLOAT_TYPE_P (type) && !flag_unsafe_math_optimizations)
952 /* -(A + B) -> (-B) - A. */
953 if (negate_expr_p (TREE_OPERAND (t, 1))
954 && reorder_operands_p (TREE_OPERAND (t, 0),
955 TREE_OPERAND (t, 1)))
957 /* -(A + B) -> (-A) - B. */
958 return negate_expr_p (TREE_OPERAND (t, 0));
961 /* We can't turn -(A-B) into B-A when we honor signed zeros. */
962 return (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
963 && reorder_operands_p (TREE_OPERAND (t, 0),
964 TREE_OPERAND (t, 1));
967 if (TYPE_UNSIGNED (TREE_TYPE (t)))
973 if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
974 return negate_expr_p (TREE_OPERAND (t, 1))
975 || negate_expr_p (TREE_OPERAND (t, 0));
983 if (TYPE_UNSIGNED (TREE_TYPE (t)) || flag_wrapv)
985 return negate_expr_p (TREE_OPERAND (t, 1))
986 || negate_expr_p (TREE_OPERAND (t, 0));
989 /* Negate -((double)float) as (double)(-float). */
990 if (TREE_CODE (type) == REAL_TYPE)
992 tree tem = strip_float_extensions (t);
994 return negate_expr_p (tem);
999 /* Negate -f(x) as f(-x). */
1000 if (negate_mathfn_p (builtin_mathfn_code (t)))
1001 return negate_expr_p (TREE_VALUE (TREE_OPERAND (t, 1)));
1005 /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */
1006 if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
1008 tree op1 = TREE_OPERAND (t, 1);
1009 if (TREE_INT_CST_HIGH (op1) == 0
1010 && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
1011 == TREE_INT_CST_LOW (op1))
1022 /* Given T, an expression, return the negation of T. Allow for T to be
1023 null, in which case return null. */
1026 negate_expr (tree t)
1034 type = TREE_TYPE (t);
1035 STRIP_SIGN_NOPS (t);
1037 switch (TREE_CODE (t))
1039 /* Convert - (~A) to A + 1. */
1041 if (INTEGRAL_TYPE_P (type))
1042 return fold_build2 (PLUS_EXPR, type, TREE_OPERAND (t, 0),
1043 build_int_cst (type, 1));
1047 tem = fold_negate_const (t, type);
1048 if (! TREE_OVERFLOW (tem)
1049 || TYPE_UNSIGNED (type)
1055 tem = fold_negate_const (t, type);
1056 /* Two's complement FP formats, such as c4x, may overflow. */
1057 if (! TREE_OVERFLOW (tem) || ! flag_trapping_math)
1058 return fold_convert (type, tem);
1063 tree rpart = negate_expr (TREE_REALPART (t));
1064 tree ipart = negate_expr (TREE_IMAGPART (t));
1066 if ((TREE_CODE (rpart) == REAL_CST
1067 && TREE_CODE (ipart) == REAL_CST)
1068 || (TREE_CODE (rpart) == INTEGER_CST
1069 && TREE_CODE (ipart) == INTEGER_CST))
1070 return build_complex (type, rpart, ipart);
1075 return fold_convert (type, TREE_OPERAND (t, 0));
1078 if (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
1080 /* -(A + B) -> (-B) - A. */
1081 if (negate_expr_p (TREE_OPERAND (t, 1))
1082 && reorder_operands_p (TREE_OPERAND (t, 0),
1083 TREE_OPERAND (t, 1)))
1085 tem = negate_expr (TREE_OPERAND (t, 1));
1086 tem = fold_build2 (MINUS_EXPR, TREE_TYPE (t),
1087 tem, TREE_OPERAND (t, 0));
1088 return fold_convert (type, tem);
1091 /* -(A + B) -> (-A) - B. */
1092 if (negate_expr_p (TREE_OPERAND (t, 0)))
1094 tem = negate_expr (TREE_OPERAND (t, 0));
1095 tem = fold_build2 (MINUS_EXPR, TREE_TYPE (t),
1096 tem, TREE_OPERAND (t, 1));
1097 return fold_convert (type, tem);
1103 /* - (A - B) -> B - A */
1104 if ((! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
1105 && reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1)))
1106 return fold_convert (type,
1107 fold_build2 (MINUS_EXPR, TREE_TYPE (t),
1108 TREE_OPERAND (t, 1),
1109 TREE_OPERAND (t, 0)));
1113 if (TYPE_UNSIGNED (TREE_TYPE (t)))
1119 if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
1121 tem = TREE_OPERAND (t, 1);
1122 if (negate_expr_p (tem))
1123 return fold_convert (type,
1124 fold_build2 (TREE_CODE (t), TREE_TYPE (t),
1125 TREE_OPERAND (t, 0),
1126 negate_expr (tem)));
1127 tem = TREE_OPERAND (t, 0);
1128 if (negate_expr_p (tem))
1129 return fold_convert (type,
1130 fold_build2 (TREE_CODE (t), TREE_TYPE (t),
1132 TREE_OPERAND (t, 1)));
1136 case TRUNC_DIV_EXPR:
1137 case ROUND_DIV_EXPR:
1138 case FLOOR_DIV_EXPR:
1140 case EXACT_DIV_EXPR:
1141 if (!TYPE_UNSIGNED (TREE_TYPE (t)) && !flag_wrapv)
1143 tem = TREE_OPERAND (t, 1);
1144 if (negate_expr_p (tem))
1145 return fold_convert (type,
1146 fold_build2 (TREE_CODE (t), TREE_TYPE (t),
1147 TREE_OPERAND (t, 0),
1148 negate_expr (tem)));
1149 tem = TREE_OPERAND (t, 0);
1150 if (negate_expr_p (tem))
1151 return fold_convert (type,
1152 fold_build2 (TREE_CODE (t), TREE_TYPE (t),
1154 TREE_OPERAND (t, 1)));
1159 /* Convert -((double)float) into (double)(-float). */
1160 if (TREE_CODE (type) == REAL_TYPE)
1162 tem = strip_float_extensions (t);
1163 if (tem != t && negate_expr_p (tem))
1164 return fold_convert (type, negate_expr (tem));
1169 /* Negate -f(x) as f(-x). */
1170 if (negate_mathfn_p (builtin_mathfn_code (t))
1171 && negate_expr_p (TREE_VALUE (TREE_OPERAND (t, 1))))
1173 tree fndecl, arg, arglist;
1175 fndecl = get_callee_fndecl (t);
1176 arg = negate_expr (TREE_VALUE (TREE_OPERAND (t, 1)));
1177 arglist = build_tree_list (NULL_TREE, arg);
1178 return build_function_call_expr (fndecl, arglist);
1183 /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */
1184 if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
1186 tree op1 = TREE_OPERAND (t, 1);
1187 if (TREE_INT_CST_HIGH (op1) == 0
1188 && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
1189 == TREE_INT_CST_LOW (op1))
1191 tree ntype = TYPE_UNSIGNED (type)
1192 ? lang_hooks.types.signed_type (type)
1193 : lang_hooks.types.unsigned_type (type);
1194 tree temp = fold_convert (ntype, TREE_OPERAND (t, 0));
1195 temp = fold_build2 (RSHIFT_EXPR, ntype, temp, op1);
1196 return fold_convert (type, temp);
1205 tem = fold_build1 (NEGATE_EXPR, TREE_TYPE (t), t);
1206 return fold_convert (type, tem);
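/* Editorial examples of the cases above: negate_expr applied to the
   tree for (x - y) returns the tree for (y - x) via the MINUS_EXPR
   case, and applied to ~a of integral type returns a + 1 via the
   BIT_NOT_EXPR case. */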
1209 /* Split a tree IN into constant, literal and variable parts that could be
1210 combined with CODE to make IN. "constant" means an expression with
1211 TREE_CONSTANT but that isn't an actual constant. CODE must be a
1212 commutative arithmetic operation. Store the constant part into *CONP,
1213 the literal in *LITP and return the variable part. If a part isn't
1214 present, set it to null. If the tree does not decompose in this way,
1215 return the entire tree as the variable part and the other parts as null.
1217 If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR. In that
1218 case, we negate an operand that was subtracted. Except if it is a
1219 literal for which we use *MINUS_LITP instead.
1221 If NEGATE_P is true, we are negating all of IN, again except a literal
1222 for which we use *MINUS_LITP instead.
1224 If IN is itself a literal or constant, return it as appropriate.
1226 Note that we do not guarantee that any of the three values will be the
1227 same type as IN, but they will have the same signedness and mode. */
1230 split_tree (tree in, enum tree_code code, tree *conp, tree *litp,
1231 tree *minus_litp, int negate_p)
1239 /* Strip any conversions that don't change the machine mode or signedness. */
1240 STRIP_SIGN_NOPS (in);
1242 if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST)
1244 else if (TREE_CODE (in) == code
1245 || (! FLOAT_TYPE_P (TREE_TYPE (in))
1246 /* We can associate addition and subtraction together (even
1247 though the C standard doesn't say so) for integers because
1248 the value is not affected. For reals, the value might be
1249 affected, so we can't. */
1250 && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
1251 || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
1253 tree op0 = TREE_OPERAND (in, 0);
1254 tree op1 = TREE_OPERAND (in, 1);
1255 int neg1_p = TREE_CODE (in) == MINUS_EXPR;
1256 int neg_litp_p = 0, neg_conp_p = 0, neg_var_p = 0;
1258 /* First see if either of the operands is a literal, then a constant. */
1259 if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST)
1260 *litp = op0, op0 = 0;
1261 else if (TREE_CODE (op1) == INTEGER_CST || TREE_CODE (op1) == REAL_CST)
1262 *litp = op1, neg_litp_p = neg1_p, op1 = 0;
1264 if (op0 != 0 && TREE_CONSTANT (op0))
1265 *conp = op0, op0 = 0;
1266 else if (op1 != 0 && TREE_CONSTANT (op1))
1267 *conp = op1, neg_conp_p = neg1_p, op1 = 0;
1269 /* If we haven't dealt with either operand, this is not a case we can
1270 decompose. Otherwise, VAR is either of the ones remaining, if any. */
1271 if (op0 != 0 && op1 != 0)
1276 var = op1, neg_var_p = neg1_p;
1278 /* Now do any needed negations. */
1280 *minus_litp = *litp, *litp = 0;
1282 *conp = negate_expr (*conp);
1284 var = negate_expr (var);
1286 else if (TREE_CONSTANT (in))
1294 *minus_litp = *litp, *litp = 0;
1295 else if (*minus_litp)
1296 *litp = *minus_litp, *minus_litp = 0;
1297 *conp = negate_expr (*conp);
1298 var = negate_expr (var);
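/* Editorial example: splitting the tree for (x + 3) with
   code == PLUS_EXPR stores the literal 3 in *LITP, leaves *CONP and
   *MINUS_LITP null, and returns x as the variable part; for (x - 3)
   the literal goes to *MINUS_LITP instead, because it was
   subtracted. */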
1304 /* Re-associate trees split by the above function. T1 and T2 are either
1305 expressions to associate or null. Return the new expression, if any. If
1306 we build an operation, do it in TYPE and with CODE. */
1309 associate_trees (tree t1, tree t2, enum tree_code code, tree type)
1316 /* If either input is CODE, a PLUS_EXPR, or a MINUS_EXPR, don't
1317 try to fold this since we will have infinite recursion. But do
1318 deal with any NEGATE_EXPRs. */
1319 if (TREE_CODE (t1) == code || TREE_CODE (t2) == code
1320 || TREE_CODE (t1) == MINUS_EXPR || TREE_CODE (t2) == MINUS_EXPR)
1322 if (code == PLUS_EXPR)
1324 if (TREE_CODE (t1) == NEGATE_EXPR)
1325 return build2 (MINUS_EXPR, type, fold_convert (type, t2),
1326 fold_convert (type, TREE_OPERAND (t1, 0)));
1327 else if (TREE_CODE (t2) == NEGATE_EXPR)
1328 return build2 (MINUS_EXPR, type, fold_convert (type, t1),
1329 fold_convert (type, TREE_OPERAND (t2, 0)));
1330 else if (integer_zerop (t2))
1331 return fold_convert (type, t1);
1333 else if (code == MINUS_EXPR)
1335 if (integer_zerop (t2))
1336 return fold_convert (type, t1);
1339 return build2 (code, type, fold_convert (type, t1),
1340 fold_convert (type, t2));
1343 return fold_build2 (code, type, fold_convert (type, t1),
1344 fold_convert (type, t2));
1347 /* Combine two integer constants ARG1 and ARG2 under operation CODE
1348 to produce a new constant. Return NULL_TREE if we don't know how
1349 to evaluate CODE at compile-time.
1351 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1354 int_const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
1356 unsigned HOST_WIDE_INT int1l, int2l;
1357 HOST_WIDE_INT int1h, int2h;
1358 unsigned HOST_WIDE_INT low;
1360 unsigned HOST_WIDE_INT garbagel;
1361 HOST_WIDE_INT garbageh;
1363 tree type = TREE_TYPE (arg1);
1364 int uns = TYPE_UNSIGNED (type);
1366 = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
1369 int1l = TREE_INT_CST_LOW (arg1);
1370 int1h = TREE_INT_CST_HIGH (arg1);
1371 int2l = TREE_INT_CST_LOW (arg2);
1372 int2h = TREE_INT_CST_HIGH (arg2);
1377 low = int1l | int2l, hi = int1h | int2h;
1381 low = int1l ^ int2l, hi = int1h ^ int2h;
1385 low = int1l & int2l, hi = int1h & int2h;
1391 /* It's unclear from the C standard whether shifts can overflow.
1392 The following code ignores overflow; perhaps a C standard
1393 interpretation ruling is needed. */
1394 lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
1401 lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
1406 overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
1410 neg_double (int2l, int2h, &low, &hi);
1411 add_double (int1l, int1h, low, hi, &low, &hi);
1412 overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h);
1416 overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
1419 case TRUNC_DIV_EXPR:
1420 case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
1421 case EXACT_DIV_EXPR:
1422 /* This is a shortcut for a common special case. */
1423 if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
1424 && ! TREE_CONSTANT_OVERFLOW (arg1)
1425 && ! TREE_CONSTANT_OVERFLOW (arg2)
1426 && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
1428 if (code == CEIL_DIV_EXPR)
1431 low = int1l / int2l, hi = 0;
1435 /* ... fall through ... */
1437 case ROUND_DIV_EXPR:
1438 if (int2h == 0 && int2l == 0)
1440 if (int2h == 0 && int2l == 1)
1442 low = int1l, hi = int1h;
1445 if (int1l == int2l && int1h == int2h
1446 && ! (int1l == 0 && int1h == 0))
1451 overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
1452 &low, &hi, &garbagel, &garbageh);
1455 case TRUNC_MOD_EXPR:
1456 case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
1457 /* This is a shortcut for a common special case. */
1458 if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
1459 && ! TREE_CONSTANT_OVERFLOW (arg1)
1460 && ! TREE_CONSTANT_OVERFLOW (arg2)
1461 && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
1463 if (code == CEIL_MOD_EXPR)
1465 low = int1l % int2l, hi = 0;
1469 /* ... fall through ... */
1471 case ROUND_MOD_EXPR:
1472 if (int2h == 0 && int2l == 0)
1474 overflow = div_and_round_double (code, uns,
1475 int1l, int1h, int2l, int2h,
1476 &garbagel, &garbageh, &low, &hi);
1482 low = (((unsigned HOST_WIDE_INT) int1h
1483 < (unsigned HOST_WIDE_INT) int2h)
1484 || (((unsigned HOST_WIDE_INT) int1h
1485 == (unsigned HOST_WIDE_INT) int2h)
1488 low = (int1h < int2h
1489 || (int1h == int2h && int1l < int2l));
1491 if (low == (code == MIN_EXPR))
1492 low = int1l, hi = int1h;
1494 low = int2l, hi = int2h;
1501 t = build_int_cst_wide (TREE_TYPE (arg1), low, hi);
1505 /* Propagate overflow flags ourselves. */
1506 if (((!uns || is_sizetype) && overflow)
1507 | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
1510 TREE_OVERFLOW (t) = 1;
1511 TREE_CONSTANT_OVERFLOW (t) = 1;
1513 else if (TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2))
1516 TREE_CONSTANT_OVERFLOW (t) = 1;
1520 t = force_fit_type (t, 1,
1521 ((!uns || is_sizetype) && overflow)
1522 | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2),
1523 TREE_CONSTANT_OVERFLOW (arg1)
1524 | TREE_CONSTANT_OVERFLOW (arg2));
1529 /* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
1530 constant. We assume ARG1 and ARG2 have the same data type, or at least
1531 are the same kind of constant and the same machine mode.
1533 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1536 const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
1541 if (TREE_CODE (arg1) == INTEGER_CST)
1542 return int_const_binop (code, arg1, arg2, notrunc);
1544 if (TREE_CODE (arg1) == REAL_CST)
1546 enum machine_mode mode;
1549 REAL_VALUE_TYPE value;
1550 REAL_VALUE_TYPE result;
1554 /* The following codes are handled by real_arithmetic. */
1569 d1 = TREE_REAL_CST (arg1);
1570 d2 = TREE_REAL_CST (arg2);
1572 type = TREE_TYPE (arg1);
1573 mode = TYPE_MODE (type);
1575 /* Don't perform operation if we honor signaling NaNs and
1576 either operand is a NaN. */
1577 if (HONOR_SNANS (mode)
1578 && (REAL_VALUE_ISNAN (d1) || REAL_VALUE_ISNAN (d2)))
1581 /* Don't perform operation if it would raise a division
1582 by zero exception. */
1583 if (code == RDIV_EXPR
1584 && REAL_VALUES_EQUAL (d2, dconst0)
1585 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1588 /* If either operand is a NaN, just return it. Otherwise, set up
1589 for floating-point trap; we return an overflow. */
1590 if (REAL_VALUE_ISNAN (d1))
1592 else if (REAL_VALUE_ISNAN (d2))
1595 inexact = real_arithmetic (&value, code, &d1, &d2);
1596 real_convert (&result, mode, &value);
1598 /* Don't constant fold this floating point operation if
1599 the result has overflowed and flag_trapping_math is set. */
1601 if (flag_trapping_math
1602 && MODE_HAS_INFINITIES (mode)
1603 && REAL_VALUE_ISINF (result)
1604 && !REAL_VALUE_ISINF (d1)
1605 && !REAL_VALUE_ISINF (d2))
1608 /* Don't constant fold this floating point operation if the
1609 result may depend upon the run-time rounding mode and
1610 flag_rounding_math is set, or if GCC's software emulation
1611 is unable to accurately represent the result. */
1613 if ((flag_rounding_math
1614 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
1615 && !flag_unsafe_math_optimizations))
1616 && (inexact || !real_identical (&result, &value)))
1619 t = build_real (type, result);
1621 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
1622 TREE_CONSTANT_OVERFLOW (t)
1624 | TREE_CONSTANT_OVERFLOW (arg1)
1625 | TREE_CONSTANT_OVERFLOW (arg2);
1629 if (TREE_CODE (arg1) == COMPLEX_CST)
1631 tree type = TREE_TYPE (arg1);
1632 tree r1 = TREE_REALPART (arg1);
1633 tree i1 = TREE_IMAGPART (arg1);
1634 tree r2 = TREE_REALPART (arg2);
1635 tree i2 = TREE_IMAGPART (arg2);
1641 t = build_complex (type,
1642 const_binop (PLUS_EXPR, r1, r2, notrunc),
1643 const_binop (PLUS_EXPR, i1, i2, notrunc));
1647 t = build_complex (type,
1648 const_binop (MINUS_EXPR, r1, r2, notrunc),
1649 const_binop (MINUS_EXPR, i1, i2, notrunc));
1653 t = build_complex (type,
1654 const_binop (MINUS_EXPR,
1655 const_binop (MULT_EXPR,
1657 const_binop (MULT_EXPR,
1660 const_binop (PLUS_EXPR,
1661 const_binop (MULT_EXPR,
1663 const_binop (MULT_EXPR,
1670 tree t1, t2, real, imag;
1672 = const_binop (PLUS_EXPR,
1673 const_binop (MULT_EXPR, r2, r2, notrunc),
1674 const_binop (MULT_EXPR, i2, i2, notrunc),
1677 t1 = const_binop (PLUS_EXPR,
1678 const_binop (MULT_EXPR, r1, r2, notrunc),
1679 const_binop (MULT_EXPR, i1, i2, notrunc),
1681 t2 = const_binop (MINUS_EXPR,
1682 const_binop (MULT_EXPR, i1, r2, notrunc),
1683 const_binop (MULT_EXPR, r1, i2, notrunc),
1686 if (INTEGRAL_TYPE_P (TREE_TYPE (r1)))
1688 real = const_binop (TRUNC_DIV_EXPR, t1, magsquared, notrunc);
1689 imag = const_binop (TRUNC_DIV_EXPR, t2, magsquared, notrunc);
1693 real = const_binop (RDIV_EXPR, t1, magsquared, notrunc);
1694 imag = const_binop (RDIV_EXPR, t2, magsquared, notrunc);
1699 t = build_complex (type, real, imag);
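/* Editorial note: the division above uses the standard formula
     (r1 + i1*i) / (r2 + i2*i)
       = ((r1*r2 + i1*i2) + (i1*r2 - r1*i2)*i) / (r2*r2 + i2*i2)
   with T1 and T2 holding the two numerators and MAGSQUARED the common
   denominator. */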
1711 /* Create a size type INT_CST node with NUMBER sign extended. KIND
1712 indicates which particular sizetype to create. */
1715 size_int_kind (HOST_WIDE_INT number, enum size_type_kind kind)
1717 return build_int_cst (sizetype_tab[(int) kind], number);
1720 /* Combine operands ARG0 and ARG1 with arithmetic operation CODE. CODE
1721 is a tree code. The type of the result is taken from the operands.
1722 Both must be the same integer type, and it must be a size type.
1723 If the operands are constant, so is the result. */
1726 size_binop (enum tree_code code, tree arg0, tree arg1)
1728 tree type = TREE_TYPE (arg0);
1730 gcc_assert (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
1731 && type == TREE_TYPE (arg1));
1733 /* Handle the special case of two integer constants faster. */
1734 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
1736 /* And some specific cases even faster than that. */
1737 if (code == PLUS_EXPR && integer_zerop (arg0))
1739 else if ((code == MINUS_EXPR || code == PLUS_EXPR)
1740 && integer_zerop (arg1))
1742 else if (code == MULT_EXPR && integer_onep (arg0))
1745 /* Handle general case of two integer constants. */
1746 return int_const_binop (code, arg0, arg1, 0);
1749 if (arg0 == error_mark_node || arg1 == error_mark_node)
1750 return error_mark_node;
1752 return fold_build2 (code, type, arg0, arg1);
1755 /* Given two values, either both of sizetype or both of bitsizetype,
1756 compute the difference between the two values. Return the value
1757 in the signed type corresponding to the type of the operands.
1760 size_diffop (tree arg0, tree arg1)
1762 tree type = TREE_TYPE (arg0);
1765 gcc_assert (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
1766 && type == TREE_TYPE (arg1));
1768 /* If the type is already signed, just do the simple thing. */
1769 if (!TYPE_UNSIGNED (type))
1770 return size_binop (MINUS_EXPR, arg0, arg1);
1772 ctype = type == bitsizetype ? sbitsizetype : ssizetype;
1774 /* If either operand is not a constant, do the conversions to the signed
1775 type and subtract. The hardware will do the right thing with any
1776 overflow in the subtraction. */
1777 if (TREE_CODE (arg0) != INTEGER_CST || TREE_CODE (arg1) != INTEGER_CST)
1778 return size_binop (MINUS_EXPR, fold_convert (ctype, arg0),
1779 fold_convert (ctype, arg1));
1781 /* If ARG0 is larger than ARG1, subtract and return the result in CTYPE.
1782 Otherwise, subtract the other way, convert to CTYPE (we know that can't
1783 overflow) and negate (which can't either). Special-case a result
1784 of zero while we're here. */
1785 if (tree_int_cst_equal (arg0, arg1))
1786 return build_int_cst (ctype, 0);
1787 else if (tree_int_cst_lt (arg1, arg0))
1788 return fold_convert (ctype, size_binop (MINUS_EXPR, arg0, arg1));
1790 return size_binop (MINUS_EXPR, build_int_cst (ctype, 0),
1791 fold_convert (ctype, size_binop (MINUS_EXPR,
1795 /* A subroutine of fold_convert_const handling conversions of an
1796 INTEGER_CST to another integer type. */
1799 fold_convert_const_int_from_int (tree type, tree arg1)
1803 /* Given an integer constant, make new constant with new type,
1804 appropriately sign-extended or truncated. */
1805 t = build_int_cst_wide (type, TREE_INT_CST_LOW (arg1),
1806 TREE_INT_CST_HIGH (arg1));
1808 t = force_fit_type (t,
1809 /* Don't set the overflow when
1810 converting a pointer */
1811 !POINTER_TYPE_P (TREE_TYPE (arg1)),
1812 (TREE_INT_CST_HIGH (arg1) < 0
1813 && (TYPE_UNSIGNED (type)
1814 < TYPE_UNSIGNED (TREE_TYPE (arg1))))
1815 | TREE_OVERFLOW (arg1),
1816 TREE_CONSTANT_OVERFLOW (arg1));
1821 /* A subroutine of fold_convert_const handling conversions of a REAL_CST
1822 to an integer type. */
1825 fold_convert_const_int_from_real (enum tree_code code, tree type, tree arg1)
1830 /* The following code implements the floating point to integer
1831 conversion rules required by the Java Language Specification,
1832 that IEEE NaNs are mapped to zero and values that overflow
1833 the target precision saturate, i.e. values greater than
1834 INT_MAX are mapped to INT_MAX, and values less than INT_MIN
1835 are mapped to INT_MIN. These semantics are allowed by the
1836 C and C++ standards that simply state that the behavior of
1837 FP-to-integer conversion is unspecified upon overflow. */
1839 HOST_WIDE_INT high, low;
1841 REAL_VALUE_TYPE x = TREE_REAL_CST (arg1);
1845 case FIX_TRUNC_EXPR:
1846 real_trunc (&r, VOIDmode, &x);
1850 real_ceil (&r, VOIDmode, &x);
1853 case FIX_FLOOR_EXPR:
1854 real_floor (&r, VOIDmode, &x);
1857 case FIX_ROUND_EXPR:
1858 real_round (&r, VOIDmode, &x);
1865 /* If R is NaN, return zero and show we have an overflow. */
1866 if (REAL_VALUE_ISNAN (r))
1873 /* See if R is less than the lower bound or greater than the
1874 upper bound. */
1878 tree lt = TYPE_MIN_VALUE (type);
1879 REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt);
1880 if (REAL_VALUES_LESS (r, l))
1883 high = TREE_INT_CST_HIGH (lt);
1884 low = TREE_INT_CST_LOW (lt);
1890 tree ut = TYPE_MAX_VALUE (type);
1893 REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut);
1894 if (REAL_VALUES_LESS (u, r))
1897 high = TREE_INT_CST_HIGH (ut);
1898 low = TREE_INT_CST_LOW (ut);
1904 REAL_VALUE_TO_INT (&low, &high, r);
1906 t = build_int_cst_wide (type, low, high);
1908 t = force_fit_type (t, -1, overflow | TREE_OVERFLOW (arg1),
1909 TREE_CONSTANT_OVERFLOW (arg1));
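/* Editorial example of the saturating semantics above: converting the
   REAL_CST 1.0e30 to a 32-bit signed type yields INT_MAX (2147483647)
   with the overflow flag set, and converting a NaN yields zero,
   likewise flagged as an overflow. */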
1913 /* A subroutine of fold_convert_const handling conversions of a REAL_CST
1914 to another floating point type. */
1917 fold_convert_const_real_from_real (tree type, tree arg1)
1919 REAL_VALUE_TYPE value;
1922 real_convert (&value, TYPE_MODE (type), &TREE_REAL_CST (arg1));
1923 t = build_real (type, value);
1925 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
1926 TREE_CONSTANT_OVERFLOW (t)
1927 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
1931 /* Attempt to fold type conversion operation CODE of expression ARG1 to
1932 type TYPE. If no simplification can be done return NULL_TREE. */
1935 fold_convert_const (enum tree_code code, tree type, tree arg1)
1937 if (TREE_TYPE (arg1) == type)
1940 if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))
1942 if (TREE_CODE (arg1) == INTEGER_CST)
1943 return fold_convert_const_int_from_int (type, arg1);
1944 else if (TREE_CODE (arg1) == REAL_CST)
1945 return fold_convert_const_int_from_real (code, type, arg1);
1947 else if (TREE_CODE (type) == REAL_TYPE)
1949 if (TREE_CODE (arg1) == INTEGER_CST)
1950 return build_real_from_int_cst (type, arg1);
1951 if (TREE_CODE (arg1) == REAL_CST)
1952 return fold_convert_const_real_from_real (type, arg1);
1957 /* Construct a vector of zero elements of vector type TYPE. */
1960 build_zero_vector (tree type)
1965 elem = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
1966 units = TYPE_VECTOR_SUBPARTS (type);
1969 for (i = 0; i < units; i++)
1970 list = tree_cons (NULL_TREE, elem, list);
1971 return build_vector (type, list);
1974 /* Convert expression ARG to type TYPE. Used by the middle-end for
1975 simple conversions in preference to calling the front-end's convert. */
1978 fold_convert (tree type, tree arg)
1980 tree orig = TREE_TYPE (arg);
1986 if (TREE_CODE (arg) == ERROR_MARK
1987 || TREE_CODE (type) == ERROR_MARK
1988 || TREE_CODE (orig) == ERROR_MARK)
1989 return error_mark_node;
1991 if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig)
1992 || lang_hooks.types_compatible_p (TYPE_MAIN_VARIANT (type),
1993 TYPE_MAIN_VARIANT (orig)))
1994 return fold_build1 (NOP_EXPR, type, arg);
1996 switch (TREE_CODE (type))
1998 case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
1999 case POINTER_TYPE: case REFERENCE_TYPE:
2001 if (TREE_CODE (arg) == INTEGER_CST)
2003 tem = fold_convert_const (NOP_EXPR, type, arg);
2004 if (tem != NULL_TREE)
2007 if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
2008 || TREE_CODE (orig) == OFFSET_TYPE)
2009 return fold_build1 (NOP_EXPR, type, arg);
2010 if (TREE_CODE (orig) == COMPLEX_TYPE)
2012 tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2013 return fold_convert (type, tem);
2015 gcc_assert (TREE_CODE (orig) == VECTOR_TYPE
2016 && tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
2017 return fold_build1 (NOP_EXPR, type, arg);
2020 if (TREE_CODE (arg) == INTEGER_CST)
2022 tem = fold_convert_const (FLOAT_EXPR, type, arg);
2023 if (tem != NULL_TREE)
2026 else if (TREE_CODE (arg) == REAL_CST)
2028 tem = fold_convert_const (NOP_EXPR, type, arg);
2029 if (tem != NULL_TREE)
2033 switch (TREE_CODE (orig))
2036 case BOOLEAN_TYPE: case ENUMERAL_TYPE:
2037 case POINTER_TYPE: case REFERENCE_TYPE:
2038 return fold_build1 (FLOAT_EXPR, type, arg);
2041 return fold_build1 (NOP_EXPR, type, arg);
2044 tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2045 return fold_convert (type, tem);
2052 switch (TREE_CODE (orig))
2055 case BOOLEAN_TYPE: case ENUMERAL_TYPE:
2056 case POINTER_TYPE: case REFERENCE_TYPE:
2058 return build2 (COMPLEX_EXPR, type,
2059 fold_convert (TREE_TYPE (type), arg),
2060 fold_convert (TREE_TYPE (type), integer_zero_node));
2065 if (TREE_CODE (arg) == COMPLEX_EXPR)
2067 rpart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 0));
2068 ipart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 1));
2069 return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
2072 arg = save_expr (arg);
2073 rpart = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2074 ipart = fold_build1 (IMAGPART_EXPR, TREE_TYPE (orig), arg);
2075 rpart = fold_convert (TREE_TYPE (type), rpart);
2076 ipart = fold_convert (TREE_TYPE (type), ipart);
2077 return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
2085 if (integer_zerop (arg))
2086 return build_zero_vector (type);
2087 gcc_assert (tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
2088 gcc_assert (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
2089 || TREE_CODE (orig) == VECTOR_TYPE);
2090 return fold_build1 (VIEW_CONVERT_EXPR, type, arg);
2093 return fold_build1 (NOP_EXPR, type, fold_ignored_result (arg));
2100 /* Return false if expr can be assumed not to be an lvalue, true
2101 otherwise. */
2104 maybe_lvalue_p (tree x)
2106 /* We only need to wrap lvalue tree codes. */
2107 switch (TREE_CODE (x))
2118 case ALIGN_INDIRECT_REF:
2119 case MISALIGNED_INDIRECT_REF:
2121 case ARRAY_RANGE_REF:
2127 case PREINCREMENT_EXPR:
2128 case PREDECREMENT_EXPR:
2130 case TRY_CATCH_EXPR:
2131 case WITH_CLEANUP_EXPR:
2142 /* Assume the worst for front-end tree codes. */
2143 if ((int)TREE_CODE (x) >= NUM_TREE_CODES)
2151 /* Return an expr equal to X but certainly not valid as an lvalue. */
2156 /* While we are in GIMPLE, NON_LVALUE_EXPR doesn't mean anything to
2157 us. */
2161 if (! maybe_lvalue_p (x))
2163 return build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
2166 /* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
2167 Zero means allow extended lvalues. */
2169 int pedantic_lvalues;
2171 /* When pedantic, return an expr equal to X but certainly not valid as a
2172 pedantic lvalue. Otherwise, return X. */
2175 pedantic_non_lvalue (tree x)
2177 if (pedantic_lvalues)
2178 return non_lvalue (x);
2183 /* Given a tree comparison code, return the code that is the logical inverse
2184 of the given code. It is not safe to do this for floating-point
2185 comparisons, except for NE_EXPR and EQ_EXPR, so we receive a machine mode
2186 as well: if reversing the comparison is unsafe, return ERROR_MARK. */
2189 invert_tree_comparison (enum tree_code code, bool honor_nans)
2191 if (honor_nans && flag_trapping_math)
2201 return honor_nans ? UNLE_EXPR : LE_EXPR;
2203 return honor_nans ? UNLT_EXPR : LT_EXPR;
2205 return honor_nans ? UNGE_EXPR : GE_EXPR;
2207 return honor_nans ? UNGT_EXPR : GT_EXPR;
2221 return UNORDERED_EXPR;
2222 case UNORDERED_EXPR:
2223 return ORDERED_EXPR;
2229 /* Similar, but return the comparison that results if the operands are
2230 swapped. This is safe for floating-point. */
2233 swap_tree_comparison (enum tree_code code)
2240 case UNORDERED_EXPR:
2266 /* Convert a comparison tree code from an enum tree_code representation
2267 into a compcode bit-based encoding. This function is the inverse of
2268 compcode_to_comparison. */
2270 static enum comparison_code
2271 comparison_to_compcode (enum tree_code code)
2288 return COMPCODE_ORD;
2289 case UNORDERED_EXPR:
2290 return COMPCODE_UNORD;
2292 return COMPCODE_UNLT;
2294 return COMPCODE_UNEQ;
2296 return COMPCODE_UNLE;
2298 return COMPCODE_UNGT;
2300 return COMPCODE_LTGT;
2302 return COMPCODE_UNGE;
2308 /* Convert a compcode bit-based encoding of a comparison operator back
2309 to GCC's enum tree_code representation. This function is the
2310 inverse of comparison_to_compcode. */
2312 static enum tree_code
2313 compcode_to_comparison (enum comparison_code code)
2330 return ORDERED_EXPR;
2331 case COMPCODE_UNORD:
2332 return UNORDERED_EXPR;
/* Return a tree for the comparison which is the combination of
   doing the AND or OR (depending on CODE) of the two operations LCODE
   and RCODE on the identical operands LL_ARG and LR_ARG.  Take into account
   the possibility of trapping if the mode has NaNs, and return NULL_TREE
   if this makes the transformation invalid.  */

static tree
combine_comparisons (enum tree_code code, enum tree_code lcode,
		     enum tree_code rcode, tree truth_type,
		     tree ll_arg, tree lr_arg)
{
  bool honor_nans = HONOR_NANS (TYPE_MODE (TREE_TYPE (ll_arg)));
  enum comparison_code lcompcode = comparison_to_compcode (lcode);
  enum comparison_code rcompcode = comparison_to_compcode (rcode);
  enum comparison_code compcode;

  switch (code)
    {
    case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR:
      compcode = lcompcode & rcompcode;
      break;

    case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR:
      compcode = lcompcode | rcompcode;
      break;

    default:
      return NULL_TREE;
    }

  if (!honor_nans)
    {
      /* Eliminate unordered comparisons, as well as LTGT and ORD
	 which are not used unless the mode has NaNs.  */
      compcode &= ~COMPCODE_UNORD;
      if (compcode == COMPCODE_LTGT)
	compcode = COMPCODE_NE;
      else if (compcode == COMPCODE_ORD)
	compcode = COMPCODE_TRUE;
    }
  else if (flag_trapping_math)
    {
      /* Check that the original operation and the optimized ones will trap
	 under the same condition.  */
      bool ltrap = (lcompcode & COMPCODE_UNORD) == 0
		   && (lcompcode != COMPCODE_EQ)
		   && (lcompcode != COMPCODE_ORD);
      bool rtrap = (rcompcode & COMPCODE_UNORD) == 0
		   && (rcompcode != COMPCODE_EQ)
		   && (rcompcode != COMPCODE_ORD);
      bool trap = (compcode & COMPCODE_UNORD) == 0
		  && (compcode != COMPCODE_EQ)
		  && (compcode != COMPCODE_ORD);

      /* In a short-circuited boolean expression the LHS might be
	 such that the RHS, if evaluated, will never trap.  For
	 example, in ORD (x, y) && (x < y), we evaluate the RHS only
	 if neither x nor y is NaN.  (This is a mixed blessing: for
	 example, the expression above will never trap, hence
	 optimizing it to x < y would be invalid).  */
      if ((code == TRUTH_ORIF_EXPR && (lcompcode & COMPCODE_UNORD))
	  || (code == TRUTH_ANDIF_EXPR && !(lcompcode & COMPCODE_UNORD)))
	rtrap = false;

      /* If the comparison was short-circuited, and only the RHS
	 trapped, we may now generate a spurious trap.  */
      if (rtrap && !ltrap
	  && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
	return NULL_TREE;

      /* If we changed the conditions that cause a trap, we lose.  */
      if ((ltrap || rtrap) != trap)
	return NULL_TREE;
    }

  if (compcode == COMPCODE_TRUE)
    return constant_boolean_node (true, truth_type);
  else if (compcode == COMPCODE_FALSE)
    return constant_boolean_node (false, truth_type);
  else
    return fold_build2 (compcode_to_comparison (compcode),
			truth_type, ll_arg, lr_arg);
}
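/* As an illustration: for TRUTH_ORIF_EXPR of "x < y" and "x == y" on
   the same operands, the bitwise OR above yields
   COMPCODE_LT | COMPCODE_EQ == COMPCODE_LE, so the whole expression
   folds to the single comparison "x <= y".  */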
/* Return nonzero if CODE is a tree code that represents a truth value.  */

static int
truth_value_p (enum tree_code code)
{
  return (TREE_CODE_CLASS (code) == tcc_comparison
	  || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
	  || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
	  || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
}
/* Return nonzero if two operands (typically of the same tree node)
   are necessarily equal.  If either argument has side-effects this
   function returns zero.  FLAGS modifies behavior as follows:

   If OEP_ONLY_CONST is set, only return nonzero for constants.
   This function tests whether the operands are indistinguishable;
   it does not test whether they are equal using C's == operation.
   The distinction is important for IEEE floating point, because
   (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
   (2) two NaNs may be indistinguishable, but NaN!=NaN.

   If OEP_ONLY_CONST is unset, a VAR_DECL is considered equal to itself
   even though it may hold multiple values during a function.
   This is because a GCC tree node guarantees that nothing else is
   executed between the evaluation of its "operands" (which may often
   be evaluated in arbitrary order).  Hence if the operands themselves
   don't side-effect, the VAR_DECLs, PARM_DECLs etc. must hold the
   same value in each operand/subexpression.  Hence leaving OEP_ONLY_CONST
   unset means assuming isochronic (or instantaneous) tree equivalence.
   Unless comparing arbitrary expression trees, such as from different
   statements, this flag can usually be left unset.

   If OEP_PURE_SAME is set, then pure functions with identical arguments
   are considered the same.  It is used when the caller has other ways
   to ensure that global memory is unchanged in between.  */
int
operand_equal_p (tree arg0, tree arg1, unsigned int flags)
{
  /* If either is ERROR_MARK, they aren't equal.  */
  if (TREE_CODE (arg0) == ERROR_MARK || TREE_CODE (arg1) == ERROR_MARK)
    return 0;

  /* If both types don't have the same signedness, then we can't consider
     them equal.  We must check this before the STRIP_NOPS calls
     because they may change the signedness of the arguments.  */
  if (TYPE_UNSIGNED (TREE_TYPE (arg0)) != TYPE_UNSIGNED (TREE_TYPE (arg1)))
    return 0;

  STRIP_NOPS (arg0);
  STRIP_NOPS (arg1);

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      /* This is needed for conversions and for COMPONENT_REF.
	 Might as well play it safe and always test this.  */
      || TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK
      || TREE_CODE (TREE_TYPE (arg1)) == ERROR_MARK
      || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
    return 0;

  /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
     We don't care about side effects in that case because the SAVE_EXPR
     takes care of that for us.  In all other cases, two expressions are
     equal if they have no side effects.  If we have two identical
     expressions with side effects that should be treated the same due
     to the only side effects being identical SAVE_EXPR's, that will
     be detected in the recursive calls below.  */
  if (arg0 == arg1 && ! (flags & OEP_ONLY_CONST)
      && (TREE_CODE (arg0) == SAVE_EXPR
	  || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
    return 1;

  /* Next handle constant cases, those for which we can return 1 even
     if ONLY_CONST is set.  */
  if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1))
    switch (TREE_CODE (arg0))
      {
      case INTEGER_CST:
	return (! TREE_CONSTANT_OVERFLOW (arg0)
		&& ! TREE_CONSTANT_OVERFLOW (arg1)
		&& tree_int_cst_equal (arg0, arg1));

      case REAL_CST:
	return (! TREE_CONSTANT_OVERFLOW (arg0)
		&& ! TREE_CONSTANT_OVERFLOW (arg1)
		&& REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0),
					  TREE_REAL_CST (arg1)));

      case VECTOR_CST:
	{
	  tree v1, v2;

	  if (TREE_CONSTANT_OVERFLOW (arg0)
	      || TREE_CONSTANT_OVERFLOW (arg1))
	    return 0;

	  v1 = TREE_VECTOR_CST_ELTS (arg0);
	  v2 = TREE_VECTOR_CST_ELTS (arg1);
	  while (v1 && v2)
	    {
	      if (!operand_equal_p (TREE_VALUE (v1), TREE_VALUE (v2),
				    flags))
		return 0;
	      v1 = TREE_CHAIN (v1);
	      v2 = TREE_CHAIN (v2);
	    }

	  return v1 == v2;
	}

      case COMPLEX_CST:
	return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1),
				 flags)
		&& operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1),
				    flags));

      case STRING_CST:
	return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1)
		&& ! memcmp (TREE_STRING_POINTER (arg0),
			     TREE_STRING_POINTER (arg1),
			     TREE_STRING_LENGTH (arg0)));

      case ADDR_EXPR:
	return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0),
				0);
      default:
	break;
      }

  if (flags & OEP_ONLY_CONST)
    return 0;
/* Define macros to test an operand from arg0 and arg1 for equality and a
   variant that allows null and views null as being different from any
   non-null value.  In the latter case, if either is null, then both
   must be; otherwise, do the normal comparison.  */

#define OP_SAME(N) operand_equal_p (TREE_OPERAND (arg0, N), \
				    TREE_OPERAND (arg1, N), flags)

#define OP_SAME_WITH_NULL(N)				\
  ((!TREE_OPERAND (arg0, N) || !TREE_OPERAND (arg1, N))	\
   ? TREE_OPERAND (arg0, N) == TREE_OPERAND (arg1, N) : OP_SAME (N))
  switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
    {
    case tcc_unary:
      /* Two conversions are equal only if signedness and modes match.  */
      switch (TREE_CODE (arg0))
	{
	case NOP_EXPR:
	case CONVERT_EXPR:
	case FIX_CEIL_EXPR:
	case FIX_TRUNC_EXPR:
	case FIX_FLOOR_EXPR:
	case FIX_ROUND_EXPR:
	  if (TYPE_UNSIGNED (TREE_TYPE (arg0))
	      != TYPE_UNSIGNED (TREE_TYPE (arg1)))
	    return 0;
	  break;
	default:
	  break;
	}

      return OP_SAME (0);

    case tcc_comparison:
    case tcc_binary:
      if (OP_SAME (0) && OP_SAME (1))
	return 1;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (TREE_CODE (arg0))
	      && operand_equal_p (TREE_OPERAND (arg0, 0),
				  TREE_OPERAND (arg1, 1), flags)
	      && operand_equal_p (TREE_OPERAND (arg0, 1),
				  TREE_OPERAND (arg1, 0), flags));

    case tcc_reference:
      /* If either of the pointer (or reference) expressions we are
	 dereferencing contain a side effect, these cannot be equal.  */
      if (TREE_SIDE_EFFECTS (arg0)
	  || TREE_SIDE_EFFECTS (arg1))
	return 0;

      switch (TREE_CODE (arg0))
	{
	case INDIRECT_REF:
	case ALIGN_INDIRECT_REF:
	case MISALIGNED_INDIRECT_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	  return OP_SAME (0);

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	  /* Operands 2 and 3 may be null.  */
	  return (OP_SAME (0)
		  && OP_SAME (1)
		  && OP_SAME_WITH_NULL (2)
		  && OP_SAME_WITH_NULL (3));

	case COMPONENT_REF:
	  /* Handle operand 2 the same as for ARRAY_REF.  Operand 0
	     may be NULL when we're called to compare MEM_EXPRs.  */
	  return OP_SAME_WITH_NULL (0)
		 && OP_SAME (1)
		 && OP_SAME_WITH_NULL (2);

	case BIT_FIELD_REF:
	  return OP_SAME (0) && OP_SAME (1) && OP_SAME (2);

	default:
	  return 0;
	}

    case tcc_expression:
      switch (TREE_CODE (arg0))
	{
	case ADDR_EXPR:
	case TRUTH_NOT_EXPR:
	  return OP_SAME (0);

	case TRUTH_ANDIF_EXPR:
	case TRUTH_ORIF_EXPR:
	  return OP_SAME (0) && OP_SAME (1);

	case TRUTH_AND_EXPR:
	case TRUTH_OR_EXPR:
	case TRUTH_XOR_EXPR:
	  if (OP_SAME (0) && OP_SAME (1))
	    return 1;

	  /* Otherwise take into account this is a commutative operation.  */
	  return (operand_equal_p (TREE_OPERAND (arg0, 0),
				   TREE_OPERAND (arg1, 1), flags)
		  && operand_equal_p (TREE_OPERAND (arg0, 1),
				      TREE_OPERAND (arg1, 0), flags));

	case CALL_EXPR:
	  /* If the CALL_EXPRs call different functions, then they
	     clearly can not be equal.  */
	  if (! OP_SAME (0))
	    return 0;

	  {
	    unsigned int cef = call_expr_flags (arg0);
	    if (flags & OEP_PURE_SAME)
	      cef &= ECF_CONST | ECF_PURE;
	    else
	      cef &= ECF_CONST;
	    if (!cef)
	      return 0;
	  }

	  /* Now see if all the arguments are the same.  operand_equal_p
	     does not handle TREE_LIST, so we walk the operands here
	     feeding them to operand_equal_p.  */
	  arg0 = TREE_OPERAND (arg0, 1);
	  arg1 = TREE_OPERAND (arg1, 1);
	  while (arg0 && arg1)
	    {
	      if (! operand_equal_p (TREE_VALUE (arg0), TREE_VALUE (arg1),
				     flags))
		return 0;

	      arg0 = TREE_CHAIN (arg0);
	      arg1 = TREE_CHAIN (arg1);
	    }

	  /* If we get here and both argument lists are exhausted
	     then the CALL_EXPRs are equal.  */
	  return ! (arg0 || arg1);

	default:
	  return 0;
	}

    case tcc_declaration:
      /* Consider __builtin_sqrt equal to sqrt.  */
      return (TREE_CODE (arg0) == FUNCTION_DECL
	      && DECL_BUILT_IN (arg0) && DECL_BUILT_IN (arg1)
	      && DECL_BUILT_IN_CLASS (arg0) == DECL_BUILT_IN_CLASS (arg1)
	      && DECL_FUNCTION_CODE (arg0) == DECL_FUNCTION_CODE (arg1));

    default:
      return 0;
    }

#undef OP_SAME
#undef OP_SAME_WITH_NULL
}
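/* Usage sketch: because the commutative cases above try both operand
   orders, operand_equal_p reports "a + b" equal to "b + a"; but
   "f () + b" never matches "b + f ()", since operands with side
   effects only compare equal as one and the same SAVE_EXPR.  */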
/* Similar to operand_equal_p, but see if ARG0 might have been made by
   shorten_compare from ARG1 when ARG1 was being compared with OTHER.

   When in doubt, return 0.  */

static int
operand_equal_for_comparison_p (tree arg0, tree arg1, tree other)
{
  int unsignedp1, unsignedpo;
  tree primarg0, primarg1, primother;
  unsigned int correct_width;

  if (operand_equal_p (arg0, arg1, 0))
    return 1;

  if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
      || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
    return 0;

  /* Discard any conversions that don't change the modes of ARG0 and ARG1
     and see if the inner values are the same.  This removes any
     signedness comparison, which doesn't matter here.  */
  primarg0 = arg0, primarg1 = arg1;
  STRIP_NOPS (primarg0);
  STRIP_NOPS (primarg1);
  if (operand_equal_p (primarg0, primarg1, 0))
    return 1;

  /* Duplicate what shorten_compare does to ARG1 and see if that gives the
     actual comparison operand, ARG0.

     First throw away any conversions to wider types
     already present in the operands.  */

  primarg1 = get_narrower (arg1, &unsignedp1);
  primother = get_narrower (other, &unsignedpo);

  correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
  if (unsignedp1 == unsignedpo
      && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
      && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
    {
      tree type = TREE_TYPE (arg0);

      /* Make sure shorter operand is extended the right way
	 to match the longer operand.  */
      primarg1 = fold_convert (lang_hooks.types.signed_or_unsigned_type
			       (unsignedp1, TREE_TYPE (primarg1)), primarg1);

      if (operand_equal_p (arg0, fold_convert (type, primarg1), 0))
	return 1;
    }

  return 0;
}
/* See if ARG is an expression that is either a comparison or is performing
   arithmetic on comparisons.  The comparisons must only be comparing
   two different values, which will be stored in *CVAL1 and *CVAL2; if
   they are nonzero it means that some operands have already been found.
   No variables may be used anywhere else in the expression except in the
   comparisons.  If SAVE_P is true it means we removed a SAVE_EXPR around
   the expression and save_expr needs to be called with CVAL1 and CVAL2.

   If this is true, return 1.  Otherwise, return zero.  */

static int
twoval_comparison_p (tree arg, tree *cval1, tree *cval2, int *save_p)
{
  enum tree_code code = TREE_CODE (arg);
  enum tree_code_class class = TREE_CODE_CLASS (code);

  /* We can handle some of the tcc_expression cases here.  */
  if (class == tcc_expression && code == TRUTH_NOT_EXPR)
    class = tcc_unary;
  else if (class == tcc_expression
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
	       || code == COMPOUND_EXPR))
    class = tcc_binary;

  else if (class == tcc_expression && code == SAVE_EXPR
	   && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg, 0)))
    {
      /* If we've already found a CVAL1 or CVAL2, this expression is
	 too complex to handle.  */
      if (*cval1 || *cval2)
	return 0;

      class = tcc_unary;
      *save_p = 1;
    }

  switch (class)
    {
    case tcc_unary:
      return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);

    case tcc_binary:
      return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
	      && twoval_comparison_p (TREE_OPERAND (arg, 1),
				      cval1, cval2, save_p));

    case tcc_constant:
      return 1;

    case tcc_expression:
      if (code == COND_EXPR)
	return (twoval_comparison_p (TREE_OPERAND (arg, 0),
				     cval1, cval2, save_p)
		&& twoval_comparison_p (TREE_OPERAND (arg, 1),
					cval1, cval2, save_p)
		&& twoval_comparison_p (TREE_OPERAND (arg, 2),
					cval1, cval2, save_p));
      break;

    case tcc_comparison:
      /* First see if we can handle the first operand, then the second.  For
	 the second operand, we know *CVAL1 can't be zero.  It must be that
	 one side of the comparison is each of the values; test for the
	 case where this isn't true by failing if the two operands
	 are the same.  */

      if (operand_equal_p (TREE_OPERAND (arg, 0),
			   TREE_OPERAND (arg, 1), 0))
	return 0;

      if (*cval1 == 0)
	*cval1 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
	;
      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 0);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
	;
      else
	return 0;

      if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
	;
      else if (*cval2 == 0)
	*cval2 = TREE_OPERAND (arg, 1);
      else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
	;
      else
	return 0;

      return 1;

    default:
      break;
    }

  return 0;
}
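/* As an illustration: for ARG of the form (x < y) | (x == y), the
   walk above records *CVAL1 = x and *CVAL2 = y and returns 1,
   whereas (x < y) | (x == z) fails because a third distinct value
   appears in the comparisons.  */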
/* ARG is a tree that is known to contain just arithmetic operations and
   comparisons.  Evaluate the operations in the tree substituting NEW0 for
   any occurrence of OLD0 as an operand of a comparison and likewise for
   NEW1 and OLD1.  */

static tree
eval_subst (tree arg, tree old0, tree new0, tree old1, tree new1)
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);
  enum tree_code_class class = TREE_CODE_CLASS (code);

  /* We can handle some of the tcc_expression cases here.  */
  if (class == tcc_expression && code == TRUTH_NOT_EXPR)
    class = tcc_unary;
  else if (class == tcc_expression
	   && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
    class = tcc_binary;

  switch (class)
    {
    case tcc_unary:
      return fold_build1 (code, type,
			  eval_subst (TREE_OPERAND (arg, 0),
				      old0, new0, old1, new1));

    case tcc_binary:
      return fold_build2 (code, type,
			  eval_subst (TREE_OPERAND (arg, 0),
				      old0, new0, old1, new1),
			  eval_subst (TREE_OPERAND (arg, 1),
				      old0, new0, old1, new1));

    case tcc_expression:
      switch (code)
	{
	case SAVE_EXPR:
	  return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);

	case COMPOUND_EXPR:
	  return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);

	case COND_EXPR:
	  return fold_build3 (code, type,
			      eval_subst (TREE_OPERAND (arg, 0),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 1),
					  old0, new0, old1, new1),
			      eval_subst (TREE_OPERAND (arg, 2),
					  old0, new0, old1, new1));
	default:
	  break;
	}
      /* Fall through - ???  */

    case tcc_comparison:
      {
	tree arg0 = TREE_OPERAND (arg, 0);
	tree arg1 = TREE_OPERAND (arg, 1);

	/* We need to check both for exact equality and tree equality.  The
	   former will be true if the operand has a side-effect.  In that
	   case, we know the operand occurred exactly once.  */

	if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
	  arg0 = new0;
	else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
	  arg0 = new1;

	if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
	  arg1 = new0;
	else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
	  arg1 = new1;

	return fold_build2 (code, type, arg0, arg1);
      }

    default:
      return arg;
    }
}
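/* As an illustration: with OLD0 = x, NEW0 = 0, OLD1 = y, NEW1 = 1,
   eval_subst rewrites the tree (x < y) | (y < x) into
   (0 < 1) | (1 < 0), which subsequent constant folding collapses.  */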
/* Return a tree for the case when the result of an expression is RESULT
   converted to TYPE and OMITTED was previously an operand of the expression
   but is now not needed (e.g., we folded OMITTED * 0).

   If OMITTED has side effects, we must evaluate it.  Otherwise, just do
   the conversion of RESULT to TYPE.  */

tree
omit_one_operand (tree type, tree result, tree omitted)
{
  tree t = fold_convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);

  return non_lvalue (t);
}
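/* As an illustration: when folding "f () * 0" a caller can use
     omit_one_operand (type, integer_zero_node, <call to f>);
   because the call has side effects, the result is the COMPOUND_EXPR
   "(f (), 0)", which still evaluates the call but yields zero.  */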
/* Similar, but call pedantic_non_lvalue instead of non_lvalue.  */

static tree
pedantic_omit_one_operand (tree type, tree result, tree omitted)
{
  tree t = fold_convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted))
    return build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);

  return pedantic_non_lvalue (t);
}
/* Return a tree for the case when the result of an expression is RESULT
   converted to TYPE and OMITTED1 and OMITTED2 were previously operands
   of the expression but are now not needed.

   If OMITTED1 or OMITTED2 has side effects, they must be evaluated.
   If both OMITTED1 and OMITTED2 have side effects, OMITTED1 is
   evaluated before OMITTED2.  Otherwise, if neither has side effects,
   just do the conversion of RESULT to TYPE.  */

tree
omit_two_operands (tree type, tree result, tree omitted1, tree omitted2)
{
  tree t = fold_convert (type, result);

  if (TREE_SIDE_EFFECTS (omitted2))
    t = build2 (COMPOUND_EXPR, type, omitted2, t);
  if (TREE_SIDE_EFFECTS (omitted1))
    t = build2 (COMPOUND_EXPR, type, omitted1, t);

  return TREE_CODE (t) != COMPOUND_EXPR ? non_lvalue (t) : t;
}
/* Return a simplified tree node for the truth-negation of ARG.  This
   never alters ARG itself.  We assume that ARG is an operation that
   returns a truth value (0 or 1).

   FIXME: one would think we would fold the result, but it causes
   problems with the dominator optimizer.  */

tree
invert_truthvalue (tree arg)
{
  tree type = TREE_TYPE (arg);
  enum tree_code code = TREE_CODE (arg);

  if (code == ERROR_MARK)
    return arg;

  /* If this is a comparison, we can simply invert it, except for
     floating-point non-equality comparisons, in which case we just
     enclose a TRUTH_NOT_EXPR around what we have.  */

  if (TREE_CODE_CLASS (code) == tcc_comparison)
    {
      tree op_type = TREE_TYPE (TREE_OPERAND (arg, 0));
      if (FLOAT_TYPE_P (op_type)
	  && flag_trapping_math
	  && code != ORDERED_EXPR && code != UNORDERED_EXPR
	  && code != NE_EXPR && code != EQ_EXPR)
	return build1 (TRUTH_NOT_EXPR, type, arg);
      else
	{
	  code = invert_tree_comparison (code,
					 HONOR_NANS (TYPE_MODE (op_type)));
	  if (code == ERROR_MARK)
	    return build1 (TRUTH_NOT_EXPR, type, arg);
	  else
	    return build2 (code, type,
			   TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
	}
    }

  switch (code)
    {
    case INTEGER_CST:
      return constant_boolean_node (integer_zerop (arg), type);

    case TRUTH_AND_EXPR:
      return build2 (TRUTH_OR_EXPR, type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)),
		     invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_OR_EXPR:
      return build2 (TRUTH_AND_EXPR, type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)),
		     invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_XOR_EXPR:
      /* Here we can invert either operand.  We invert the first operand
	 unless the second operand is a TRUTH_NOT_EXPR in which case our
	 result is the XOR of the first operand with the inside of the
	 negation of the second operand.  */

      if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
	return build2 (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
		       TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
      else
	return build2 (TRUTH_XOR_EXPR, type,
		       invert_truthvalue (TREE_OPERAND (arg, 0)),
		       TREE_OPERAND (arg, 1));

    case TRUTH_ANDIF_EXPR:
      return build2 (TRUTH_ORIF_EXPR, type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)),
		     invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_ORIF_EXPR:
      return build2 (TRUTH_ANDIF_EXPR, type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)),
		     invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_NOT_EXPR:
      return TREE_OPERAND (arg, 0);

    case COND_EXPR:
      {
	tree arg1 = TREE_OPERAND (arg, 1);
	tree arg2 = TREE_OPERAND (arg, 2);
	/* A COND_EXPR may have a throw as one operand, which
	   then has void type.  Just leave void operands
	   alone.  */
	return build3 (COND_EXPR, type, TREE_OPERAND (arg, 0),
		       VOID_TYPE_P (TREE_TYPE (arg1))
		       ? arg1 : invert_truthvalue (arg1),
		       VOID_TYPE_P (TREE_TYPE (arg2))
		       ? arg2 : invert_truthvalue (arg2));
      }

    case COMPOUND_EXPR:
      return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
		     invert_truthvalue (TREE_OPERAND (arg, 1)));

    case NON_LVALUE_EXPR:
      return invert_truthvalue (TREE_OPERAND (arg, 0));

    case NOP_EXPR:
      if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE)
	break;

    case CONVERT_EXPR:
    case FLOAT_EXPR:
      return build1 (TREE_CODE (arg), type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)));

    case BIT_AND_EXPR:
      if (!integer_onep (TREE_OPERAND (arg, 1)))
	break;
      return build2 (EQ_EXPR, type, arg,
		     build_int_cst (type, 0));

    case SAVE_EXPR:
      return build1 (TRUTH_NOT_EXPR, type, arg);

    case CLEANUP_POINT_EXPR:
      return build1 (CLEANUP_POINT_EXPR, type,
		     invert_truthvalue (TREE_OPERAND (arg, 0)));

    default:
      break;
    }

  gcc_assert (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE);
  return build1 (TRUTH_NOT_EXPR, type, arg);
}
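/* As an illustration: inverting "a && b" yields "!a || !b" via the
   TRUTH_ANDIF_EXPR case (De Morgan), while inverting a floating-point
   "x < y" with flag_trapping_math set is left as TRUTH_NOT_EXPR of
   the comparison, since rewriting it could change which inputs trap
   on NaNs.  */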
/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
	(A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.

   If this optimization cannot be done, 0 will be returned.  */

static tree
distribute_bit_expr (enum tree_code code, tree type, tree arg0, tree arg1)
{
  tree common;
  tree left, right;

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      || TREE_CODE (arg0) == code
      || (TREE_CODE (arg0) != BIT_AND_EXPR
	  && TREE_CODE (arg0) != BIT_IOR_EXPR))
    return 0;

  if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 0);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 0);
    }
  else
    return 0;

  return fold_build2 (TREE_CODE (arg0), type, common,
		      fold_build2 (code, type, left, right));
}
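/* As an illustration: for (x | 3) & (x | 5), COMMON is x, LEFT is 3
   and RIGHT is 5, so the result is x | (3 & 5), which constant
   folding of the inner operation reduces to x | 1.  */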
/* Knowing that ARG0 and ARG1 are both RDIV_EXPRs, simplify a binary operation
   with code CODE.  This optimization is unsafe.  */

static tree
distribute_real_division (enum tree_code code, tree type, tree arg0, tree arg1)
{
  bool mul0 = TREE_CODE (arg0) == MULT_EXPR;
  bool mul1 = TREE_CODE (arg1) == MULT_EXPR;

  /* (A / C) +- (B / C) -> (A +- B) / C.  */
  if (mul0 == mul1
      && operand_equal_p (TREE_OPERAND (arg0, 1),
			  TREE_OPERAND (arg1, 1), 0))
    return fold_build2 (mul0 ? MULT_EXPR : RDIV_EXPR, type,
			fold_build2 (code, type,
				     TREE_OPERAND (arg0, 0),
				     TREE_OPERAND (arg1, 0)),
			TREE_OPERAND (arg0, 1));

  /* (A / C1) +- (A / C2) -> A * (1 / C1 +- 1 / C2).  */
  if (operand_equal_p (TREE_OPERAND (arg0, 0),
		       TREE_OPERAND (arg1, 0), 0)
      && TREE_CODE (TREE_OPERAND (arg0, 1)) == REAL_CST
      && TREE_CODE (TREE_OPERAND (arg1, 1)) == REAL_CST)
    {
      REAL_VALUE_TYPE r0, r1;
      r0 = TREE_REAL_CST (TREE_OPERAND (arg0, 1));
      r1 = TREE_REAL_CST (TREE_OPERAND (arg1, 1));
      if (!mul0)
	real_arithmetic (&r0, RDIV_EXPR, &dconst1, &r0);
      if (!mul1)
	real_arithmetic (&r1, RDIV_EXPR, &dconst1, &r1);
      real_arithmetic (&r0, code, &r0, &r1);
      return fold_build2 (MULT_EXPR, type,
			  TREE_OPERAND (arg0, 0),
			  build_real (type, r0));
    }

  return NULL_TREE;
}
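/* As an illustration: a/5.0 + b/5.0 becomes (a + b)/5.0 by the first
   transformation, and x/2.0 - x/4.0 becomes x * 0.25 by the second.
   Both can change rounding and overflow behavior under IEEE
   arithmetic, which is why this function is documented as unsafe.  */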
/* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
   starting at BITPOS.  The field is unsigned if UNSIGNEDP is nonzero.  */

static tree
make_bit_field_ref (tree inner, tree type, int bitsize, int bitpos,
		    int unsignedp)
{
  tree result;

  if (bitpos == 0)
    {
      tree size = TYPE_SIZE (TREE_TYPE (inner));
      if ((INTEGRAL_TYPE_P (TREE_TYPE (inner))
	   || POINTER_TYPE_P (TREE_TYPE (inner)))
	  && host_integerp (size, 0)
	  && tree_low_cst (size, 0) == bitsize)
	return fold_convert (type, inner);
    }

  result = build3 (BIT_FIELD_REF, type, inner,
		   size_int (bitsize), bitsize_int (bitpos));

  BIT_FIELD_REF_UNSIGNED (result) = unsignedp;

  return result;
}
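/* As an illustration: asking for 8 bits at bit position 16 of INNER
   builds BIT_FIELD_REF <inner, 8, 16>, while asking for all 32 bits
   at position 0 of a 32-bit integral INNER short-circuits to a plain
   conversion via the test above.  */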
/* Optimize a bit-field compare.

   There are two cases:  First is a compare against a constant and the
   second is a comparison of two items where the fields are at the same
   bit position relative to the start of a chunk (byte, halfword, word)
   large enough to contain it.  In these cases we can avoid the shift
   implicit in bitfield extractions.

   For constants, we emit a compare of the shifted constant with the
   BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
   compared.  For two fields at the same position, we do the ANDs with the
   similar mask and compare the result of the ANDs.

   CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
   COMPARE_TYPE is the type of the comparison, and LHS and RHS
   are the left and right operands of the comparison, respectively.

   If the optimization described above can be done, we return the resulting
   tree.  Otherwise we return zero.  */

static tree
optimize_bit_field_compare (enum tree_code code, tree compare_type,
			    tree lhs, tree rhs)
{
  HOST_WIDE_INT lbitpos, lbitsize, rbitpos, rbitsize, nbitpos, nbitsize;
  tree type = TREE_TYPE (lhs);
  tree signed_type, unsigned_type;
  int const_p = TREE_CODE (rhs) == INTEGER_CST;
  enum machine_mode lmode, rmode, nmode;
  int lunsignedp, runsignedp;
  int lvolatilep = 0, rvolatilep = 0;
  tree linner, rinner = NULL_TREE;
  tree mask;
  tree offset;

  /* Get all the information about the extractions being done.  If the bit size
     is the same as the size of the underlying object, we aren't doing an
     extraction at all and so can do nothing.  We also don't want to
     do anything if the inner expression is a PLACEHOLDER_EXPR since we
     then will no longer be able to replace it.  */
  linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
				&lunsignedp, &lvolatilep, false);
  if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
      || offset != 0 || TREE_CODE (linner) == PLACEHOLDER_EXPR)
    return 0;

  if (!const_p)
    {
      /* If this is not a constant, we can only do something if bit positions,
	 sizes, and signedness are the same.  */
      rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode,
				    &runsignedp, &rvolatilep, false);

      if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
	  || lunsignedp != runsignedp || offset != 0
	  || TREE_CODE (rinner) == PLACEHOLDER_EXPR)
	return 0;
    }

  /* See if we can find a mode to refer to this field.  We should be able to,
     but fail if we can't.  */
  nmode = get_best_mode (lbitsize, lbitpos,
			 const_p ? TYPE_ALIGN (TREE_TYPE (linner))
			 : MIN (TYPE_ALIGN (TREE_TYPE (linner)),
				TYPE_ALIGN (TREE_TYPE (rinner))),
			 word_mode, lvolatilep || rvolatilep);
  if (nmode == VOIDmode)
    return 0;

  /* Set signed and unsigned types of the precision of this mode for the
     shifts below.  */
  signed_type = lang_hooks.types.type_for_mode (nmode, 0);
  unsigned_type = lang_hooks.types.type_for_mode (nmode, 1);

  /* Compute the bit position and size for the new reference and our offset
     within it.  If the new reference is the same size as the original, we
     won't optimize anything, so return zero.  */
  nbitsize = GET_MODE_BITSIZE (nmode);
  nbitpos = lbitpos & ~ (nbitsize - 1);
  lbitpos -= nbitpos;
  if (nbitsize == lbitsize)
    return 0;

  if (BYTES_BIG_ENDIAN)
    lbitpos = nbitsize - lbitsize - lbitpos;

  /* Make the mask to be used against the extracted field.  */
  mask = build_int_cst (unsigned_type, -1);
  mask = force_fit_type (mask, 0, false, false);
  mask = fold_convert (unsigned_type, mask);
  mask = const_binop (LSHIFT_EXPR, mask, size_int (nbitsize - lbitsize), 0);
  mask = const_binop (RSHIFT_EXPR, mask,
		      size_int (nbitsize - lbitsize - lbitpos), 0);

  if (! const_p)
    /* If not comparing with constant, just rework the comparison
       and return.  */
    return build2 (code, compare_type,
		   build2 (BIT_AND_EXPR, unsigned_type,
			   make_bit_field_ref (linner, unsigned_type,
					       nbitsize, nbitpos, 1),
			   mask),
		   build2 (BIT_AND_EXPR, unsigned_type,
			   make_bit_field_ref (rinner, unsigned_type,
					       nbitsize, nbitpos, 1),
			   mask));

  /* Otherwise, we are handling the constant case.  See if the constant is too
     big for the field.  Warn and return a tree for 0 (false) if so.  We do
     this not only for its own sake, but to avoid having to test for this
     error case below.  If we didn't, we might generate wrong code.

     For unsigned fields, the constant shifted right by the field length should
     be all zero.  For signed fields, the high-order bits should agree with
     the sign bit.  */

  if (lunsignedp)
    {
      if (! integer_zerop (const_binop (RSHIFT_EXPR,
					fold_convert (unsigned_type, rhs),
					size_int (lbitsize), 0)))
	{
	  warning (0, "comparison is always %d due to width of bit-field",
		   code == NE_EXPR);
	  return constant_boolean_node (code == NE_EXPR, compare_type);
	}
    }
  else
    {
      tree tem = const_binop (RSHIFT_EXPR, fold_convert (signed_type, rhs),
			      size_int (lbitsize - 1), 0);
      if (! integer_zerop (tem) && ! integer_all_onesp (tem))
	{
	  warning (0, "comparison is always %d due to width of bit-field",
		   code == NE_EXPR);
	  return constant_boolean_node (code == NE_EXPR, compare_type);
	}
    }

  /* Single-bit compares should always be against zero.  */
  if (lbitsize == 1 && ! integer_zerop (rhs))
    {
      code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
      rhs = build_int_cst (type, 0);
    }

  /* Make a new bitfield reference, shift the constant over the
     appropriate number of bits and mask it with the computed mask
     (in case this was a signed field).  If we changed it, make a new one.  */
  lhs = make_bit_field_ref (linner, unsigned_type, nbitsize, nbitpos, 1);
  if (lvolatilep)
    {
      TREE_SIDE_EFFECTS (lhs) = 1;
      TREE_THIS_VOLATILE (lhs) = 1;
    }

  rhs = const_binop (BIT_AND_EXPR,
		     const_binop (LSHIFT_EXPR,
				  fold_convert (unsigned_type, rhs),
				  size_int (lbitpos), 0),
		     mask, 0);

  return build2 (code, compare_type,
		 build2 (BIT_AND_EXPR, unsigned_type, lhs, mask),
		 rhs);
}
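/* As an illustration (assuming little-endian bit numbering and a
   byte-sized best mode): for "s.f == 2" where F is a 3-bit field at
   bit position 4, the mask comes out as 0x70 and the shifted
   constant as 0x20, so the comparison becomes
   (containing_byte & 0x70) == 0x20, with no extraction shift.  */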
/* Subroutine for fold_truthop: decode a field reference.

   If EXP is a comparison reference, we return the innermost reference.

   *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
   set to the starting bit number.

   If the innermost field can be completely contained in a mode-sized
   unit, *PMODE is set to that mode.  Otherwise, it is set to VOIDmode.

   *PVOLATILEP is set to 1 if any expression encountered is volatile;
   otherwise it is not changed.

   *PUNSIGNEDP is set to the signedness of the field.

   *PMASK is set to the mask used.  This is either contained in a
   BIT_AND_EXPR or derived from the width of the field.

   *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.

   Return 0 if this is not a component reference or is one that we can't
   do anything with.  */

static tree
decode_field_reference (tree exp, HOST_WIDE_INT *pbitsize,
			HOST_WIDE_INT *pbitpos, enum machine_mode *pmode,
			int *punsignedp, int *pvolatilep,
			tree *pmask, tree *pand_mask)
{
  tree outer_type = 0;
  tree and_mask = 0;
  tree mask, inner, offset;
  tree unsigned_type;
  unsigned int precision;

  /* All the optimizations using this function assume integer fields.
     There are problems with FP fields since the type_for_size call
     below can fail for, e.g., XFmode.  */
  if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
    return 0;

  /* We are interested in the bare arrangement of bits, so strip everything
     that doesn't affect the machine mode.  However, record the type of the
     outermost expression if it may matter below.  */
  if (TREE_CODE (exp) == NOP_EXPR
      || TREE_CODE (exp) == CONVERT_EXPR
      || TREE_CODE (exp) == NON_LVALUE_EXPR)
    outer_type = TREE_TYPE (exp);