/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision etc. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */

/* The entry points in this file are fold, size_int_wide, size_binop
   and force_fit_type.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.

   force_fit_type takes a constant, an overflowable flag and prior
   overflow indicators.  It forces the value to fit the type and sets
   TREE_OVERFLOW and TREE_CONSTANT_OVERFLOW as appropriate.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "flags.h"
#include "tree.h"
#include "real.h"
#include "rtl.h"
#include "expr.h"
#include "tm_p.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "langhooks.h"
#include "md5.h"

/* The following constants represent a bit based encoding of GCC's
   comparison operators.  This encoding simplifies transformations
   on relational comparison operators, such as AND and OR.  */
enum comparison_code {
  COMPCODE_FALSE = 0,
  COMPCODE_LT = 1,
  COMPCODE_EQ = 2,
  COMPCODE_LE = 3,
  COMPCODE_GT = 4,
  COMPCODE_LTGT = 5,
  COMPCODE_GE = 6,
  COMPCODE_ORD = 7,
  COMPCODE_UNORD = 8,
  COMPCODE_UNLT = 9,
  COMPCODE_UNEQ = 10,
  COMPCODE_UNLE = 11,
  COMPCODE_UNGT = 12,
  COMPCODE_NE = 13,
  COMPCODE_UNGE = 14,
  COMPCODE_TRUE = 15
};

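/* Illustrative sketch (not part of the original file): the encoding above
   gives one bit each to "less", "equal", "greater" and "unordered", so
   combining two comparisons on the same operands reduces to bitwise
   AND/OR.  A standalone demonstration, assuming the LT=1, EQ=2, GT=4
   bit assignment of the enum:  */
#if 0
#include <stdio.h>

enum { LT = 1, EQ = 2, GT = 4, UNORD = 8 };

int
main (void)
{
  int le = LT | EQ;   /* a <= b */
  int ge = GT | EQ;   /* a >= b */

  /* (a <= b) && (a >= b) can only hold when a == b.  */
  printf ("%d\n", (le & ge) == EQ);               /* prints 1 */
  /* (a <= b) || (a >= b) is always true for ordered operands.  */
  printf ("%d\n", (le | ge) == (LT | EQ | GT));   /* prints 1 */
  return 0;
}
#endif
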
static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
static bool negate_mathfn_p (enum built_in_function);
static bool negate_expr_p (tree);
static tree negate_expr (tree);
static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int);
static tree associate_trees (tree, tree, enum tree_code, tree);
static tree const_binop (enum tree_code, tree, tree, int);
static tree fold_convert_const (enum tree_code, tree, tree);
static enum tree_code invert_tree_comparison (enum tree_code, bool);
static enum comparison_code comparison_to_compcode (enum tree_code);
static enum tree_code compcode_to_comparison (enum comparison_code);
static tree combine_comparisons (enum tree_code, enum tree_code,
				 enum tree_code, tree, tree, tree);
static int truth_value_p (enum tree_code);
static int operand_equal_for_comparison_p (tree, tree, tree);
static int twoval_comparison_p (tree, tree *, tree *, int *);
static tree eval_subst (tree, tree, tree, tree, tree);
static tree pedantic_omit_one_operand (tree, tree, tree);
static tree distribute_bit_expr (enum tree_code, tree, tree, tree);
static tree make_bit_field_ref (tree, tree, int, int, int);
static tree optimize_bit_field_compare (enum tree_code, tree, tree, tree);
static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
				    enum machine_mode *, int *, int *,
				    tree *, tree *);
static int all_ones_mask_p (tree, int);
static tree sign_bit_p (tree, tree);
static int simple_operand_p (tree);
static tree range_binop (enum tree_code, tree, tree, int, tree, int);
static tree make_range (tree, int *, tree *, tree *);
static tree build_range_check (tree, tree, int, tree, tree);
static int merge_ranges (int *, tree *, tree *, int, tree, tree, int, tree,
			 tree);
static tree fold_range_test (tree);
static tree fold_cond_expr_with_comparison (tree, tree, tree, tree);
static tree unextend (tree, int, int, tree);
static tree fold_truthop (enum tree_code, tree, tree, tree);
static tree optimize_minmax_comparison (tree);
static tree extract_muldiv (tree, tree, enum tree_code, tree);
static tree extract_muldiv_1 (tree, tree, enum tree_code, tree);
static int multiple_of_p (tree, tree, tree);
static tree fold_binary_op_with_conditional_arg (enum tree_code, tree, tree,
						 tree, int);
static bool fold_real_zero_addition_p (tree, tree, int);
static tree fold_mathfn_compare (enum built_in_function, enum tree_code,
				 tree, tree, tree);
static tree fold_inf_compare (enum tree_code, tree, tree, tree);
static tree fold_div_compare (enum tree_code, tree, tree, tree);
static bool reorder_operands_p (tree, tree);
static tree fold_negate_const (tree, tree);
static tree fold_not_const (tree, tree);
static tree fold_relational_const (enum tree_code, tree, tree, tree);
static tree fold_relational_hi_lo (enum tree_code *, const tree,
				   tree *, tree *);
static bool tree_expr_nonzero_p (tree);

/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
   overflow.  Suppose A, B and SUM have the same respective signs as A1, B1,
   and SUM1.  Then this yields nonzero if overflow occurred during the
   addition.

   Overflow occurs if A and B have the same sign, but A and SUM differ in
   sign.  Use `^' to test whether signs differ, and `< 0' to isolate the
   sign.  */
#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)

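/* Illustrative sketch (not part of the original file): a standalone check
   of the macro above on a plain `long', showing that it flags the wrapped
   LONG_MAX + 1 but not an ordinary carry-free sum.  */
#if 0
#include <limits.h>
#include <stdio.h>

#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)

int
main (void)
{
  long a = LONG_MAX, b = 1;
  /* Do the addition in unsigned arithmetic so the wraparound is
     well defined, then reinterpret the bits.  */
  long sum = (long) ((unsigned long) a + (unsigned long) b);
  printf ("%d\n", OVERFLOW_SUM_SIGN (a, b, sum));  /* prints 1 */

  a = 2, b = 3, sum = a + b;
  printf ("%d\n", OVERFLOW_SUM_SIGN (a, b, sum));  /* prints 0 */
  return 0;
}
#endif
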
/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
   number.  The value of the word is LOWPART + HIGHPART * BASE.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)

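/* Illustrative sketch (not part of the original file): the same half-word
   split on a host where the wide integer is a 64-bit `long long', so BASE
   is 2^32 and LOWPART + HIGHPART * BASE reassembles the value exactly.  */
#if 0
#include <stdio.h>

typedef unsigned long long UHWI;   /* stand-in for unsigned HOST_WIDE_INT */

#define HBITS 64
#define LOWPART(x)  ((x) & (((UHWI) 1 << (HBITS / 2)) - 1))
#define HIGHPART(x) ((UHWI) (x) >> HBITS / 2)
#define BASE ((UHWI) 1 << HBITS / 2)

int
main (void)
{
  UHWI x = 0x123456789abcdef0ULL;
  printf ("%llx %llx\n", LOWPART (x), HIGHPART (x)); /* 9abcdef0 12345678 */
  printf ("%d\n", x == LOWPART (x) + HIGHPART (x) * BASE); /* prints 1 */
  return 0;
}
#endif
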
/* Unpack a two-word integer into 4 words.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   WORDS points to the array of HOST_WIDE_INTs.  */

static void
encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}

/* Pack an array of 4 words into a two-word integer.
   WORDS points to the array of words.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
	HOST_WIDE_INT *hi)
{
  *low = words[0] + words[1] * BASE;
  *hi = words[2] + words[3] * BASE;
}

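/* Illustrative sketch (not part of the original file): a self-contained
   encode/decode pair over 32-bit host words (16-bit digits), showing that
   the 4-word form round-trips a (low, high) pair.  */
#if 0
#include <stdio.h>

typedef int HWI;            /* stand-in for HOST_WIDE_INT */
typedef unsigned int UHWI;

#define BASE ((UHWI) 1 << 16)
#define LOWPART(x)  ((x) & (BASE - 1))
#define HIGHPART(x) ((UHWI) (x) >> 16)

static void
encode (HWI *words, UHWI low, HWI hi)
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}

static void
decode (HWI *words, UHWI *low, HWI *hi)
{
  *low = words[0] + words[1] * BASE;
  *hi = words[2] + words[3] * BASE;
}

int
main (void)
{
  HWI words[4], hi;
  UHWI low;

  encode (words, 0xdeadbeefu, 0x1234);
  decode (words, &low, &hi);
  printf ("%d\n", low == 0xdeadbeefu && hi == 0x1234);  /* prints 1 */
  return 0;
}
#endif
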
/* T is an INT_CST node.  OVERFLOWABLE indicates if we are interested
   in overflow of the value, when >0 we are only interested in signed
   overflow, for <0 we are interested in any overflow.  OVERFLOWED
   indicates whether overflow has already occurred.  CONST_OVERFLOWED
   indicates whether constant overflow has already occurred.  We force
   T's value to be within range of T's type (by setting to 0 or 1 all
   the bits outside the type's range).  We set TREE_OVERFLOW if
	OVERFLOWED is nonzero,
	or OVERFLOWABLE is >0 and signed overflow occurs,
	or OVERFLOWABLE is <0 and any overflow occurs.
   We set TREE_CONSTANT_OVERFLOW if
	CONST_OVERFLOWED is nonzero
	or we set TREE_OVERFLOW.
   We return either the original T, or a copy.  */

tree
force_fit_type (tree t, int overflowable,
		bool overflowed, bool overflowed_const)
{
  unsigned HOST_WIDE_INT low;
  HOST_WIDE_INT high;
  unsigned int prec;
  int sign_extended_type;

  gcc_assert (TREE_CODE (t) == INTEGER_CST);

  low = TREE_INT_CST_LOW (t);
  high = TREE_INT_CST_HIGH (t);

  if (POINTER_TYPE_P (TREE_TYPE (t))
      || TREE_CODE (TREE_TYPE (t)) == OFFSET_TYPE)
    prec = POINTER_SIZE;
  else
    prec = TYPE_PRECISION (TREE_TYPE (t));
  /* Size types *are* sign extended.  */
  sign_extended_type = (!TYPE_UNSIGNED (TREE_TYPE (t))
			|| (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE
			    && TYPE_IS_SIZETYPE (TREE_TYPE (t))));

  /* First clear all bits that are beyond the type's precision.  */

  if (prec == 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
  else
    {
      high = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
	low &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  if (!sign_extended_type)
    /* No sign extension */;
  else if (prec == 2 * HOST_BITS_PER_WIDE_INT)
    /* Correct width already.  */;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      /* Sign extend top half? */
      if (high & ((unsigned HOST_WIDE_INT)1
		  << (prec - HOST_BITS_PER_WIDE_INT - 1)))
	high |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else if (prec == HOST_BITS_PER_WIDE_INT)
    {
      if ((HOST_WIDE_INT)low < 0)
	high = -1;
    }
  else
    {
      /* Sign extend bottom half? */
      if (low & ((unsigned HOST_WIDE_INT)1 << (prec - 1)))
	{
	  high = -1;
	  low |= (HOST_WIDE_INT)(-1) << prec;
	}
    }

  /* If the value changed, return a new node.  */
  if (overflowed || overflowed_const
      || low != TREE_INT_CST_LOW (t) || high != TREE_INT_CST_HIGH (t))
    {
      t = build_int_cst_wide (TREE_TYPE (t), low, high);

      if (overflowed
	  || overflowable < 0
	  || (overflowable > 0 && sign_extended_type))
	{
	  t = copy_node (t);
	  TREE_OVERFLOW (t) = 1;
	  TREE_CONSTANT_OVERFLOW (t) = 1;
	}
      else if (overflowed_const)
	{
	  t = copy_node (t);
	  TREE_CONSTANT_OVERFLOW (t) = 1;
	}
    }

  return t;
}

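/* Illustrative sketch (not part of the original file): the core of the
   masking and sign-extension logic above, reduced to fitting a value into
   an N-bit signed field of one 64-bit word.  `fit_signed' is a hypothetical
   helper name; assumes 0 < prec < 64.  */
#if 0
#include <stdio.h>

/* Truncate V to PREC bits, then sign-extend the result.  */
static long long
fit_signed (long long v, unsigned prec)
{
  unsigned long long u = (unsigned long long) v;

  u &= ~(~0ULL << prec);            /* clear bits beyond PREC */
  if (u & (1ULL << (prec - 1)))     /* sign bit of the field set? */
    u |= ~0ULL << prec;             /* sign extend */
  return (long long) u;
}

int
main (void)
{
  printf ("%lld\n", fit_signed (0xff, 8));  /* prints -1 */
  printf ("%lld\n", fit_signed (0x7f, 8));  /* prints 127 */
  printf ("%lld\n", fit_signed (300, 8));   /* prints 44 */
  return 0;
}
#endif
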
/* Add two doubleword integers with doubleword result.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
add_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
	    unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
	    unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT l;
  HOST_WIDE_INT h;

  l = l1 + l2;
  h = h1 + h2 + (l < l1);

  *lv = l;
  *hv = h;
  return OVERFLOW_SUM_SIGN (h1, h2, h);
}

/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
	    unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = -l1;
      *hv = ~h1;
      return 0;
    }
}

/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
mul_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
	    unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
	    unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  unsigned HOST_WIDE_INT carry;
  int i, j, k;
  unsigned HOST_WIDE_INT toplow, neglow;
  HOST_WIDE_INT tophigh, neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  memset (prod, 0, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
	{
	  k = i + j;
	  /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
	  carry += arg1[i] * arg2[j];
	  /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
	  carry += prod[k];
	  prod[k] = LOWPART (carry);
	  carry = HIGHPART (carry);
	}
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);	/* This ignores prod[4] through prod[4*2-1] */

  /* Check for overflow by calculating the top half of the answer in full;
     it should agree with the low half's sign bit.  */
  decode (prod + 4, &toplow, &tophigh);
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}

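/* Illustrative sketch (not part of the original file): the same schoolbook
   scheme in miniature -- multiplying two 32-bit values via 16-bit digits,
   carrying the HIGHPART between columns, matching the bounds noted in the
   comments above.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned a = 0x12345678u, b = 0x9abcdef0u;
  unsigned ad[2] = { a & 0xffffu, a >> 16 };
  unsigned bd[2] = { b & 0xffffu, b >> 16 };
  unsigned prod[4] = { 0, 0, 0, 0 };
  int i, j;

  for (i = 0; i < 2; i++)
    {
      unsigned carry = 0;
      for (j = 0; j < 2; j++)
	{
	  /* digit product <= 0xFFFE0001, plus prod and carry still
	     fits in 32 bits.  */
	  carry += ad[i] * bd[j] + prod[i + j];
	  prod[i + j] = carry & 0xffffu;
	  carry >>= 16;
	}
      prod[i + 2] = carry;
    }

  /* The low 32 bits of the product are prod[0] + prod[1] * 2^16.  */
  printf ("%d\n", prod[0] + (prod[1] << 16) == a * b);  /* prints 1 */
  return 0;
}
#endif
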
/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
	       HOST_WIDE_INT count, unsigned int prec,
	       unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith)
{
  unsigned HOST_WIDE_INT signmask;

  if (count < 0)
    {
      rshift_double (l1, h1, -count, prec, lv, hv, arith);
      return;
    }

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
	 ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
	     | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
      *lv = l1 << count;
    }

  /* Sign extend all bits that are beyond the precision.  */

  signmask = -((prec > HOST_BITS_PER_WIDE_INT
		? ((unsigned HOST_WIDE_INT) *hv
		   >> (prec - HOST_BITS_PER_WIDE_INT - 1))
		: (*lv >> (prec - 1))) & 1);

  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
      *lv |= signmask << prec;
    }
}

/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
	       HOST_WIDE_INT count, unsigned int prec,
	       unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
	       int arith)
{
  unsigned HOST_WIDE_INT signmask;

  signmask = (arith
	      ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
	      : 0);

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
	 ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = 0;
      *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = (unsigned HOST_WIDE_INT) h1 >> count;
      *lv = ((l1 >> count)
	     | ((unsigned HOST_WIDE_INT) h1 << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
    }

  /* Zero / sign extend all bits that are beyond the precision.  */

  if (count >= (HOST_WIDE_INT)prec)
    {
      *hv = signmask;
      *lv = signmask;
    }
  else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
      *lv |= signmask << (prec - count);
    }
}

/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
		HOST_WIDE_INT count, unsigned int prec,
		unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}

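/* Illustrative sketch (not part of the original file): the same
   rotate-as-two-shifts identity on a single 32-bit word,
   rot_left (x, n) == (x << n) | (x >> (32 - n)) for 0 < n < 32.
   `rot_left' is a hypothetical helper name.  */
#if 0
#include <stdio.h>

static unsigned
rot_left (unsigned x, unsigned n)
{
  n %= 32;
  if (n == 0)
    return x;                       /* avoid shifting by the full width */
  return (x << n) | (x >> (32 - n));
}

int
main (void)
{
  printf ("%08x\n", rot_left (0x80000001u, 4));  /* prints 00000018 */
  return 0;
}
#endif
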
/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
		HOST_WIDE_INT count, unsigned int prec,
		unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}

/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR.
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */

int
div_and_round_double (enum tree_code code, int uns,
		      unsigned HOST_WIDE_INT lnum_orig, /* num == numerator == dividend */
		      HOST_WIDE_INT hnum_orig,
		      unsigned HOST_WIDE_INT lden_orig, /* den == denominator == divisor */
		      HOST_WIDE_INT hden_orig,
		      unsigned HOST_WIDE_INT *lquo,
		      HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
		      HOST_WIDE_INT *hrem)
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];	/* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  int i, j;
  unsigned HOST_WIDE_INT work;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  unsigned HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  if (hden == 0 && lden == 0)
    overflow = 1, lden = 1;

  /* Calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
	{
	  quo_neg = ~ quo_neg;
	  /* (minimum integer) / (-1) is the only overflow case.  */
	  if (neg_double (lnum, hnum, &lnum, &hnum)
	      && ((HOST_WIDE_INT) lden & hden) == -1)
	    overflow = 1;
	}
      if (hden < 0)
	{
	  quo_neg = ~ quo_neg;
	  neg_double (lden, hden, &lden, &hden);
	}
    }

  if (hnum == 0 && hden == 0)
    {				/* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / lden;
      goto finish_up;
    }

  if (hnum == 0)
    {				/* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  memset (quo, 0, sizeof quo);

  memset (num, 0, sizeof num);	/* to zero 9th element */
  memset (den, 0, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
    {
      /* hnum != 0 already checked.  */
      for (i = 4 - 1; i >= 0; i--)
	{
	  work = num[i] + carry * BASE;
	  quo[i] = work / lden;
	  carry = work % lden;
	}
    }
  else
    {
      /* Full double precision division,
	 with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int num_hi_sig, den_hi_sig;
      unsigned HOST_WIDE_INT quo_est, scale;

      /* Find the highest nonzero divisor digit.  */
      for (i = 4 - 1;; i--)
	if (den[i] != 0)
	  {
	    den_hi_sig = i;
	    break;
	  }

      /* Insure that the first digit of the divisor is at least BASE/2.
	 This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1)
	{		/* scale divisor and dividend */
	  carry = 0;
	  for (i = 0; i <= 4 - 1; i++)
	    {
	      work = (num[i] * scale) + carry;
	      num[i] = LOWPART (work);
	      carry = HIGHPART (work);
	    }

	  num[4] = carry;
	  carry = 0;
	  for (i = 0; i <= 4 - 1; i++)
	    {
	      work = (den[i] * scale) + carry;
	      den[i] = LOWPART (work);
	      carry = HIGHPART (work);
	      if (den[i] != 0) den_hi_sig = i;
	    }
	}

      num_hi_sig = 4;

      /* Main loop */
      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
	{
	  /* Guess the next quotient digit, quo_est, by dividing the first
	     two remaining dividend digits by the high order quotient digit.
	     quo_est is never low and is at most 2 high.  */
	  unsigned HOST_WIDE_INT tmp;

	  num_hi_sig = i + den_hi_sig + 1;
	  work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
	  if (num[num_hi_sig] != den[den_hi_sig])
	    quo_est = work / den[den_hi_sig];
	  else
	    quo_est = BASE - 1;

	  /* Refine quo_est so it's usually correct, and at most one high.  */
	  tmp = work - quo_est * den[den_hi_sig];
	  if (tmp < BASE
	      && (den[den_hi_sig - 1] * quo_est
		  > (tmp * BASE + num[num_hi_sig - 2])))
	    quo_est--;

	  /* Try QUO_EST as the quotient digit, by multiplying the
	     divisor by QUO_EST and subtracting from the remaining dividend.
	     Keep in mind that QUO_EST is the I - 1st digit.  */

	  carry = 0;
	  for (j = 0; j <= den_hi_sig; j++)
	    {
	      work = quo_est * den[j] + carry;
	      carry = HIGHPART (work);
	      work = num[i + j] - LOWPART (work);
	      num[i + j] = LOWPART (work);
	      carry += HIGHPART (work) != 0;
	    }

	  /* If quo_est was high by one, then num[i] went negative and
	     we need to correct things.  */
	  if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
	    {
	      quo_est--;
	      carry = 0;		/* add divisor back in */
	      for (j = 0; j <= den_hi_sig; j++)
		{
		  work = num[i + j] + den[j] + carry;
		  carry = HIGHPART (work);
		  num[i + j] = LOWPART (work);
		}

	      num [num_hi_sig] += carry;
	    }

	  /* Store the quotient digit.  */
	  quo[i] = quo_est;
	}
    }

  decode (quo, lquo, hquo);

 finish_up:
  /* If result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* Compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:	/* round toward zero */
    case EXACT_DIV_EXPR:	/* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:	/* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio < 0 && rem != 0 */
	{
	  /* quo = quo - 1;  */
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
		      lquo, hquo);
	}
      else
	return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:		/* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
	{
	  add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
		      lquo, hquo);
	}
      else
	return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:	/* round to closest integer */
      {
	unsigned HOST_WIDE_INT labs_rem = *lrem;
	HOST_WIDE_INT habs_rem = *hrem;
	unsigned HOST_WIDE_INT labs_den = lden, ltwice;
	HOST_WIDE_INT habs_den = hden, htwice;

	/* Get absolute values.  */
	if (*hrem < 0)
	  neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
	if (hden < 0)
	  neg_double (lden, hden, &labs_den, &habs_den);

	/* If (2 * abs (lrem) >= abs (lden)) */
	mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
		    labs_rem, habs_rem, &ltwice, &htwice);

	if (((unsigned HOST_WIDE_INT) habs_den
	     < (unsigned HOST_WIDE_INT) htwice)
	    || (((unsigned HOST_WIDE_INT) habs_den
		 == (unsigned HOST_WIDE_INT) htwice)
		&& (labs_den < ltwice)))
	  {
	    if (*hquo < 0)
	      /* quo = quo - 1;  */
	      add_double (*lquo, *hquo,
			  (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
	    else
	      /* quo = quo + 1;  */
	      add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
			  lquo, hquo);
	  }
	else
	  return overflow;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}

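/* Illustrative sketch (not part of the original file): the same
   quotient-adjustment rules on ordinary `int' division.  C's `/' truncates
   toward zero; floor and ceiling variants adjust the quotient by one
   exactly when a nonzero remainder has the corresponding sign relationship.
   `floor_div' and `ceil_div' are hypothetical helper names.  */
#if 0
#include <stdio.h>

static int
floor_div (int n, int d)
{
  int q = n / d, r = n % d;
  return q - (r != 0 && (r < 0) != (d < 0));  /* round toward -inf */
}

static int
ceil_div (int n, int d)
{
  int q = n / d, r = n % d;
  return q + (r != 0 && (r < 0) == (d < 0));  /* round toward +inf */
}

int
main (void)
{
  printf ("%d %d %d\n", -7 / 2, floor_div (-7, 2), ceil_div (-7, 2));
  /* prints -3 -4 -3 */
  return 0;
}
#endif
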
/* Return true if built-in mathematical function specified by CODE
   preserves the sign of its argument, i.e. -f(x) == f(-x).  */

static bool
negate_mathfn_p (enum built_in_function code)
{
  switch (code)
    {
    case BUILT_IN_ASIN:
    case BUILT_IN_ASINF:
    case BUILT_IN_ASINL:
    case BUILT_IN_ATAN:
    case BUILT_IN_ATANF:
    case BUILT_IN_ATANL:
    case BUILT_IN_SIN:
    case BUILT_IN_SINF:
    case BUILT_IN_SINL:
    case BUILT_IN_TAN:
    case BUILT_IN_TANF:
    case BUILT_IN_TANL:
      return true;

    default:
      break;
    }
  return false;
}

/* Check whether we may negate an integer constant T without causing
   overflow.  */

static bool
may_negate_without_overflow_p (tree t)
{
  unsigned HOST_WIDE_INT val;
  unsigned int prec;
  tree type;

  gcc_assert (TREE_CODE (t) == INTEGER_CST);

  type = TREE_TYPE (t);
  if (TYPE_UNSIGNED (type))
    return false;

  prec = TYPE_PRECISION (type);
  if (prec > HOST_BITS_PER_WIDE_INT)
    {
      if (TREE_INT_CST_LOW (t) != 0)
	return true;
      prec -= HOST_BITS_PER_WIDE_INT;
      val = TREE_INT_CST_HIGH (t);
    }
  else
    val = TREE_INT_CST_LOW (t);
  if (prec < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
  return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1));
}

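/* Illustrative sketch (not part of the original file): the one value the
   check above rejects.  In an N-bit two's complement type only the minimum
   (bit pattern 1 << (N-1)) has no representable negation.  */
#if 0
#include <limits.h>
#include <stdio.h>

int
main (void)
{
  /* Negate via unsigned arithmetic to avoid signed-overflow UB.  */
  unsigned u = 0u - (unsigned) INT_MIN;
  printf ("%d\n", u == (unsigned) INT_MIN);    /* prints 1: -INT_MIN wraps */
  printf ("%d\n", -(INT_MIN + 1) == INT_MAX);  /* every other value is fine */
  return 0;
}
#endif
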
/* Determine whether an expression T can be cheaply negated using
   the function negate_expr.  */

static bool
negate_expr_p (tree t)
{
  tree type;

  if (t == 0)
    return false;

  type = TREE_TYPE (t);

  STRIP_SIGN_NOPS (t);
  switch (TREE_CODE (t))
    {
    case INTEGER_CST:
      if (TYPE_UNSIGNED (type) || ! flag_trapv)
	return true;

      /* Check that -CST will not overflow type.  */
      return may_negate_without_overflow_p (t);

    case REAL_CST:
    case NEGATE_EXPR:
      return true;

    case COMPLEX_CST:
      return negate_expr_p (TREE_REALPART (t))
	     && negate_expr_p (TREE_IMAGPART (t));

    case PLUS_EXPR:
      if (FLOAT_TYPE_P (type) && !flag_unsafe_math_optimizations)
	return false;
      /* -(A + B) -> (-B) - A.  */
      if (negate_expr_p (TREE_OPERAND (t, 1))
	  && reorder_operands_p (TREE_OPERAND (t, 0),
				 TREE_OPERAND (t, 1)))
	return true;
      /* -(A + B) -> (-A) - B.  */
      return negate_expr_p (TREE_OPERAND (t, 0));

    case MINUS_EXPR:
      /* We can't turn -(A-B) into B-A when we honor signed zeros.  */
      return (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
	     && reorder_operands_p (TREE_OPERAND (t, 0),
				    TREE_OPERAND (t, 1));

    case MULT_EXPR:
      if (TYPE_UNSIGNED (TREE_TYPE (t)))
	break;

      /* Fall through.  */

    case RDIV_EXPR:
      if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
	return negate_expr_p (TREE_OPERAND (t, 1))
	       || negate_expr_p (TREE_OPERAND (t, 0));
      break;

    case NOP_EXPR:
      /* Negate -((double)float) as (double)(-float).  */
      if (TREE_CODE (type) == REAL_TYPE)
	{
	  tree tem = strip_float_extensions (t);
	  if (tem != t)
	    return negate_expr_p (tem);
	}
      break;

    case CALL_EXPR:
      /* Negate -f(x) as f(-x).  */
      if (negate_mathfn_p (builtin_mathfn_code (t)))
	return negate_expr_p (TREE_VALUE (TREE_OPERAND (t, 1)));
      break;

    case RSHIFT_EXPR:
      /* Optimize -((int)x >> 31) into (unsigned)x >> 31.  */
      if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
	{
	  tree op1 = TREE_OPERAND (t, 1);
	  if (TREE_INT_CST_HIGH (op1) == 0
	      && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
		 == TREE_INT_CST_LOW (op1))
	    return true;
	}
      break;

    default:
      break;
    }
  return false;
}

/* Given T, an expression, return the negation of T.  Allow for T to be
   null, in which case return null.  */

static tree
negate_expr (tree t)
{
  tree type;
  tree tem;

  if (t == 0)
    return 0;

  type = TREE_TYPE (t);
  STRIP_SIGN_NOPS (t);

  switch (TREE_CODE (t))
    {
    case INTEGER_CST:
      tem = fold_negate_const (t, type);
      if (! TREE_OVERFLOW (tem)
	  || TYPE_UNSIGNED (type)
	  || ! flag_trapv)
	return tem;
      break;

    case REAL_CST:
      tem = fold_negate_const (t, type);
      /* Two's complement FP formats, such as c4x, may overflow.  */
      if (! TREE_OVERFLOW (tem) || ! flag_trapping_math)
	return fold_convert (type, tem);
      break;

    case COMPLEX_CST:
      {
	tree rpart = negate_expr (TREE_REALPART (t));
	tree ipart = negate_expr (TREE_IMAGPART (t));

	if ((TREE_CODE (rpart) == REAL_CST
	     && TREE_CODE (ipart) == REAL_CST)
	    || (TREE_CODE (rpart) == INTEGER_CST
		&& TREE_CODE (ipart) == INTEGER_CST))
	  return build_complex (type, rpart, ipart);
      }
      break;

    case NEGATE_EXPR:
      return fold_convert (type, TREE_OPERAND (t, 0));

    case PLUS_EXPR:
      if (! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
	{
	  /* -(A + B) -> (-B) - A.  */
	  if (negate_expr_p (TREE_OPERAND (t, 1))
	      && reorder_operands_p (TREE_OPERAND (t, 0),
				     TREE_OPERAND (t, 1)))
	    {
	      tem = negate_expr (TREE_OPERAND (t, 1));
	      tem = fold (build2 (MINUS_EXPR, TREE_TYPE (t),
				  tem, TREE_OPERAND (t, 0)));
	      return fold_convert (type, tem);
	    }

	  /* -(A + B) -> (-A) - B.  */
	  if (negate_expr_p (TREE_OPERAND (t, 0)))
	    {
	      tem = negate_expr (TREE_OPERAND (t, 0));
	      tem = fold (build2 (MINUS_EXPR, TREE_TYPE (t),
				  tem, TREE_OPERAND (t, 1)));
	      return fold_convert (type, tem);
	    }
	}
      break;

    case MINUS_EXPR:
      /* - (A - B) -> B - A  */
      if ((! FLOAT_TYPE_P (type) || flag_unsafe_math_optimizations)
	  && reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1)))
	return fold_convert (type,
			     fold (build2 (MINUS_EXPR, TREE_TYPE (t),
					   TREE_OPERAND (t, 1),
					   TREE_OPERAND (t, 0))));
      break;

    case MULT_EXPR:
      if (TYPE_UNSIGNED (TREE_TYPE (t)))
	break;

      /* Fall through.  */

    case RDIV_EXPR:
      if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
	{
	  tem = TREE_OPERAND (t, 1);
	  if (negate_expr_p (tem))
	    return fold_convert (type,
				 fold (build2 (TREE_CODE (t), TREE_TYPE (t),
					       TREE_OPERAND (t, 0),
					       negate_expr (tem))));
	  tem = TREE_OPERAND (t, 0);
	  if (negate_expr_p (tem))
	    return fold_convert (type,
				 fold (build2 (TREE_CODE (t), TREE_TYPE (t),
					       negate_expr (tem),
					       TREE_OPERAND (t, 1))));
	}
      break;

    case NOP_EXPR:
      /* Convert -((double)float) into (double)(-float).  */
      if (TREE_CODE (type) == REAL_TYPE)
	{
	  tem = strip_float_extensions (t);
	  if (tem != t && negate_expr_p (tem))
	    return fold_convert (type, negate_expr (tem));
	}
      break;

    case CALL_EXPR:
      /* Negate -f(x) as f(-x).  */
      if (negate_mathfn_p (builtin_mathfn_code (t))
	  && negate_expr_p (TREE_VALUE (TREE_OPERAND (t, 1))))
	{
	  tree fndecl, arg, arglist;

	  fndecl = get_callee_fndecl (t);
	  arg = negate_expr (TREE_VALUE (TREE_OPERAND (t, 1)));
	  arglist = build_tree_list (NULL_TREE, arg);
	  return build_function_call_expr (fndecl, arglist);
	}
      break;

    case RSHIFT_EXPR:
      /* Optimize -((int)x >> 31) into (unsigned)x >> 31.  */
      if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
	{
	  tree op1 = TREE_OPERAND (t, 1);
	  if (TREE_INT_CST_HIGH (op1) == 0
	      && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
		 == TREE_INT_CST_LOW (op1))
	    {
	      tree ntype = TYPE_UNSIGNED (type)
			   ? lang_hooks.types.signed_type (type)
			   : lang_hooks.types.unsigned_type (type);
	      tree temp = fold_convert (ntype, TREE_OPERAND (t, 0));
	      temp = fold (build2 (RSHIFT_EXPR, ntype, temp, op1));
	      return fold_convert (type, temp);
	    }
	}
      break;

    default:
      break;
    }

  tem = fold (build1 (NEGATE_EXPR, TREE_TYPE (t), t));
  return fold_convert (type, tem);
}

/* Split a tree IN into a constant, literal and variable parts that could be
   combined with CODE to make IN.  "constant" means an expression with
   TREE_CONSTANT but that isn't an actual constant.  CODE must be a
   commutative arithmetic operation.  Store the constant part into *CONP,
   the literal in *LITP and return the variable part.  If a part isn't
   present, set it to null.  If the tree does not decompose in this way,
   return the entire tree as the variable part and the other parts as null.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.  In that
   case, we negate an operand that was subtracted.  Except if it is a
   literal for which we use *MINUS_LITP instead.

   If NEGATE_P is true, we are negating all of IN, again except a literal
   for which we use *MINUS_LITP instead.

   If IN is itself a literal or constant, return it as appropriate.

   Note that we do not guarantee that any of the three values will be the
   same type as IN, but they will have the same signedness and mode.  */

static tree
split_tree (tree in, enum tree_code code, tree *conp, tree *litp,
	    tree *minus_litp, int negate_p)
{
  tree var = 0;

  *conp = 0;
  *litp = 0;
  *minus_litp = 0;

  /* Strip any conversions that don't change the machine mode or signedness.  */
  STRIP_SIGN_NOPS (in);

  if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST)
    *litp = in;
  else if (TREE_CODE (in) == code
	   || (! FLOAT_TYPE_P (TREE_TYPE (in))
	       /* We can associate addition and subtraction together (even
		  though the C standard doesn't say so) for integers because
		  the value is not affected.  For reals, the value might be
		  affected, so we can't.  */
	       && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
		   || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
    {
      tree op0 = TREE_OPERAND (in, 0);
      tree op1 = TREE_OPERAND (in, 1);
      int neg1_p = TREE_CODE (in) == MINUS_EXPR;
      int neg_litp_p = 0, neg_conp_p = 0, neg_var_p = 0;

      /* First see if either of the operands is a literal, then a constant.  */
      if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST)
	*litp = op0, op0 = 0;
      else if (TREE_CODE (op1) == INTEGER_CST || TREE_CODE (op1) == REAL_CST)
	*litp = op1, neg_litp_p = neg1_p, op1 = 0;

      if (op0 != 0 && TREE_CONSTANT (op0))
	*conp = op0, op0 = 0;
      else if (op1 != 0 && TREE_CONSTANT (op1))
	*conp = op1, neg_conp_p = neg1_p, op1 = 0;

      /* If we haven't dealt with either operand, this is not a case we can
	 decompose.  Otherwise, VAR is either of the ones remaining, if any.  */
      if (op0 != 0 && op1 != 0)
	var = in;
      else if (op0 != 0)
	var = op0;
      else
	var = op1, neg_var_p = neg1_p;

      /* Now do any needed negations.  */
      if (neg_litp_p)
	*minus_litp = *litp, *litp = 0;
      if (neg_conp_p)
	*conp = negate_expr (*conp);
      if (neg_var_p)
	var = negate_expr (var);
    }
  else if (TREE_CONSTANT (in))
    *conp = in;
  else
    var = in;

  if (negate_p)
    {
      if (*litp)
	*minus_litp = *litp, *litp = 0;
      else if (*minus_litp)
	*litp = *minus_litp, *minus_litp = 0;
      *conp = negate_expr (*conp);
      var = negate_expr (var);
    }

  return var;
}

/* Re-associate trees split by the above function.  T1 and T2 are either
   expressions to associate or null.  Return the new expression, if any.  If
   we build an operation, do it in TYPE and with CODE.  */

static tree
associate_trees (tree t1, tree t2, enum tree_code code, tree type)
{
  if (t1 == 0)
    return t2;
  else if (t2 == 0)
    return t1;

  /* If either input is CODE, a PLUS_EXPR, or a MINUS_EXPR, don't
     try to fold this since we will have infinite recursion.  But do
     deal with any NEGATE_EXPRs.  */
  if (TREE_CODE (t1) == code || TREE_CODE (t2) == code
      || TREE_CODE (t1) == MINUS_EXPR || TREE_CODE (t2) == MINUS_EXPR)
    {
      if (code == PLUS_EXPR)
	{
	  if (TREE_CODE (t1) == NEGATE_EXPR)
	    return build2 (MINUS_EXPR, type, fold_convert (type, t2),
			   fold_convert (type, TREE_OPERAND (t1, 0)));
	  else if (TREE_CODE (t2) == NEGATE_EXPR)
	    return build2 (MINUS_EXPR, type, fold_convert (type, t1),
			   fold_convert (type, TREE_OPERAND (t2, 0)));
	}
      return build2 (code, type, fold_convert (type, t1),
		     fold_convert (type, t2));
    }

  return fold (build2 (code, type, fold_convert (type, t1),
		       fold_convert (type, t2)));
}

/* Combine two integer constants ARG1 and ARG2 under operation CODE
   to produce a new constant.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */

tree
int_const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
{
  unsigned HOST_WIDE_INT int1l, int2l;
  HOST_WIDE_INT int1h, int2h;
  unsigned HOST_WIDE_INT low;
  HOST_WIDE_INT hi;
  unsigned HOST_WIDE_INT garbagel;
  HOST_WIDE_INT garbageh;
  tree t;
  tree type = TREE_TYPE (arg1);
  int uns = TYPE_UNSIGNED (type);
  int is_sizetype
    = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
  int overflow = 0;
  int no_overflow = 0;

  int1l = TREE_INT_CST_LOW (arg1);
  int1h = TREE_INT_CST_HIGH (arg1);
  int2l = TREE_INT_CST_LOW (arg2);
  int2h = TREE_INT_CST_HIGH (arg2);

  switch (code)
    {
    case BIT_IOR_EXPR:
      low = int1l | int2l, hi = int1h | int2h;
      break;

    case BIT_XOR_EXPR:
      low = int1l ^ int2l, hi = int1h ^ int2h;
      break;

    case BIT_AND_EXPR:
      low = int1l & int2l, hi = int1h & int2h;
      break;

    case RSHIFT_EXPR:
      int2l = -int2l;
    case LSHIFT_EXPR:
      /* It's unclear from the C standard whether shifts can overflow.
	 The following code ignores overflow; perhaps a C standard
	 interpretation ruling is needed.  */
      lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
		     &low, &hi, !uns);
      no_overflow = 1;
      break;

    case RROTATE_EXPR:
      int2l = - int2l;
    case LROTATE_EXPR:
      lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
		      &low, &hi);
      break;

    case PLUS_EXPR:
      overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
      break;

    case MINUS_EXPR:
      neg_double (int2l, int2h, &low, &hi);
      add_double (int1l, int1h, low, hi, &low, &hi);
      overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h);
      break;

    case MULT_EXPR:
      overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
      break;

    case TRUNC_DIV_EXPR:
    case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
    case EXACT_DIV_EXPR:
      /* This is a shortcut for a common special case.  */
      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
	  && ! TREE_CONSTANT_OVERFLOW (arg1)
	  && ! TREE_CONSTANT_OVERFLOW (arg2)
	  && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
	{
	  if (code == CEIL_DIV_EXPR)
	    int1l += int2l - 1;

	  low = int1l / int2l, hi = 0;
	  break;
	}

      /* ... fall through ...  */

    case ROUND_DIV_EXPR:
      if (int2h == 0 && int2l == 1)
	{
	  low = int1l, hi = int1h;
	  break;
	}
      if (int1l == int2l && int1h == int2h
	  && ! (int1l == 0 && int1h == 0))
	{
	  low = 1, hi = 0;
	  break;
	}
      overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
				       &low, &hi, &garbagel, &garbageh);
      break;

    case TRUNC_MOD_EXPR:
    case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
      /* This is a shortcut for a common special case.  */
      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
	  && ! TREE_CONSTANT_OVERFLOW (arg1)
	  && ! TREE_CONSTANT_OVERFLOW (arg2)
	  && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
	{
	  if (code == CEIL_MOD_EXPR)
	    int1l += int2l - 1;
	  low = int1l % int2l, hi = 0;
	  break;
	}

      /* ... fall through ...  */

    case ROUND_MOD_EXPR:
      overflow = div_and_round_double (code, uns,
				       int1l, int1h, int2l, int2h,
				       &garbagel, &garbageh, &low, &hi);
      break;

    case MIN_EXPR:
    case MAX_EXPR:
      if (uns)
	low = (((unsigned HOST_WIDE_INT) int1h
		< (unsigned HOST_WIDE_INT) int2h)
	       || (((unsigned HOST_WIDE_INT) int1h
		    == (unsigned HOST_WIDE_INT) int2h)
		   && int1l < int2l));
      else
	low = (int1h < int2h
	       || (int1h == int2h && int1l < int2l));

      if (low == (code == MIN_EXPR))
	low = int1l, hi = int1h;
      else
	low = int2l, hi = int2h;
      break;

    default:
      gcc_unreachable ();
    }

  t = build_int_cst_wide (TREE_TYPE (arg1), low, hi);

  if (notrunc)
    {
      /* Propagate overflow flags ourselves.  */
      if (((!uns || is_sizetype) && overflow)
	  | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
	{
	  t = copy_node (t);
	  TREE_OVERFLOW (t) = 1;
	  TREE_CONSTANT_OVERFLOW (t) = 1;
	}
      else if (TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2))
	{
	  t = copy_node (t);
	  TREE_CONSTANT_OVERFLOW (t) = 1;
	}
    }
  else
    t = force_fit_type (t, 1,
			((!uns || is_sizetype) && overflow)
			| TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2),
			TREE_CONSTANT_OVERFLOW (arg1)
			| TREE_CONSTANT_OVERFLOW (arg2));

  return t;
}

/* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
   constant.  We assume ARG1 and ARG2 have the same data type, or at least
   are the same kind of constant and the same machine mode.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */

static tree
const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
{
  STRIP_NOPS (arg1);
  STRIP_NOPS (arg2);

  if (TREE_CODE (arg1) == INTEGER_CST)
    return int_const_binop (code, arg1, arg2, notrunc);

  if (TREE_CODE (arg1) == REAL_CST)
    {
      enum machine_mode mode;
      REAL_VALUE_TYPE d1;
      REAL_VALUE_TYPE d2;
      REAL_VALUE_TYPE value;
      tree t, type;

      d1 = TREE_REAL_CST (arg1);
      d2 = TREE_REAL_CST (arg2);

      type = TREE_TYPE (arg1);
      mode = TYPE_MODE (type);

      /* Don't perform operation if we honor signaling NaNs and
	 either operand is a NaN.  */
      if (HONOR_SNANS (mode)
	  && (REAL_VALUE_ISNAN (d1) || REAL_VALUE_ISNAN (d2)))
	return NULL_TREE;

      /* Don't perform operation if it would raise a division
	 by zero exception.  */
      if (code == RDIV_EXPR
	  && REAL_VALUES_EQUAL (d2, dconst0)
	  && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	return NULL_TREE;

      /* If either operand is a NaN, just return it.  Otherwise, set up
	 for floating-point trap; we return an overflow.  */
      if (REAL_VALUE_ISNAN (d1))
	return arg1;
      else if (REAL_VALUE_ISNAN (d2))
	return arg2;

      REAL_ARITHMETIC (value, code, d1, d2);

      t = build_real (type, real_value_truncate (mode, value));

      TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
      TREE_CONSTANT_OVERFLOW (t)
	= TREE_OVERFLOW (t)
	  | TREE_CONSTANT_OVERFLOW (arg1)
	  | TREE_CONSTANT_OVERFLOW (arg2);
      return t;
    }
  if (TREE_CODE (arg1) == COMPLEX_CST)
    {
      tree type = TREE_TYPE (arg1);
      tree r1 = TREE_REALPART (arg1);
      tree i1 = TREE_IMAGPART (arg1);
      tree r2 = TREE_REALPART (arg2);
      tree i2 = TREE_IMAGPART (arg2);
      tree t;

      switch (code)
	{
	case PLUS_EXPR:
	  t = build_complex (type,
			     const_binop (PLUS_EXPR, r1, r2, notrunc),
			     const_binop (PLUS_EXPR, i1, i2, notrunc));
	  break;

	case MINUS_EXPR:
	  t = build_complex (type,
			     const_binop (MINUS_EXPR, r1, r2, notrunc),
			     const_binop (MINUS_EXPR, i1, i2, notrunc));
	  break;

	case MULT_EXPR:
	  t = build_complex (type,
			     const_binop (MINUS_EXPR,
					  const_binop (MULT_EXPR,
						       r1, r2, notrunc),
					  const_binop (MULT_EXPR,
						       i1, i2, notrunc),
					  notrunc),
			     const_binop (PLUS_EXPR,
					  const_binop (MULT_EXPR,
						       r1, i2, notrunc),
					  const_binop (MULT_EXPR,
						       i1, r2, notrunc),
					  notrunc));
	  break;

	case RDIV_EXPR:
	  {
	    tree magsquared
	      = const_binop (PLUS_EXPR,
			     const_binop (MULT_EXPR, r2, r2, notrunc),
			     const_binop (MULT_EXPR, i2, i2, notrunc),
			     notrunc);

	    t = build_complex (type,
			       const_binop
			       (INTEGRAL_TYPE_P (TREE_TYPE (r1))
				? TRUNC_DIV_EXPR : RDIV_EXPR,
				const_binop (PLUS_EXPR,
					     const_binop (MULT_EXPR, r1, r2,
							  notrunc),
					     const_binop (MULT_EXPR, i1, i2,
							  notrunc),
					     notrunc),
				magsquared, notrunc),
			       const_binop
			       (INTEGRAL_TYPE_P (TREE_TYPE (r1))
				? TRUNC_DIV_EXPR : RDIV_EXPR,
				const_binop (MINUS_EXPR,
					     const_binop (MULT_EXPR, i1, r2,
							  notrunc),
					     const_binop (MULT_EXPR, r1, i2,
							  notrunc),
					     notrunc),
				magsquared, notrunc));
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
      return t;
    }
  return NULL_TREE;
}

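/* Illustrative sketch (not part of the original file): the same textbook
   identity used above for the complex RDIV_EXPR case,
   (a+bi)/(c+di) = ((ac+bd) + (bc-ad)i) / (c^2+d^2), worked on doubles.  */
#if 0
#include <stdio.h>

int
main (void)
{
  double a = 1.0, b = 2.0;   /* numerator   1 + 2i */
  double c = 3.0, d = 4.0;   /* denominator 3 + 4i */
  double mag = c * c + d * d;               /* |c+di|^2 = 25 */
  double re = (a * c + b * d) / mag;
  double im = (b * c - a * d) / mag;

  printf ("%g %+gi\n", re, im);  /* prints 0.44 +0.08i */
  return 0;
}
#endif
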
/* Create a size type INT_CST node with NUMBER sign extended.  KIND
   indicates which particular sizetype to create.  */

tree
size_int_kind (HOST_WIDE_INT number, enum size_type_kind kind)
{
  return build_int_cst (sizetype_tab[(int) kind], number);
}

/* Combine operands OP1 and OP2 with arithmetic operation CODE.  CODE
   is a tree code.  The type of the result is taken from the operands.
   Both must be the same integer type and it must be a size type.
   If the operands are constant, so is the result.  */

tree
size_binop (enum tree_code code, tree arg0, tree arg1)
{
  tree type = TREE_TYPE (arg0);

  gcc_assert (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
	      && type == TREE_TYPE (arg1));

  /* Handle the special case of two integer constants faster.  */
  if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
    {
      /* And some specific cases even faster than that.  */
      if (code == PLUS_EXPR && integer_zerop (arg0))
	return arg1;
      else if ((code == MINUS_EXPR || code == PLUS_EXPR)
	       && integer_zerop (arg1))
	return arg0;
      else if (code == MULT_EXPR && integer_onep (arg0))
	return arg1;

      /* Handle general case of two integer constants.  */
      return int_const_binop (code, arg0, arg1, 0);
    }

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return error_mark_node;

  return fold (build2 (code, type, arg0, arg1));
}

/* Given two values, either both of sizetype or both of bitsizetype,
   compute the difference between the two values.  Return the value
   in signed type corresponding to the type of the operands.  */

tree
size_diffop (tree arg0, tree arg1)
{
  tree type = TREE_TYPE (arg0);
  tree ctype;

  gcc_assert (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)
	      && type == TREE_TYPE (arg1));

  /* If the type is already signed, just do the simple thing.  */
  if (!TYPE_UNSIGNED (type))
    return size_binop (MINUS_EXPR, arg0, arg1);

  ctype = type == bitsizetype ? sbitsizetype : ssizetype;

  /* If either operand is not a constant, do the conversions to the signed
     type and subtract.  The hardware will do the right thing with any
     overflow in the subtraction.  */
  if (TREE_CODE (arg0) != INTEGER_CST || TREE_CODE (arg1) != INTEGER_CST)
    return size_binop (MINUS_EXPR, fold_convert (ctype, arg0),
		       fold_convert (ctype, arg1));

  /* If ARG0 is larger than ARG1, subtract and return the result in CTYPE.
     Otherwise, subtract the other way, convert to CTYPE (we know that can't
     overflow) and negate (which can't either).  Special-case a result
     of zero while we're here.  */
  if (tree_int_cst_equal (arg0, arg1))
    return fold_convert (ctype, integer_zero_node);
  else if (tree_int_cst_lt (arg1, arg0))
    return fold_convert (ctype, size_binop (MINUS_EXPR, arg0, arg1));
  else
    return size_binop (MINUS_EXPR, fold_convert (ctype, integer_zero_node),
		       fold_convert (ctype, size_binop (MINUS_EXPR,
							arg1, arg0)));
}

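/* Illustrative sketch (not part of the original file): the same
   compare-first trick on plain `size_t', giving a correct signed difference
   without overflowing the subtraction.  `size_diff' is a hypothetical
   helper; assumes the magnitude of the difference fits in `long'.  */
#if 0
#include <stddef.h>
#include <stdio.h>

static long
size_diff (size_t a, size_t b)
{
  /* Subtract the smaller from the larger, then negate if needed;
     each individual step stays in range.  */
  return a >= b ? (long) (a - b) : -(long) (b - a);
}

int
main (void)
{
  printf ("%ld %ld\n", size_diff (7, 10), size_diff (10, 7));
  /* prints -3 3 */
  return 0;
}
#endif
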
/* Attempt to fold type conversion operation CODE of expression ARG1 to
   type TYPE.  If no simplification can be done return NULL_TREE.  */

static tree
fold_convert_const (enum tree_code code, tree type, tree arg1)
{
  int overflow = 0;
  tree t;

  if (TREE_TYPE (arg1) == type)
    return arg1;

  if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type))
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
	{
	  /* If we would build a constant wider than GCC supports,
	     leave the conversion unfolded.  */
	  if (TYPE_PRECISION (type) > 2 * HOST_BITS_PER_WIDE_INT)
	    return NULL_TREE;

	  /* Given an integer constant, make new constant with new type,
	     appropriately sign-extended or truncated.  */
	  t = build_int_cst_wide (type, TREE_INT_CST_LOW (arg1),
				  TREE_INT_CST_HIGH (arg1));

	  t = force_fit_type (t,
			      /* Don't set the overflow when
				 converting a pointer  */
			      !POINTER_TYPE_P (TREE_TYPE (arg1)),
			      (TREE_INT_CST_HIGH (arg1) < 0
			       && (TYPE_UNSIGNED (type)
				   < TYPE_UNSIGNED (TREE_TYPE (arg1))))
			      | TREE_OVERFLOW (arg1),
			      TREE_CONSTANT_OVERFLOW (arg1));
	  return t;
	}
      else if (TREE_CODE (arg1) == REAL_CST)
	{
	  /* The following code implements the floating point to integer
	     conversion rules required by the Java Language Specification,
	     that IEEE NaNs are mapped to zero and values that overflow
	     the target precision saturate, i.e. values greater than
	     INT_MAX are mapped to INT_MAX, and values less than INT_MIN
	     are mapped to INT_MIN.  These semantics are allowed by the
	     C and C++ standards that simply state that the behavior of
	     FP-to-integer conversion is unspecified upon overflow.  */

	  HOST_WIDE_INT high, low;
	  REAL_VALUE_TYPE r;
	  REAL_VALUE_TYPE x = TREE_REAL_CST (arg1);

	  switch (code)
	    {
	    case FIX_TRUNC_EXPR:
	      real_trunc (&r, VOIDmode, &x);
	      break;

	    case FIX_CEIL_EXPR:
	      real_ceil (&r, VOIDmode, &x);
	      break;

	    case FIX_FLOOR_EXPR:
	      real_floor (&r, VOIDmode, &x);
	      break;

	    case FIX_ROUND_EXPR:
	      real_round (&r, VOIDmode, &x);
	      break;

	    default:
	      gcc_unreachable ();
	    }

	  /* If R is NaN, return zero and show we have an overflow.  */
	  if (REAL_VALUE_ISNAN (r))
	    {
	      overflow = 1;
	      high = 0;
	      low = 0;
	    }

	  /* See if R is less than the lower bound or greater than the
	     upper bound.  */

	  if (! overflow)
	    {
	      tree lt = TYPE_MIN_VALUE (type);
	      REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt);
	      if (REAL_VALUES_LESS (r, l))
		{
		  overflow = 1;
		  high = TREE_INT_CST_HIGH (lt);
		  low = TREE_INT_CST_LOW (lt);
		}
	    }

	  if (! overflow)
	    {
	      tree ut = TYPE_MAX_VALUE (type);
	      if (ut)
		{
		  REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut);
		  if (REAL_VALUES_LESS (u, r))
		    {
		      overflow = 1;
		      high = TREE_INT_CST_HIGH (ut);
		      low = TREE_INT_CST_LOW (ut);
		    }
		}
	    }

	  if (! overflow)
	    REAL_VALUE_TO_INT (&low, &high, r);

	  t = build_int_cst_wide (type, low, high);

	  t = force_fit_type (t, -1, overflow | TREE_OVERFLOW (arg1),
			      TREE_CONSTANT_OVERFLOW (arg1));
	  return t;
	}
    }
  else if (TREE_CODE (type) == REAL_TYPE)
    {
      if (TREE_CODE (arg1) == INTEGER_CST)
	return build_real_from_int_cst (type, arg1);
      if (TREE_CODE (arg1) == REAL_CST)
	{
	  if (REAL_VALUE_ISNAN (TREE_REAL_CST (arg1)))
	    {
	      /* We make a copy of ARG1 so that we don't modify an
		 existing constant tree.  */
	      t = copy_node (arg1);
	      TREE_TYPE (t) = type;
	      return t;
	    }

	  t = build_real (type,
			  real_value_truncate (TYPE_MODE (type),
					       TREE_REAL_CST (arg1)));

	  TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
	  TREE_CONSTANT_OVERFLOW (t)
	    = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
	  return t;
	}
    }
  return NULL_TREE;
}

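/* Illustrative sketch (not part of the original file): the saturating
   float-to-integer rule described above, written out for a plain
   double-to-int conversion.  `sat_to_int' is a hypothetical helper name.  */
#if 0
#include <limits.h>
#include <math.h>
#include <stdio.h>

static int
sat_to_int (double x)
{
  if (isnan (x))
    return 0;                    /* NaN maps to zero */
  if (x >= (double) INT_MAX)
    return INT_MAX;              /* saturate upward */
  if (x <= (double) INT_MIN)
    return INT_MIN;              /* saturate downward */
  return (int) x;                /* in range: truncate */
}

int
main (void)
{
  printf ("%d %d %d\n", sat_to_int (1e30), sat_to_int (-1e30),
	  sat_to_int (3.7));
  /* prints 2147483647 -2147483648 3 */
  return 0;
}
#endif
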
/* Convert expression ARG to type TYPE.  Used by the middle-end for
   simple conversions in preference to calling the front-end's convert.  */

tree
fold_convert (tree type, tree arg)
{
  tree orig = TREE_TYPE (arg);
  tree tem;

  if (type == orig)
    return arg;

  if (TREE_CODE (arg) == ERROR_MARK
      || TREE_CODE (type) == ERROR_MARK
      || TREE_CODE (orig) == ERROR_MARK)
    return error_mark_node;

  if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig)
      || lang_hooks.types_compatible_p (TYPE_MAIN_VARIANT (type),
					TYPE_MAIN_VARIANT (orig)))
    return fold (build1 (NOP_EXPR, type, arg));

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE: case CHAR_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
    case POINTER_TYPE: case REFERENCE_TYPE:
    case OFFSET_TYPE:
      if (TREE_CODE (arg) == INTEGER_CST)
	{
	  tem = fold_convert_const (NOP_EXPR, type, arg);
	  if (tem != NULL_TREE)
	    return tem;
	}
      if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
	  || TREE_CODE (orig) == OFFSET_TYPE)
	return fold (build1 (NOP_EXPR, type, arg));
      if (TREE_CODE (orig) == COMPLEX_TYPE)
	{
	  tem = fold (build1 (REALPART_EXPR, TREE_TYPE (orig), arg));
	  return fold_convert (type, tem);
	}
      gcc_assert (TREE_CODE (orig) == VECTOR_TYPE
		  && tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
      return fold (build1 (NOP_EXPR, type, arg));

    case REAL_TYPE:
      if (TREE_CODE (arg) == INTEGER_CST)
	{
	  tem = fold_convert_const (FLOAT_EXPR, type, arg);
	  if (tem != NULL_TREE)
	    return tem;
	}
      else if (TREE_CODE (arg) == REAL_CST)
	{
	  tem = fold_convert_const (NOP_EXPR, type, arg);
	  if (tem != NULL_TREE)
	    return tem;
	}

      switch (TREE_CODE (orig))
	{
	case INTEGER_TYPE: case CHAR_TYPE:
	case BOOLEAN_TYPE: case ENUMERAL_TYPE:
	case POINTER_TYPE: case REFERENCE_TYPE:
	  return fold (build1 (FLOAT_EXPR, type, arg));

	case REAL_TYPE:
	  return fold (build1 (flag_float_store ? CONVERT_EXPR : NOP_EXPR,
			       type, arg));

	case COMPLEX_TYPE:
	  tem = fold (build1 (REALPART_EXPR, TREE_TYPE (orig), arg));
	  return fold_convert (type, tem);

	default:
	  gcc_unreachable ();
	}

    case COMPLEX_TYPE:
      switch (TREE_CODE (orig))
	{
	case INTEGER_TYPE: case CHAR_TYPE:
	case BOOLEAN_TYPE: case ENUMERAL_TYPE:
	case POINTER_TYPE: case REFERENCE_TYPE:
	case REAL_TYPE:
	  return build2 (COMPLEX_EXPR, type,
			 fold_convert (TREE_TYPE (type), arg),
			 fold_convert (TREE_TYPE (type), integer_zero_node));
	case COMPLEX_TYPE:
	  {
	    tree rpart, ipart;

	    if (TREE_CODE (arg) == COMPLEX_EXPR)
	      {
		rpart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 0));
		ipart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 1));
		return fold (build2 (COMPLEX_EXPR, type, rpart, ipart));
	      }

	    arg = save_expr (arg);
	    rpart = fold (build1 (REALPART_EXPR, TREE_TYPE (orig), arg));
	    ipart = fold (build1 (IMAGPART_EXPR, TREE_TYPE (orig), arg));
	    rpart = fold_convert (TREE_TYPE (type), rpart);
	    ipart = fold_convert (TREE_TYPE (type), ipart);
	    return fold (build2 (COMPLEX_EXPR, type, rpart, ipart));
	  }

	default:
	  gcc_unreachable ();
	}

    case VECTOR_TYPE:
      gcc_assert (tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
      gcc_assert (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
		  || TREE_CODE (orig) == VECTOR_TYPE);
      return fold (build1 (NOP_EXPR, type, arg));

    case VOID_TYPE:
      return fold (build1 (CONVERT_EXPR, type, fold_ignored_result (arg)));

    default:
      gcc_unreachable ();
    }
}

/* Return an expr equal to X but certainly not valid as an lvalue.  */

tree
non_lvalue (tree x)
{
  /* We only need to wrap lvalue tree codes.  */
  switch (TREE_CODE (x))
  {
  case VAR_DECL:
  case PARM_DECL:
  case RESULT_DECL:
  case LABEL_DECL:
  case FUNCTION_DECL:
  case SSA_NAME:

  case COMPONENT_REF:
  case INDIRECT_REF:
  case ARRAY_REF:
  case ARRAY_RANGE_REF:
  case BIT_FIELD_REF:
  case OBJ_TYPE_REF:

  case REALPART_EXPR:
  case IMAGPART_EXPR:
  case PREINCREMENT_EXPR:
  case PREDECREMENT_EXPR:
  case SAVE_EXPR:
  case TRY_CATCH_EXPR:
  case WITH_CLEANUP_EXPR:
  case COMPOUND_EXPR:
  case MODIFY_EXPR:
  case TARGET_EXPR:
  case COND_EXPR:
  case BIND_EXPR:
  case MIN_EXPR:
  case MAX_EXPR:

    break;
  default:
    /* Assume the worst for front-end tree codes.  */
    if ((int)TREE_CODE (x) >= NUM_TREE_CODES)
      break;
    return x;
  }
  return build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
}

/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
   Zero means allow extended lvalues.  */

int pedantic_lvalues;

/* When pedantic, return an expr equal to X but certainly not valid as a
   pedantic lvalue.  Otherwise, return X.  */

static tree
pedantic_non_lvalue (tree x)
{
  if (pedantic_lvalues)
    return non_lvalue (x);
  else
    return x;
}

/* Given a tree comparison code, return the code that is the logical inverse
   of the given code.  It is not safe to do this for floating-point
   comparisons, except for NE_EXPR and EQ_EXPR, so we receive a machine mode
   as well: if reversing the comparison is unsafe, return ERROR_MARK.  */

static enum tree_code
invert_tree_comparison (enum tree_code code, bool honor_nans)
{
  if (honor_nans && flag_trapping_math)
    return ERROR_MARK;

  switch (code)
    {
    case EQ_EXPR:
      return NE_EXPR;
    case NE_EXPR:
      return EQ_EXPR;
    case GT_EXPR:
      return honor_nans ? UNLE_EXPR : LE_EXPR;
    case GE_EXPR:
      return honor_nans ? UNLT_EXPR : LT_EXPR;
    case LT_EXPR:
      return honor_nans ? UNGE_EXPR : GE_EXPR;
    case LE_EXPR:
      return honor_nans ? UNGT_EXPR : GT_EXPR;
    case LTGT_EXPR:
      return UNEQ_EXPR;
    case UNEQ_EXPR:
      return LTGT_EXPR;
    case UNGT_EXPR:
      return LE_EXPR;
    case UNGE_EXPR:
      return LT_EXPR;
    case UNLT_EXPR:
      return GE_EXPR;
    case UNLE_EXPR:
      return GT_EXPR;
    case ORDERED_EXPR:
      return UNORDERED_EXPR;
    case UNORDERED_EXPR:
      return ORDERED_EXPR;
    default:
      gcc_unreachable ();
    }
}

2067 swapped. This is safe for floating-point. */
2070 swap_tree_comparison (enum tree_code code)
/* Convert a comparison tree code from an enum tree_code representation
   into a compcode bit-based encoding.  This function is the inverse of
   compcode_to_comparison.  */

static enum comparison_code
comparison_to_compcode (enum tree_code code)
{
  switch (code)
    {
    case LT_EXPR:
      return COMPCODE_LT;
    case EQ_EXPR:
      return COMPCODE_EQ;
    case LE_EXPR:
      return COMPCODE_LE;
    case GT_EXPR:
      return COMPCODE_GT;
    case NE_EXPR:
      return COMPCODE_NE;
    case GE_EXPR:
      return COMPCODE_GE;
    case ORDERED_EXPR:
      return COMPCODE_ORD;
    case UNORDERED_EXPR:
      return COMPCODE_UNORD;
    case UNLT_EXPR:
      return COMPCODE_UNLT;
    case UNEQ_EXPR:
      return COMPCODE_UNEQ;
    case UNLE_EXPR:
      return COMPCODE_UNLE;
    case UNGT_EXPR:
      return COMPCODE_UNGT;
    case LTGT_EXPR:
      return COMPCODE_LTGT;
    case UNGE_EXPR:
      return COMPCODE_UNGE;
    default:
      gcc_unreachable ();
    }
}

/* Convert a compcode bit-based encoding of a comparison operator back
   to GCC's enum tree_code representation.  This function is the
   inverse of comparison_to_compcode.  */

static enum tree_code
compcode_to_comparison (enum comparison_code code)
{
  switch (code)
    {
    case COMPCODE_LT:
      return LT_EXPR;
    case COMPCODE_EQ:
      return EQ_EXPR;
    case COMPCODE_LE:
      return LE_EXPR;
    case COMPCODE_GT:
      return GT_EXPR;
    case COMPCODE_NE:
      return NE_EXPR;
    case COMPCODE_GE:
      return GE_EXPR;
    case COMPCODE_ORD:
      return ORDERED_EXPR;
    case COMPCODE_UNORD:
      return UNORDERED_EXPR;
    case COMPCODE_UNLT:
      return UNLT_EXPR;
    case COMPCODE_UNEQ:
      return UNEQ_EXPR;
    case COMPCODE_UNLE:
      return UNLE_EXPR;
    case COMPCODE_UNGT:
      return UNGT_EXPR;
    case COMPCODE_LTGT:
      return LTGT_EXPR;
    case COMPCODE_UNGE:
      return UNGE_EXPR;
    default:
      gcc_unreachable ();
    }
}

/* Return a tree for the comparison which is the combination of
   doing the AND or OR (depending on CODE) of the two operations LCODE
   and RCODE on the identical operands LL_ARG and LR_ARG.  Take into account
   the possibility of trapping if the mode has NaNs, and return NULL_TREE
   if this makes the transformation invalid.  */

static tree
combine_comparisons (enum tree_code code, enum tree_code lcode,
		     enum tree_code rcode, tree truth_type,
		     tree ll_arg, tree lr_arg)
{
  bool honor_nans = HONOR_NANS (TYPE_MODE (TREE_TYPE (ll_arg)));
  enum comparison_code lcompcode = comparison_to_compcode (lcode);
  enum comparison_code rcompcode = comparison_to_compcode (rcode);
  enum comparison_code compcode;

  switch (code)
    {
    case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR:
      compcode = lcompcode & rcompcode;
      break;

    case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR:
      compcode = lcompcode | rcompcode;
      break;

    default:
      return NULL_TREE;
    }

  if (!honor_nans)
    {
      /* Eliminate unordered comparisons, as well as LTGT and ORD
	 which are not used unless the mode has NaNs.  */
      compcode &= ~COMPCODE_UNORD;
      if (compcode == COMPCODE_LTGT)
	compcode = COMPCODE_NE;
      else if (compcode == COMPCODE_ORD)
	compcode = COMPCODE_TRUE;
    }
  else if (flag_trapping_math)
    {
      /* Check that the original operation and the optimized ones will trap
	 under the same condition.  */
      bool ltrap = (lcompcode & COMPCODE_UNORD) == 0
		   && (lcompcode != COMPCODE_EQ)
		   && (lcompcode != COMPCODE_ORD);
      bool rtrap = (rcompcode & COMPCODE_UNORD) == 0
		   && (rcompcode != COMPCODE_EQ)
		   && (rcompcode != COMPCODE_ORD);
      bool trap = (compcode & COMPCODE_UNORD) == 0
		  && (compcode != COMPCODE_EQ)
		  && (compcode != COMPCODE_ORD);

      /* In a short-circuited boolean expression the LHS might be
	 such that the RHS, if evaluated, will never trap.  For
	 example, in ORD (x, y) && (x < y), we evaluate the RHS only
	 if neither x nor y is NaN.  (This is a mixed blessing: for
	 example, the expression above will never trap, hence
	 optimizing it to x < y would be invalid).  */
      if ((code == TRUTH_ORIF_EXPR && (lcompcode & COMPCODE_UNORD))
	  || (code == TRUTH_ANDIF_EXPR && !(lcompcode & COMPCODE_UNORD)))
	rtrap = false;

      /* If the comparison was short-circuited, and only the RHS
	 trapped, we may now generate a spurious trap.  */
      if (rtrap && !ltrap
	  && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
	return NULL_TREE;

      /* If we changed the conditions that cause a trap, we lose.  */
      if ((ltrap || rtrap) != trap)
	return NULL_TREE;
    }

  if (compcode == COMPCODE_TRUE)
    return constant_boolean_node (true, truth_type);
  else if (compcode == COMPCODE_FALSE)
    return constant_boolean_node (false, truth_type);
  else
    return fold (build2 (compcode_to_comparison (compcode),
			 truth_type, ll_arg, lr_arg));
}

/* Return nonzero if CODE is a tree code that represents a truth value.  */

static int
truth_value_p (enum tree_code code)
{
  return (TREE_CODE_CLASS (code) == '<'
	  || code == TRUTH_AND_EXPR || code == TRUTH_ANDIF_EXPR
	  || code == TRUTH_OR_EXPR || code == TRUTH_ORIF_EXPR
	  || code == TRUTH_XOR_EXPR || code == TRUTH_NOT_EXPR);
}

/* Return nonzero if two operands (typically of the same tree node)
   are necessarily equal.  If either argument has side-effects this
   function returns zero.  FLAGS modifies behavior as follows:

   If OEP_ONLY_CONST is set, only return nonzero for constants.
   This function tests whether the operands are indistinguishable;
   it does not test whether they are equal using C's == operation.
   The distinction is important for IEEE floating point, because
   (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
   (2) two NaNs may be indistinguishable, but NaN!=NaN.

   If OEP_ONLY_CONST is unset, a VAR_DECL is considered equal to itself
   even though it may hold multiple values during a function.
   This is because a GCC tree node guarantees that nothing else is
   executed between the evaluation of its "operands" (which may often
   be evaluated in arbitrary order).  Hence if the operands themselves
   don't side-effect, the VAR_DECLs, PARM_DECLs etc... must hold the
   same value in each operand/subexpression.  Hence leaving OEP_ONLY_CONST
   unset means assuming isochronic (or instantaneous) tree equivalence.
   Unless comparing arbitrary expression trees, such as from different
   statements, this flag can usually be left unset.

   If OEP_PURE_SAME is set, then pure functions with identical arguments
   are considered the same.  It is used when the caller has other ways
   to ensure that global memory is unchanged in between.  */

int
operand_equal_p (tree arg0, tree arg1, unsigned int flags)
{
  /* If one is specified and the other isn't, they aren't equal and if
     neither is specified, they are.

     ??? This is temporary and is meant only to handle the cases of the
     optional operands for COMPONENT_REF and ARRAY_REF.  */
  if ((arg0 && !arg1) || (!arg0 && arg1))
    return 0;
  else if (!arg0 && !arg1)
    return 1;
  /* If either is ERROR_MARK, they aren't equal.  */
  else if (TREE_CODE (arg0) == ERROR_MARK || TREE_CODE (arg1) == ERROR_MARK)
    return 0;

  /* If both types don't have the same signedness, then we can't consider
     them equal.  We must check this before the STRIP_NOPS calls
     because they may change the signedness of the arguments.  */
  if (TYPE_UNSIGNED (TREE_TYPE (arg0)) != TYPE_UNSIGNED (TREE_TYPE (arg1)))
    return 0;

  STRIP_NOPS (arg0);
  STRIP_NOPS (arg1);

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      /* This is needed for conversions and for COMPONENT_REF.
	 Might as well play it safe and always test this.  */
      || TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK
      || TREE_CODE (TREE_TYPE (arg1)) == ERROR_MARK
      || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
    return 0;

  /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
     We don't care about side effects in that case because the SAVE_EXPR
     takes care of that for us. In all other cases, two expressions are
     equal if they have no side effects.  If we have two identical
     expressions with side effects that should be treated the same due
     to the only side effects being identical SAVE_EXPR's, that will
     be detected in the recursive calls below.  */
  if (arg0 == arg1 && ! (flags & OEP_ONLY_CONST)
      && (TREE_CODE (arg0) == SAVE_EXPR
	  || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
    return 1;

  /* Next handle constant cases, those for which we can return 1 even
     if ONLY_CONST is set.  */
  if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1))
    switch (TREE_CODE (arg0))
      {
      case INTEGER_CST:
	return (! TREE_CONSTANT_OVERFLOW (arg0)
		&& ! TREE_CONSTANT_OVERFLOW (arg1)
		&& tree_int_cst_equal (arg0, arg1));

      case REAL_CST:
	return (! TREE_CONSTANT_OVERFLOW (arg0)
		&& ! TREE_CONSTANT_OVERFLOW (arg1)
		&& REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0),
					  TREE_REAL_CST (arg1)));

      case VECTOR_CST:
	{
	  tree v1, v2;

	  if (TREE_CONSTANT_OVERFLOW (arg0)
	      || TREE_CONSTANT_OVERFLOW (arg1))
	    return 0;

	  v1 = TREE_VECTOR_CST_ELTS (arg0);
	  v2 = TREE_VECTOR_CST_ELTS (arg1);
	  while (v1 && v2)
	    {
	      if (!operand_equal_p (TREE_VALUE (v1), TREE_VALUE (v2),
				    flags))
		return 0;
	      v1 = TREE_CHAIN (v1);
	      v2 = TREE_CHAIN (v2);
	    }

	  return v1 == v2;
	}

      case COMPLEX_CST:
	return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1),
				 flags)
		&& operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1),
				    flags));

      case STRING_CST:
	return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1)
		&& ! memcmp (TREE_STRING_POINTER (arg0),
			     TREE_STRING_POINTER (arg1),
			     TREE_STRING_LENGTH (arg0)));

      case ADDR_EXPR:
	return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0),
				0);

      default:
	break;
      }

2398 if (flags & OEP_ONLY_CONST)
2401 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
2404 /* Two conversions are equal only if signedness and modes match. */
2405 switch (TREE_CODE (arg0))
2410 case FIX_TRUNC_EXPR:
2411 case FIX_FLOOR_EXPR:
2412 case FIX_ROUND_EXPR:
2413 if (TYPE_UNSIGNED (TREE_TYPE (arg0))
2414 != TYPE_UNSIGNED (TREE_TYPE (arg1)))
2421 return operand_equal_p (TREE_OPERAND (arg0, 0),
2422 TREE_OPERAND (arg1, 0), flags);
2426 if (operand_equal_p (TREE_OPERAND (arg0, 0),
2427 TREE_OPERAND (arg1, 0), flags)
2428 && operand_equal_p (TREE_OPERAND (arg0, 1),
2429 TREE_OPERAND (arg1, 1), flags))
2432 /* For commutative ops, allow the other order. */
2433 return (commutative_tree_code (TREE_CODE (arg0))
2434 && operand_equal_p (TREE_OPERAND (arg0, 0),
2435 TREE_OPERAND (arg1, 1), flags)
2436 && operand_equal_p (TREE_OPERAND (arg0, 1),
2437 TREE_OPERAND (arg1, 0), flags));
2440 /* If either of the pointer (or reference) expressions we are
2441 dereferencing contain a side effect, these cannot be equal. */
2442 if (TREE_SIDE_EFFECTS (arg0)
2443 || TREE_SIDE_EFFECTS (arg1))
2446 switch (TREE_CODE (arg0))
2451 return operand_equal_p (TREE_OPERAND (arg0, 0),
2452 TREE_OPERAND (arg1, 0), flags);
2455 case ARRAY_RANGE_REF:
2456 return (operand_equal_p (TREE_OPERAND (arg0, 0),
2457 TREE_OPERAND (arg1, 0), flags)
2458 && operand_equal_p (TREE_OPERAND (arg0, 1),
2459 TREE_OPERAND (arg1, 1), flags)
2460 && operand_equal_p (TREE_OPERAND (arg0, 2),
2461 TREE_OPERAND (arg1, 2), flags)
2462 && operand_equal_p (TREE_OPERAND (arg0, 3),
2463 TREE_OPERAND (arg1, 3), flags));
2467 return (operand_equal_p (TREE_OPERAND (arg0, 0),
2468 TREE_OPERAND (arg1, 0), flags)
2469 && operand_equal_p (TREE_OPERAND (arg0, 1),
2470 TREE_OPERAND (arg1, 1), flags)
2471 && operand_equal_p (TREE_OPERAND (arg0, 2),
2472 TREE_OPERAND (arg1, 2), flags));
2476 return (operand_equal_p (TREE_OPERAND (arg0, 0),
2477 TREE_OPERAND (arg1, 0), flags)
2478 && operand_equal_p (TREE_OPERAND (arg0, 1),
2479 TREE_OPERAND (arg1, 1), flags)
2480 && operand_equal_p (TREE_OPERAND (arg0, 2),
2481 TREE_OPERAND (arg1, 2), flags));
2487 switch (TREE_CODE (arg0))
2490 case TRUTH_NOT_EXPR:
2491 return operand_equal_p (TREE_OPERAND (arg0, 0),
2492 TREE_OPERAND (arg1, 0), flags);
2494 case TRUTH_ANDIF_EXPR:
2495 case TRUTH_ORIF_EXPR:
2496 return operand_equal_p (TREE_OPERAND (arg0, 0),
2497 TREE_OPERAND (arg1, 0), flags)
2498 && operand_equal_p (TREE_OPERAND (arg0, 1),
2499 TREE_OPERAND (arg1, 1), flags);
2501 case TRUTH_AND_EXPR:
2503 case TRUTH_XOR_EXPR:
2504 return (operand_equal_p (TREE_OPERAND (arg0, 0),
2505 TREE_OPERAND (arg1, 0), flags)
2506 && operand_equal_p (TREE_OPERAND (arg0, 1),
2507 TREE_OPERAND (arg1, 1), flags))
2508 || (operand_equal_p (TREE_OPERAND (arg0, 0),
2509 TREE_OPERAND (arg1, 1), flags)
2510 && operand_equal_p (TREE_OPERAND (arg0, 1),
2511 TREE_OPERAND (arg1, 0), flags));
2514 /* If the CALL_EXPRs call different functions, then they
2515 clearly cannot be equal. */
2516 if (! operand_equal_p (TREE_OPERAND (arg0, 0),
2517 TREE_OPERAND (arg1, 0), flags))
2521 unsigned int cef = call_expr_flags (arg0);
2522 if (flags & OEP_PURE_SAME)
2523 cef &= ECF_CONST | ECF_PURE;
2530 /* Now see if all the arguments are the same. operand_equal_p
2531 does not handle TREE_LIST, so we walk the operands here
2532 feeding them to operand_equal_p. */
2533 arg0 = TREE_OPERAND (arg0, 1);
2534 arg1 = TREE_OPERAND (arg1, 1);
2535 while (arg0 && arg1)
2537 if (! operand_equal_p (TREE_VALUE (arg0), TREE_VALUE (arg1),
2541 arg0 = TREE_CHAIN (arg0);
2542 arg1 = TREE_CHAIN (arg1);
2545 /* If we get here and both argument lists are exhausted
2546 then the CALL_EXPRs are equal. */
2547 return ! (arg0 || arg1);
2554 /* Consider __builtin_sqrt equal to sqrt. */
2555 return (TREE_CODE (arg0) == FUNCTION_DECL
2556 && DECL_BUILT_IN (arg0) && DECL_BUILT_IN (arg1)
2557 && DECL_BUILT_IN_CLASS (arg0) == DECL_BUILT_IN_CLASS (arg1)
2558 && DECL_FUNCTION_CODE (arg0) == DECL_FUNCTION_CODE (arg1));
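/* Standalone illustration (compiled out, not part of the folder itself):
   why indistinguishability differs from C's == operation. Assumes IEEE
   floating point. */
#if 0
#include <assert.h>
#include <math.h>
#include <string.h>

int
main (void)
{
  double pz = 0.0, nz = -0.0;
  double nan1 = nan (""), nan2 = nan1;

  /* -0.0 and 0.0 compare equal with ==, yet are distinguishable.  */
  assert (pz == nz);
  assert (signbit (pz) != signbit (nz));

  /* Two bitwise-identical NaNs are indistinguishable, yet NaN != NaN.  */
  assert (memcmp (&nan1, &nan2, sizeof nan1) == 0);
  assert (nan1 != nan2);
  return 0;
}
#endif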
2565 /* Similar to operand_equal_p, but see if ARG0 might have been made by
2566 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
2568 When in doubt, return 0. */
2571 operand_equal_for_comparison_p (tree arg0, tree arg1, tree other)
2573 int unsignedp1, unsignedpo;
2574 tree primarg0, primarg1, primother;
2575 unsigned int correct_width;
2577 if (operand_equal_p (arg0, arg1, 0))
2580 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
2581 || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
2584 /* Discard any conversions that don't change the modes of ARG0 and ARG1
2585 and see if the inner values are the same. This removes any
2586 signedness comparison, which doesn't matter here. */
2587 primarg0 = arg0, primarg1 = arg1;
2588 STRIP_NOPS (primarg0);
2589 STRIP_NOPS (primarg1);
2590 if (operand_equal_p (primarg0, primarg1, 0))
2593 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
2594 actual comparison operand, ARG0.
2596 First throw away any conversions to wider types
2597 already present in the operands. */
2599 primarg1 = get_narrower (arg1, &unsignedp1);
2600 primother = get_narrower (other, &unsignedpo);
2602 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
2603 if (unsignedp1 == unsignedpo
2604 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
2605 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
2607 tree type = TREE_TYPE (arg0);
2609 /* Make sure shorter operand is extended the right way
2610 to match the longer operand. */
2611 primarg1 = fold_convert (lang_hooks.types.signed_or_unsigned_type
2612 (unsignedp1, TREE_TYPE (primarg1)), primarg1);
2614 if (operand_equal_p (arg0, fold_convert (type, primarg1), 0))
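/* Standalone illustration (compiled out): why the narrowed operand must be
   re-extended with its own signedness before the comparison is redone.
   Assumes 8-bit chars on a two's-complement target. */
#if 0
#include <assert.h>

int
main (void)
{
  unsigned char uc = 0xff;
  signed char sc = -1;        /* Same bit pattern as UC.  */
  assert ((int) uc == 255);   /* Zero extension.  */
  assert ((int) sc == -1);    /* Sign extension.  */
  return 0;
}
#endif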
2621 /* See if ARG is an expression that is either a comparison or is performing
2622 arithmetic on comparisons. The comparisons must only be comparing
2623 two different values, which will be stored in *CVAL1 and *CVAL2; if
2624 they are nonzero it means that some operands have already been found.
2625 No variables may be used anywhere else in the expression except in the
2626 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
2627 the expression and save_expr needs to be called with CVAL1 and CVAL2.
2629 If this is true, return 1; otherwise, return 0. */
2632 twoval_comparison_p (tree arg, tree *cval1, tree *cval2, int *save_p)
2634 enum tree_code code = TREE_CODE (arg);
2635 char class = TREE_CODE_CLASS (code);
2637 /* We can handle some of the 'e' cases here. */
2638 if (class == 'e' && code == TRUTH_NOT_EXPR)
2640 else if (class == 'e'
2641 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
2642 || code == COMPOUND_EXPR))
2645 else if (class == 'e' && code == SAVE_EXPR
2646 && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg, 0)))
2648 /* If we've already found a CVAL1 or CVAL2, this expression is
2649 too complex to handle. */
2650 if (*cval1 || *cval2)
2660 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
2663 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
2664 && twoval_comparison_p (TREE_OPERAND (arg, 1),
2665 cval1, cval2, save_p));
2671 if (code == COND_EXPR)
2672 return (twoval_comparison_p (TREE_OPERAND (arg, 0),
2673 cval1, cval2, save_p)
2674 && twoval_comparison_p (TREE_OPERAND (arg, 1),
2675 cval1, cval2, save_p)
2676 && twoval_comparison_p (TREE_OPERAND (arg, 2),
2677 cval1, cval2, save_p));
2681 /* First see if we can handle the first operand, then the second. For
2682 the second operand, we know *CVAL1 can't be zero. It must be that
2683 one side of the comparison is each of the values; test for the
2684 case where this isn't true by failing if the two operands are the same. */
2687 if (operand_equal_p (TREE_OPERAND (arg, 0),
2688 TREE_OPERAND (arg, 1), 0))
2692 *cval1 = TREE_OPERAND (arg, 0);
2693 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
2695 else if (*cval2 == 0)
2696 *cval2 = TREE_OPERAND (arg, 0);
2697 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
2702 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
2704 else if (*cval2 == 0)
2705 *cval2 = TREE_OPERAND (arg, 1);
2706 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
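/* Standalone illustration (compiled out): the sort of two-value expression
   this recognizer enables fold to collapse -- arithmetic on comparisons
   whose only variables are the two compared values, e.g.
   (a < b) | (a == b), which is equivalent to a <= b. Checked here by
   brute force over a small range. */
#if 0
#include <assert.h>

int
main (void)
{
  int a, b;
  for (a = -2; a <= 2; a++)
    for (b = -2; b <= 2; b++)
      assert (((a < b) | (a == b)) == (a <= b));
  return 0;
}
#endif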
2718 /* ARG is a tree that is known to contain just arithmetic operations and
2719 comparisons. Evaluate the operations in the tree substituting NEW0 for
2720 any occurrence of OLD0 as an operand of a comparison, and likewise NEW1 for any occurrence of OLD1. */
2724 eval_subst (tree arg, tree old0, tree new0, tree old1, tree new1)
2726 tree type = TREE_TYPE (arg);
2727 enum tree_code code = TREE_CODE (arg);
2728 char class = TREE_CODE_CLASS (code);
2730 /* We can handle some of the 'e' cases here. */
2731 if (class == 'e' && code == TRUTH_NOT_EXPR)
2733 else if (class == 'e'
2734 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
2740 return fold (build1 (code, type,
2741 eval_subst (TREE_OPERAND (arg, 0),
2742 old0, new0, old1, new1)));
2745 return fold (build2 (code, type,
2746 eval_subst (TREE_OPERAND (arg, 0),
2747 old0, new0, old1, new1),
2748 eval_subst (TREE_OPERAND (arg, 1),
2749 old0, new0, old1, new1)));
2755 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
2758 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
2761 return fold (build3 (code, type,
2762 eval_subst (TREE_OPERAND (arg, 0),
2763 old0, new0, old1, new1),
2764 eval_subst (TREE_OPERAND (arg, 1),
2765 old0, new0, old1, new1),
2766 eval_subst (TREE_OPERAND (arg, 2),
2767 old0, new0, old1, new1)));
2771 /* Fall through - ??? */
2775 tree arg0 = TREE_OPERAND (arg, 0);
2776 tree arg1 = TREE_OPERAND (arg, 1);
2778 /* We need to check both for exact equality and tree equality. The
2779 former will be true if the operand has a side-effect. In that
2780 case, we know the operand occurred exactly once. */
2782 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
2784 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
2787 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
2789 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
2792 return fold (build2 (code, type, arg0, arg1));
2800 /* Return a tree for the case when the result of an expression is RESULT
2801 converted to TYPE and OMITTED was previously an operand of the expression
2802 but is now not needed (e.g., we folded OMITTED * 0).
2804 If OMITTED has side effects, we must evaluate it. Otherwise, just do
2805 the conversion of RESULT to TYPE. */
2808 omit_one_operand (tree type, tree result, tree omitted)
2810 tree t = fold_convert (type, result);
2812 if (TREE_SIDE_EFFECTS (omitted))
2813 return build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);
2815 return non_lvalue (t);
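/* Standalone illustration (compiled out): the source-level shape of the
   COMPOUND_EXPR built above. Folding f () * 0 must still evaluate f ()
   for its side effects, which the comma form preserves. */
#if 0
#include <assert.h>

static int calls;

static int
f (void)
{
  calls++;
  return 42;
}

int
main (void)
{
  int r = (f (), 0);  /* Conceptually what f () * 0 folds to.  */
  assert (r == 0 && calls == 1);
  return 0;
}
#endif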
2818 /* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
2821 pedantic_omit_one_operand (tree type, tree result, tree omitted)
2823 tree t = fold_convert (type, result);
2825 if (TREE_SIDE_EFFECTS (omitted))
2826 return build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);
2828 return pedantic_non_lvalue (t);
2831 /* Return a tree for the case when the result of an expression is RESULT
2832 converted to TYPE and OMITTED1 and OMITTED2 were previously operands
2833 of the expression but are now not needed.
2835 If OMITTED1 or OMITTED2 has side effects, they must be evaluated.
2836 If both OMITTED1 and OMITTED2 have side effects, OMITTED1 is
2837 evaluated before OMITTED2. Otherwise, if neither has side effects,
2838 just do the conversion of RESULT to TYPE. */
2841 omit_two_operands (tree type, tree result, tree omitted1, tree omitted2)
2843 tree t = fold_convert (type, result);
2845 if (TREE_SIDE_EFFECTS (omitted2))
2846 t = build2 (COMPOUND_EXPR, type, omitted2, t);
2847 if (TREE_SIDE_EFFECTS (omitted1))
2848 t = build2 (COMPOUND_EXPR, type, omitted1, t);
2850 return TREE_CODE (t) != COMPOUND_EXPR ? non_lvalue (t) : t;
2854 /* Return a simplified tree node for the truth-negation of ARG. This
2855 never alters ARG itself. We assume that ARG is an operation that
2856 returns a truth value (0 or 1).
2858 FIXME: one would think we would fold the result, but it causes
2859 problems with the dominator optimizer. */
2861 invert_truthvalue (tree arg)
2863 tree type = TREE_TYPE (arg);
2864 enum tree_code code = TREE_CODE (arg);
2866 if (code == ERROR_MARK)
2869 /* If this is a comparison, we can simply invert it, except for
2870 floating-point non-equality comparisons, in which case we just
2871 enclose a TRUTH_NOT_EXPR around what we have. */
2873 if (TREE_CODE_CLASS (code) == '<')
2875 tree op_type = TREE_TYPE (TREE_OPERAND (arg, 0));
2876 if (FLOAT_TYPE_P (op_type)
2877 && flag_trapping_math
2878 && code != ORDERED_EXPR && code != UNORDERED_EXPR
2879 && code != NE_EXPR && code != EQ_EXPR)
2880 return build1 (TRUTH_NOT_EXPR, type, arg);
2883 code = invert_tree_comparison (code,
2884 HONOR_NANS (TYPE_MODE (op_type)));
2885 if (code == ERROR_MARK)
2886 return build1 (TRUTH_NOT_EXPR, type, arg);
2888 return build2 (code, type,
2889 TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
2896 return fold_convert (type,
2897 build_int_cst (NULL_TREE, integer_zerop (arg)));
2899 case TRUTH_AND_EXPR:
2900 return build2 (TRUTH_OR_EXPR, type,
2901 invert_truthvalue (TREE_OPERAND (arg, 0)),
2902 invert_truthvalue (TREE_OPERAND (arg, 1)));
2905 return build2 (TRUTH_AND_EXPR, type,
2906 invert_truthvalue (TREE_OPERAND (arg, 0)),
2907 invert_truthvalue (TREE_OPERAND (arg, 1)));
2909 case TRUTH_XOR_EXPR:
2910 /* Here we can invert either operand. We invert the first operand
2911 unless the second operand is a TRUTH_NOT_EXPR in which case our
2912 result is the XOR of the first operand with the inside of the
2913 negation of the second operand. */
2915 if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
2916 return build2 (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
2917 TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
2919 return build2 (TRUTH_XOR_EXPR, type,
2920 invert_truthvalue (TREE_OPERAND (arg, 0)),
2921 TREE_OPERAND (arg, 1));
2923 case TRUTH_ANDIF_EXPR:
2924 return build2 (TRUTH_ORIF_EXPR, type,
2925 invert_truthvalue (TREE_OPERAND (arg, 0)),
2926 invert_truthvalue (TREE_OPERAND (arg, 1)));
2928 case TRUTH_ORIF_EXPR:
2929 return build2 (TRUTH_ANDIF_EXPR, type,
2930 invert_truthvalue (TREE_OPERAND (arg, 0)),
2931 invert_truthvalue (TREE_OPERAND (arg, 1)));
2933 case TRUTH_NOT_EXPR:
2934 return TREE_OPERAND (arg, 0);
2937 return build3 (COND_EXPR, type, TREE_OPERAND (arg, 0),
2938 invert_truthvalue (TREE_OPERAND (arg, 1)),
2939 invert_truthvalue (TREE_OPERAND (arg, 2)));
2942 return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
2943 invert_truthvalue (TREE_OPERAND (arg, 1)));
2945 case NON_LVALUE_EXPR:
2946 return invert_truthvalue (TREE_OPERAND (arg, 0));
2949 if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE)
2954 return build1 (TREE_CODE (arg), type,
2955 invert_truthvalue (TREE_OPERAND (arg, 0)));
2958 if (!integer_onep (TREE_OPERAND (arg, 1)))
2960 return build2 (EQ_EXPR, type, arg,
2961 fold_convert (type, integer_zero_node));
2964 return build1 (TRUTH_NOT_EXPR, type, arg);
2966 case CLEANUP_POINT_EXPR:
2967 return build1 (CLEANUP_POINT_EXPR, type,
2968 invert_truthvalue (TREE_OPERAND (arg, 0)));
2973 gcc_assert (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE);
2974 return build1 (TRUTH_NOT_EXPR, type, arg);
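/* Standalone illustration (compiled out): why invert_tree_comparison must
   know whether NaNs are honored. With a NaN operand both x < y and its
   would-be inversion x >= y are false, so the two are not complements.
   Assumes IEEE floating point. */
#if 0
#include <assert.h>
#include <math.h>

int
main (void)
{
  double x = nan (""), y = 1.0;
  assert (!(x < y));    /* Ordered comparison with NaN is false...  */
  assert (!(x >= y));   /* ...and so is the "inverted" one.  */
  assert ((!(x < y)) != (x >= y));
  return 0;
}
#endif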
2977 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
2978 operands are another bit-wise operation with a common input. If so,
2979 distribute the bit operations to save an operation and possibly two if
2980 constants are involved. For example, convert
2981 (A | B) & (A | C) into A | (B & C)
2982 Further simplification will occur if B and C are constants.
2984 If this optimization cannot be done, 0 will be returned. */
2987 distribute_bit_expr (enum tree_code code, tree type, tree arg0, tree arg1)
2992 if (TREE_CODE (arg0) != TREE_CODE (arg1)
2993 || TREE_CODE (arg0) == code
2994 || (TREE_CODE (arg0) != BIT_AND_EXPR
2995 && TREE_CODE (arg0) != BIT_IOR_EXPR))
2998 if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
3000 common = TREE_OPERAND (arg0, 0);
3001 left = TREE_OPERAND (arg0, 1);
3002 right = TREE_OPERAND (arg1, 1);
3004 else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
3006 common = TREE_OPERAND (arg0, 0);
3007 left = TREE_OPERAND (arg0, 1);
3008 right = TREE_OPERAND (arg1, 0);
3010 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
3012 common = TREE_OPERAND (arg0, 1);
3013 left = TREE_OPERAND (arg0, 0);
3014 right = TREE_OPERAND (arg1, 1);
3016 else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
3018 common = TREE_OPERAND (arg0, 1);
3019 left = TREE_OPERAND (arg0, 0);
3020 right = TREE_OPERAND (arg1, 0);
3025 return fold (build2 (TREE_CODE (arg0), type, common,
3026 fold (build2 (code, type, left, right))));
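/* Standalone illustration (compiled out): brute-force check of the
   distribution identity used above, (A | B) & (A | C) == A | (B & C),
   together with its dual. */
#if 0
#include <assert.h>

int
main (void)
{
  unsigned a, b, c;
  for (a = 0; a < 8; a++)
    for (b = 0; b < 8; b++)
      for (c = 0; c < 8; c++)
	{
	  assert (((a | b) & (a | c)) == (a | (b & c)));
	  assert (((a & b) | (a & c)) == (a & (b | c)));
	}
  return 0;
}
#endif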
3029 /* Return a BIT_FIELD_REF of type TYPE to refer to BITSIZE bits of INNER
3030 starting at BITPOS. The field is unsigned if UNSIGNEDP is nonzero. */
3033 make_bit_field_ref (tree inner, tree type, int bitsize, int bitpos,
3036 tree result = build3 (BIT_FIELD_REF, type, inner,
3037 size_int (bitsize), bitsize_int (bitpos));
3039 BIT_FIELD_REF_UNSIGNED (result) = unsignedp;
3044 /* Optimize a bit-field compare.
3046 There are two cases: the first is a compare against a constant; the
3047 second is a comparison of two items where the fields are at the same
3048 bit position relative to the start of a chunk (byte, halfword, word)
3049 large enough to contain it. In these cases we can avoid the shift
3050 implicit in bitfield extractions.
3052 For constants, we emit a compare of the shifted constant with the
3053 BIT_AND_EXPR of a mask and a byte, halfword, or word of the operand being
3054 compared. For two fields at the same position, we do the ANDs with the
3055 similar mask and compare the result of the ANDs.
3057 CODE is the comparison code, known to be either NE_EXPR or EQ_EXPR.
3058 COMPARE_TYPE is the type of the comparison, and LHS and RHS
3059 are the left and right operands of the comparison, respectively.
3061 If the optimization described above can be done, we return the resulting
3062 tree. Otherwise we return zero. */
3065 optimize_bit_field_compare (enum tree_code code, tree compare_type,
3068 HOST_WIDE_INT lbitpos, lbitsize, rbitpos, rbitsize, nbitpos, nbitsize;
3069 tree type = TREE_TYPE (lhs);
3070 tree signed_type, unsigned_type;
3071 int const_p = TREE_CODE (rhs) == INTEGER_CST;
3072 enum machine_mode lmode, rmode, nmode;
3073 int lunsignedp, runsignedp;
3074 int lvolatilep = 0, rvolatilep = 0;
3075 tree linner, rinner = NULL_TREE;
3079 /* Get all the information about the extractions being done. If the bit size
3080 is the same as the size of the underlying object, we aren't doing an
3081 extraction at all and so can do nothing. We also don't want to
3082 do anything if the inner expression is a PLACEHOLDER_EXPR since we
3083 then will no longer be able to replace it. */
3084 linner = get_inner_reference (lhs, &lbitsize, &lbitpos, &offset, &lmode,
3085 &lunsignedp, &lvolatilep);
3086 if (linner == lhs || lbitsize == GET_MODE_BITSIZE (lmode) || lbitsize < 0
3087 || offset != 0 || TREE_CODE (linner) == PLACEHOLDER_EXPR)
3092 /* If this is not a constant, we can only do something if bit positions,
3093 sizes, and signedness are the same. */
3094 rinner = get_inner_reference (rhs, &rbitsize, &rbitpos, &offset, &rmode,
3095 &runsignedp, &rvolatilep);
3097 if (rinner == rhs || lbitpos != rbitpos || lbitsize != rbitsize
3098 || lunsignedp != runsignedp || offset != 0
3099 || TREE_CODE (rinner) == PLACEHOLDER_EXPR)
3103 /* See if we can find a mode to refer to this field. We should be able to,
3104 but fail if we can't. */
3105 nmode = get_best_mode (lbitsize, lbitpos,
3106 const_p ? TYPE_ALIGN (TREE_TYPE (linner))
3107 : MIN (TYPE_ALIGN (TREE_TYPE (linner)),
3108 TYPE_ALIGN (TREE_TYPE (rinner))),
3109 word_mode, lvolatilep || rvolatilep);
3110 if (nmode == VOIDmode)
3113 /* Set signed and unsigned types of the precision of this mode for the shifts below. */
3115 signed_type = lang_hooks.types.type_for_mode (nmode, 0);
3116 unsigned_type = lang_hooks.types.type_for_mode (nmode, 1);
3118 /* Compute the bit position and size for the new reference and our offset
3119 within it. If the new reference is the same size as the original, we
3120 won't optimize anything, so return zero. */
3121 nbitsize = GET_MODE_BITSIZE (nmode);
3122 nbitpos = lbitpos & ~ (nbitsize - 1);
3124 if (nbitsize == lbitsize)
3127 if (BYTES_BIG_ENDIAN)
3128 lbitpos = nbitsize - lbitsize - lbitpos;
3130 /* Make the mask to be used against the extracted field. */
3131 mask = build_int_cst (unsigned_type, -1);
3132 mask = force_fit_type (mask, 0, false, false);
3133 mask = fold_convert (unsigned_type, mask);
3134 mask = const_binop (LSHIFT_EXPR, mask, size_int (nbitsize - lbitsize), 0);
3135 mask = const_binop (RSHIFT_EXPR, mask,
3136 size_int (nbitsize - lbitsize - lbitpos), 0);
3139 /* If not comparing with constant, just rework the comparison and return. */
3141 return build2 (code, compare_type,
3142 build2 (BIT_AND_EXPR, unsigned_type,
3143 make_bit_field_ref (linner, unsigned_type,
3144 nbitsize, nbitpos, 1),
3146 build2 (BIT_AND_EXPR, unsigned_type,
3147 make_bit_field_ref (rinner, unsigned_type,
3148 nbitsize, nbitpos, 1),
3151 /* Otherwise, we are handling the constant case. See if the constant is too
3152 big for the field. Warn and return a tree for 0 (false) if so. We do
3153 this not only for its own sake, but to avoid having to test for this
3154 error case below. If we didn't, we might generate wrong code.
3156 For unsigned fields, the constant shifted right by the field length should
3157 be all zero. For signed fields, the high-order bits should agree with the sign bit. */
3162 if (! integer_zerop (const_binop (RSHIFT_EXPR,
3163 fold_convert (unsigned_type, rhs),
3164 size_int (lbitsize), 0)))
3166 warning ("comparison is always %d due to width of bit-field",
3168 return constant_boolean_node (code == NE_EXPR, compare_type);
3173 tree tem = const_binop (RSHIFT_EXPR, fold_convert (signed_type, rhs),
3174 size_int (lbitsize - 1), 0);
3175 if (! integer_zerop (tem) && ! integer_all_onesp (tem))
3177 warning ("comparison is always %d due to width of bit-field",
3179 return constant_boolean_node (code == NE_EXPR, compare_type);
3183 /* Single-bit compares should always be against zero. */
3184 if (lbitsize == 1 && ! integer_zerop (rhs))
3186 code = code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
3187 rhs = fold_convert (type, integer_zero_node);
3190 /* Make a new bitfield reference, shift the constant over the
3191 appropriate number of bits and mask it with the computed mask
3192 (in case this was a signed field). If we changed it, make a new one. */
3193 lhs = make_bit_field_ref (linner, unsigned_type, nbitsize, nbitpos, 1);
3196 TREE_SIDE_EFFECTS (lhs) = 1;
3197 TREE_THIS_VOLATILE (lhs) = 1;
3200 rhs = fold (const_binop (BIT_AND_EXPR,
3201 const_binop (LSHIFT_EXPR,
3202 fold_convert (unsigned_type, rhs),
3203 size_int (lbitpos), 0),
3206 return build2 (code, compare_type,
3207 build2 (BIT_AND_EXPR, unsigned_type, lhs, mask),
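/* Standalone illustration (compiled out): the source-level shape of the
   constant case above. Comparing a bit-field against a constant becomes a
   masked compare of the containing word against the shifted constant,
   avoiding the extraction shifts. The bit positions assumed below
   (low-order allocation, field at bits 3..6) are illustrative only. */
#if 0
#include <assert.h>
#include <string.h>

struct s { unsigned pad : 3; unsigned fld : 4; };

int
main (void)
{
  struct s v = { 5, 9 };
  unsigned word, mask = 0xfu << 3;

  memcpy (&word, &v, sizeof word);
  assert ((v.fld == 9) == ((word & mask) == (9u << 3)));
  return 0;
}
#endif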
3211 /* Subroutine for fold_truthop: decode a field reference.
3213 If EXP is a comparison reference, we return the innermost reference.
3215 *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
3216 set to the starting bit number.
3218 If the innermost field can be completely contained in a mode-sized
3219 unit, *PMODE is set to that mode. Otherwise, it is set to VOIDmode.
3221 *PVOLATILEP is set to 1 if any expression encountered is volatile;
3222 otherwise it is not changed.
3224 *PUNSIGNEDP is set to the signedness of the field.
3226 *PMASK is set to the mask used. This is either contained in a
3227 BIT_AND_EXPR or derived from the width of the field.
3229 *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.
3231 Return 0 if this is not a component reference or is one that we can't
3232 do anything with. */
3235 decode_field_reference (tree exp, HOST_WIDE_INT *pbitsize,
3236 HOST_WIDE_INT *pbitpos, enum machine_mode *pmode,
3237 int *punsignedp, int *pvolatilep,
3238 tree *pmask, tree *pand_mask)
3240 tree outer_type = 0;
3242 tree mask, inner, offset;
3244 unsigned int precision;
3246 /* All the optimizations using this function assume integer fields.
3247 There are problems with FP fields since the type_for_size call
3248 below can fail for, e.g., XFmode. */
3249 if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
3252 /* We are interested in the bare arrangement of bits, so strip everything
3253 that doesn't affect the machine mode. However, record the type of the
3254 outermost expression if it may matter below. */
3255 if (TREE_CODE (exp) == NOP_EXPR
3256 || TREE_CODE (exp) == CONVERT_EXPR
3257 || TREE_CODE (exp) == NON_LVALUE_EXPR)
3258 outer_type = TREE_TYPE (exp);
3261 if (TREE_CODE (exp) == BIT_AND_EXPR)
3263 and_mask = TREE_OPERAND (exp, 1);
3264 exp = TREE_OPERAND (exp, 0);
3265 STRIP_NOPS (exp); STRIP_NOPS (and_mask);
3266 if (TREE_CODE (and_mask) != INTEGER_CST)
3270 inner = get_inner_reference (exp, pbitsize, pbitpos, &offset, pmode,
3271 punsignedp, pvolatilep);
3272 if ((inner == exp && and_mask == 0)
3273 || *pbitsize < 0 || offset != 0
3274 || TREE_CODE (inner) == PLACEHOLDER_EXPR)
3277 /* If the number of bits in the reference is the same as the bitsize of
3278 the outer type, then the outer type gives the signedness. Otherwise
3279 (in case of a small bitfield) the signedness is unchanged. */
3280 if (outer_type && *pbitsize == TYPE_PRECISION (outer_type))
3281 *punsignedp = TYPE_UNSIGNED (outer_type);
3283 /* Compute the mask to access the bitfield. */
3284 unsigned_type = lang_hooks.types.type_for_size (*pbitsize, 1);
3285 precision = TYPE_PRECISION (unsigned_type);
3287 mask = build_int_cst (unsigned_type, -1);
3288 mask = force_fit_type (mask, 0, false, false);
3290 mask = const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
3291 mask = const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize), 0);
3293 /* Merge it with the mask we found in the BIT_AND_EXPR, if any. */
3295 mask = fold (build2 (BIT_AND_EXPR, unsigned_type,
3296 fold_convert (unsigned_type, and_mask), mask));
3299 *pand_mask = and_mask;
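/* Standalone illustration (compiled out): the shift pair used above to
   build a mask of SIZE low-order ones. Shifting all-ones up and back down
   stays well defined even when SIZE equals the full precision, unlike
   (1 << SIZE) - 1. Assumes 32-bit unsigned. */
#if 0
#include <assert.h>

static unsigned
low_mask (unsigned precision, unsigned size)
{
  unsigned mask = ~0u;
  mask <<= precision - size;  /* Keep SIZE ones at the top...  */
  mask >>= precision - size;  /* ...then move them to the bottom.  */
  return mask;
}

int
main (void)
{
  assert (low_mask (32, 4) == 0xfu);
  assert (low_mask (32, 32) == ~0u);  /* Shift by zero, not by 32.  */
  return 0;
}
#endif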
3303 /* Return nonzero if MASK represents a mask of SIZE ones in the low-order bit positions. */
3307 all_ones_mask_p (tree mask, int size)
3309 tree type = TREE_TYPE (mask);
3310 unsigned int precision = TYPE_PRECISION (type);
3313 tmask = build_int_cst (lang_hooks.types.signed_type (type), -1);
3314 tmask = force_fit_type (tmask, 0, false, false);
3317 tree_int_cst_equal (mask,
3318 const_binop (RSHIFT_EXPR,
3319 const_binop (LSHIFT_EXPR, tmask,
3320 size_int (precision - size),
3322 size_int (precision - size), 0));
3325 /* Subroutine for fold: determine if VAL is the INTEGER_CST that
3326 represents the sign bit of EXP's type. If EXP represents a sign
3327 or zero extension, also test VAL against the unextended type.
3328 The return value is the (sub)expression whose sign bit is VAL,
3329 or NULL_TREE otherwise. */
3332 sign_bit_p (tree exp, tree val)
3334 unsigned HOST_WIDE_INT mask_lo, lo;
3335 HOST_WIDE_INT mask_hi, hi;
3339 /* Tree EXP must have an integral type. */
3340 t = TREE_TYPE (exp);
3341 if (! INTEGRAL_TYPE_P (t))
3344 /* Tree VAL must be an integer constant. */
3345 if (TREE_CODE (val) != INTEGER_CST
3346 || TREE_CONSTANT_OVERFLOW (val))
3349 width = TYPE_PRECISION (t);
3350 if (width > HOST_BITS_PER_WIDE_INT)
3352 hi = (unsigned HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT - 1);
3355 mask_hi = ((unsigned HOST_WIDE_INT) -1
3356 >> (2 * HOST_BITS_PER_WIDE_INT - width));
3362 lo = (unsigned HOST_WIDE_INT) 1 << (width - 1);
3365 mask_lo = ((unsigned HOST_WIDE_INT) -1
3366 >> (HOST_BITS_PER_WIDE_INT - width));
3369 /* We mask off those bits beyond TREE_TYPE (exp) so that we can
3370 treat VAL as if it were unsigned. */
3371 if ((TREE_INT_CST_HIGH (val) & mask_hi) == hi
3372 && (TREE_INT_CST_LOW (val) & mask_lo) == lo)
3375 /* Handle extension from a narrower type. */
3376 if (TREE_CODE (exp) == NOP_EXPR
3377 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0))) < width)
3378 return sign_bit_p (TREE_OPERAND (exp, 0), val);
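/* Standalone illustration (compiled out): the sign-bit constant this
   predicate looks for is a single one in the top bit of the type's
   precision, and masking VAL to that precision lets a negative constant
   qualify. Assumes 8-bit chars. */
#if 0
#include <assert.h>

int
main (void)
{
  unsigned width8 = 8, width32 = 32;
  assert ((1ull << (width8 - 1)) == 0x80);
  assert ((1ull << (width32 - 1)) == 0x80000000ull);
  /* (signed char) -128 masked to 8 bits is exactly the sign bit.  */
  assert ((unsigned char) -128 == 0x80);
  return 0;
}
#endif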
3383 /* Subroutine for fold_truthop: determine if an operand is simple enough
3384 to be evaluated unconditionally. */
3387 simple_operand_p (tree exp)
3389 /* Strip any conversions that don't change the machine mode. */
3390 while ((TREE_CODE (exp) == NOP_EXPR
3391 || TREE_CODE (exp) == CONVERT_EXPR)
3392 && (TYPE_MODE (TREE_TYPE (exp))
3393 == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
3394 exp = TREE_OPERAND (exp, 0);
3396 return (TREE_CODE_CLASS (TREE_CODE (exp)) == 'c'
3398 && ! TREE_ADDRESSABLE (exp)
3399 && ! TREE_THIS_VOLATILE (exp)
3400 && ! DECL_NONLOCAL (exp)
3401 /* Don't regard global variables as simple. They may be
3402 allocated in ways unknown to the compiler (shared memory,
3403 #pragma weak, etc). */
3404 && ! TREE_PUBLIC (exp)
3405 && ! DECL_EXTERNAL (exp)
3406 /* Loading a static variable is unduly expensive, but global
3407 registers aren't expensive. */
3408 && (! TREE_STATIC (exp) || DECL_REGISTER (exp))));
3411 /* The following functions are subroutines to fold_range_test and allow it to
3412 try to change a logical combination of comparisons into a range test.
3415 For example, X == 2 || X == 3 || X == 4 || X == 5
3419 is changed to (unsigned) (X - 2) <= 3.
3421 We describe each set of comparisons as being either inside or outside
3422 a range, using a variable named like IN_P, and then describe the
3423 range with a lower and upper bound. If one of the bounds is omitted,
3424 it represents either the highest or lowest value of the type.
3426 In the comments below, we represent a range by two numbers in brackets
3427 preceded by a "+" to designate being inside that range, or a "-" to
3428 designate being outside that range, so the condition can be inverted by
3429 flipping the prefix. An omitted bound is represented by a "-". For
3430 example, "- [-, 10]" means being outside the range starting at the lowest
3431 possible value and ending at 10, in other words, being greater than 10.
3432 The range "+ [-, -]" is always true and hence the range "- [-, -]" is
3435 We set up things so that the missing bounds are handled in a consistent
3436 manner so neither a missing bound nor "true" and "false" need to be
3437 handled using a special case. */
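/* Standalone illustration (compiled out): brute-force check of the
   range-test rewrite quoted above. Subtracting the lower bound and doing
   one unsigned compare replaces the chain of equality tests. */
#if 0
#include <assert.h>

int
main (void)
{
  int x;
  for (x = -10; x <= 10; x++)
    {
      int chain = (x == 2 || x == 3 || x == 4 || x == 5);
      int range = ((unsigned) (x - 2) <= 3);
      assert (chain == range);
    }
  return 0;
}
#endif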
3439 /* Return the result of applying CODE to ARG0 and ARG1, but handle the case