/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
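
/* Illustrative sketch (not part of the original file): on a host where
   HOST_WIDE_INT is 64 bits, the double-width value -2 is carried as the
   pair (low = 0xfffffffffffffffe, high = -1), and HWI_SIGN_EXTEND
   recomputes the high word from a sign-extended low word alone.  */
#if 0
static void
hwi_sign_extend_example (void)
{
  HOST_WIDE_INT low = -2;
  HOST_WIDE_INT high = HWI_SIGN_EXTEND (low);  /* -1: low looks negative */
  HOST_WIDE_INT high2 = HWI_SIGN_EXTEND (7);   /*  0: low looks positive */
}
#endif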
static rtx neg_const_int (enum machine_mode, rtx);
static bool mode_signbit_p (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
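
/* Worked example of the truncation (sketch, not from the original file):
   in SImode the most negative value is -0x80000000; plain integer
   negation overflows back to the same bit pattern, and gen_int_mode
   truncates the result into the mode, so
     neg_const_int (SImode, GEN_INT (-0x80000000))
   yields (const_int -0x80000000) again rather than an out-of-range
   constant.  */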
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

static bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
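
/* Usage sketch (assumed values, not from the original file): in SImode
   the sign bit is 1 << 31, so
     mode_signbit_p (SImode, gen_int_mode ((HOST_WIDE_INT) 1 << 31, SImode))
   is true, while 1 << 30, or any value with additional low bits set, is
   not.  For modes wider than HOST_BITS_PER_WIDE_INT the constant arrives
   as a CONST_DOUBLE whose low word must be zero, which the second arm
   above checks.  */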
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
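
/* Usage sketch (hypothetical operands, not from the original file):
     simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3))
   folds to (const_int 5), while
     simplify_gen_binary (PLUS, SImode, GEN_INT (5), reg)
   (for some register rtx REG) cannot fold; the commutative swap puts
   the constant second and gen_rtx_fmt_ee builds
   (plus reg (const_int 5)).  */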
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }
      break;

    default:
      break;
    }
  return x;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
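          /* Worked example (sketch, not from the original file): for
             arg0 = 0x28 (binary 101000), arg0 & -arg0 isolates the
             lowest set bit, 0x08; exact_log2 of that is 3, and
             val = 3 + 1 = 4, the 1-based position FFS is defined to
             return.  For arg0 = 0, exact_log2 (0) is -1, so val = 0.  */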
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
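          /* Worked example (sketch, not from the original file): POPCOUNT
             of binary 1011 clears the lowest set bit once per iteration,
             1011 -> 1010 -> 1000 -> 0000, so val = 3.  PARITY runs the
             same loop and keeps only val & 1.  */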
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        default:
          return 0;
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (trueop));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
          }
          break;
        default:
          return 0;
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          abort ();
        }
      return immed_double_const (xl, xh, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (COMPARISON_P (op)
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1), mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == PLUS
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && mode_signbit_p (mode, XEXP (op, 1))
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1), mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }
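
          /* Worked example of the rotate form (sketch, not from the
             original file): in QImode with X = 3, (ashift 1 3) is
             00001000, so its NOT is 11110111; that equals
             ~1 = 11111110 rotated left by 3, which is exactly what the
             ROTATE built above computes.  */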
          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && COMPARISON_P (op)
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);
          break;
        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (LSHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == LSHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));
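
          /* Worked example (sketch, not from the original file): in
             SImode, (ashiftrt X 31) is 0 or -1 according to the sign of
             X, so its negation is 0 or 1, which is exactly
             (lshiftrt X 31); the two rules above exploit this identity
             in both directions.  */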
          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
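
/* Example of the canonicalization above (sketch, not from the original
   file): (plus (plus a b) (plus c d)) is first linearized to
   (plus (plus (plus a b) c) d), and a constant is bubbled outward, so
   (plus (plus x 5) y) becomes (plus (plus x y) 5), keeping constants at
   the outermost level where later passes can combine them.  */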
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

#ifdef ENABLE_CHECKING
  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == RTX_COMPARE
      || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
    abort ();
#endif

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              if (code == AND)
                tmp0[i] &= tmp1[i];
              else if (code == IOR)
                tmp0[i] |= tmp1[i];
              else if (code == XOR)
                tmp0[i] ^= tmp1[i];
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
          f0 = real_value_truncate (mode, f0);
          f1 = real_value_truncate (mode, f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

          value = real_value_truncate (mode, value);
          return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
        }
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */
        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }
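
          /* Worked example (sketch, not from the original file):
             (plus (mult x 3) x) matches with coeff0 = 3 and coeff1 = 1,
             giving (mult x 4); likewise (plus (ashift x 2) x) has
             coeff0 = 4 and folds to (mult x 5).  */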
          /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == XOR
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;
        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;
        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* (-x - c) may be simplified as (-c - x).  */
          if (GET_CODE (op0) == NEG
              && (GET_CODE (op1) == CONST_INT
                  || GET_CODE (op1) == CONST_DOUBLE))
            {
              tem = simplify_unary_operation (NEG, mode, op1, mode);
              if (tem)
                return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
            }
          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
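
          /* Worked example (sketch, not from the original file): with
             x = 1100 and y = 1010 in binary, x & y = 1000 and
             x - (x & y) = 0100 = x & ~y; the subtraction never borrows
             because x & y only contains bits that are also set in x.  */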
          break;

        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

          /* x*2 is x+x and x*(-1) is -x  */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;
        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;

          /* Canonicalize XOR of the most significant bit to PLUS.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (PLUS, mode, op0, op1);
          /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == PLUS
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, XEXP (op0, 1)))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          /* If we are turning off bits already known off in OP0, we need
             not do an AND.  */
          if (GET_CODE (trueop1) == CONST_INT
              && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case UDIV:
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x/1 is x.  */
          if (trueop1 == const1_rtx)
            {
              /* Handle narrowing UDIV.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              return op0;
            }
          /* Convert divide by power of two into shift.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
          break;

        case DIV:
          /* Handle floating point and integers separately.  */
          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              /* Maybe change 0.0 / x to 0.0.  This transformation isn't
                 safe for modes with NaNs, since 0.0 / 0.0 will then be
                 NaN rather than 0.0.  Nor is it safe for modes with signed
                 zeros, since dividing 0 by a negative number gives -0.0  */
              if (trueop0 == CONST0_RTX (mode)
                  && !HONOR_NANS (mode)
                  && !HONOR_SIGNED_ZEROS (mode)
                  && ! side_effects_p (op1))
                return op0;
              /* x/1.0 is x.  */
              if (trueop1 == CONST1_RTX (mode)
                  && !HONOR_SNANS (mode))
                return op0;

              if (GET_CODE (trueop1) == CONST_DOUBLE
                  && trueop1 != CONST0_RTX (mode))
                {
                  REAL_VALUE_TYPE d;
                  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

                  /* x/-1.0 is -x.  */
                  if (REAL_VALUES_EQUAL (d, dconstm1)
                      && !HONOR_SNANS (mode))
                    return simplify_gen_unary (NEG, mode, op0, mode);

                  /* Change FP division by a constant into multiplication.
                     Only do this with -funsafe-math-optimizations.  */
                  if (flag_unsafe_math_optimizations
                      && !REAL_VALUES_EQUAL (d, dconst0))
                    {
                      REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                      tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                      return simplify_gen_binary (MULT, mode, op0, tem);
                    }
                }
            }
          else
            {
              /* 0/x is 0 (or x&0 if x has side-effects).  */
              if (trueop0 == const0_rtx)
                return side_effects_p (op1)
                       ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                       : const0_rtx;
              /* x/1 is x.  */
              if (trueop1 == const1_rtx)
                {
                  /* Handle narrowing DIV.  */
                  rtx x = gen_lowpart_common (mode, op0);
                  if (x)
                    return x;
                  if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                    return gen_lowpart_SUBREG (mode, op0);
                  return op0;
                }
              /* x/-1 is -x.  */
              if (trueop1 == constm1_rtx)
                {
                  rtx x = gen_lowpart_common (mode, op0);
                  if (!x)
                    x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                        ? gen_lowpart_SUBREG (mode, op0) : op0;
                  return simplify_gen_unary (NEG, mode, x, mode);
                }
            }
          break;
        case UMOD:
          /* 0%x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x%1 is 0 (or x&0 if x has side-effects).  */
          if (trueop1 == const1_rtx)
            return side_effects_p (op0)
                   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
                   : const0_rtx;
          /* Implement modulus by power of two as AND.  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));
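
          /* Worked example (sketch, not from the original file):
             unsigned x % 8 keeps the low three bits, so it becomes
             x & 7 (29 % 8 = 5 = 29 & 7).  This arm is safe only for
             UMOD; signed MOD of a negative operand would need a
             different expansion.  */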
          break;

        case MOD:
          /* 0%x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
          if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
            return side_effects_p (op0)
                   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
                   : const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          break;
        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (mode
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL
                  || XVECLEN (trueop1, 0) != 1
                  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
            }
          else
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (GET_MODE_INNER (mode)
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  if (XVECLEN (trueop1, 0) != (int) n_elts)
                    abort ();
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      if (GET_CODE (x) != CONST_INT)
                        abort ();
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          return 0;

        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            if (!VECTOR_MODE_P (mode)
                || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    != GET_MODE_SIZE (mode)))
              abort ();

            if ((VECTOR_MODE_P (op0_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op0_mode)))
                || (!VECTOR_MODE_P (op0_mode)
                    && GET_MODE_INNER (mode) != op0_mode))
              abort ();

            if ((VECTOR_MODE_P (op1_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op1_mode)))
                || (!VECTOR_MODE_P (op1_mode)
                    && GET_MODE_INNER (mode) != op1_mode))
              abort ();

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
          }
          return 0;

        default:
          break;
        }

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
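
  /* Worked example (sketch, not from the original file): for width = 8
     and a constant with bit pattern 0xff, arg0 = 0xff while arg0s = -1;
     the signed cases below (DIV, SMIN, ...) use the sign-extended forms
     and the unsigned ones (UDIV, UMIN, ...) the zero-extended forms.  */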
  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to insure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;
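
    /* Worked example (sketch, not from the original file): for
       width = 8, arg0 = 0xb4 and arg1 = 3, ROTATERT computes
       (0xb4 << 5) | (0xb4 >> 3), which trunc_int_for_mode below reduces
       to 0x96, i.e. 10110100 rotated right by three bits is
       10010110.  */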
    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}
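
/* Example of the whole dance (sketch, not from the original file):
   simplify_plus_minus (MINUS, SImode, (plus a 5), (minus b 3), 0)
   scatters the trees into ops[] as a + 5 - b + 3 (operands with neg
   flags), combines the two constants into 8, sorts the surviving
   entries by commutative_operand_precedence, and rebuilds
   (plus (minus a b) 8).  */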
2390 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2393 struct simplify_plus_minus_op_data ops[8];
2395 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2399 memset (ops, 0, sizeof ops);
2401 /* Set up the two operands and then expand them until nothing has been
2402 changed. If we run out of room in our array, give up; this should
2403 almost never happen. */
2408 ops[1].neg = (code == MINUS);
2414 for (i = 0; i < n_ops; i++)
2416 rtx this_op = ops[i].op;
2417 int this_neg = ops[i].neg;
2418 enum rtx_code this_code = GET_CODE (this_op);
2427 ops[n_ops].op = XEXP (this_op, 1);
2428 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2431 ops[i].op = XEXP (this_op, 0);
2437 ops[i].op = XEXP (this_op, 0);
2438 ops[i].neg = ! this_neg;
2444 && GET_CODE (XEXP (this_op, 0)) == PLUS
2445 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2446 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2448 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2449 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2450 ops[n_ops].neg = this_neg;
2458 /* ~a -> (-a - 1) */
2461 ops[n_ops].op = constm1_rtx;
2462 ops[n_ops++].neg = this_neg;
2463 ops[i].op = XEXP (this_op, 0);
2464 ops[i].neg = !this_neg;
2472 ops[i].op = neg_const_int (mode, this_op);
2485 /* If we only have two operands, we can't do anything. */
2486 if (n_ops <= 2 && !force)
2489 /* Count the number of CONSTs we didn't split above. */
2490 for (i = 0; i < n_ops; i++)
2491 if (GET_CODE (ops[i].op) == CONST)
2494 /* Now simplify each pair of operands until nothing changes. The first
2495 time through just simplify constants against each other. */
2502 for (i = 0; i < n_ops - 1; i++)
2503 for (j = i + 1; j < n_ops; j++)
2505 rtx lhs = ops[i].op, rhs = ops[j].op;
2506 int lneg = ops[i].neg, rneg = ops[j].neg;
2508 if (lhs != 0 && rhs != 0
2509 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2511 enum rtx_code ncode = PLUS;
2517 tem = lhs, lhs = rhs, rhs = tem;
2519 else if (swap_commutative_operands_p (lhs, rhs))
2520 tem = lhs, lhs = rhs, rhs = tem;
2522 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2524 /* Reject "simplifications" that just wrap the two
2525 arguments in a CONST. Failure to do so can result
2526 in infinite recursion with simplify_binary_operation
2527 when it calls us to simplify CONST operations. */
2529 && ! (GET_CODE (tem) == CONST
2530 && GET_CODE (XEXP (tem, 0)) == ncode
2531 && XEXP (XEXP (tem, 0), 0) == lhs
2532 && XEXP (XEXP (tem, 0), 1) == rhs)
2533 /* Don't allow -x + -1 -> ~x simplifications in the
2534 first pass. This gives us the chance to combine
2535 the -1 with other constants. */
2537 && GET_CODE (tem) == NOT
2538 && XEXP (tem, 0) == rhs))
2541 if (GET_CODE (tem) == NEG)
2542 tem = XEXP (tem, 0), lneg = !lneg;
2543 if (GET_CODE (tem) == CONST_INT && lneg)
2544 tem = neg_const_int (mode, tem), lneg = 0;
2548 ops[j].op = NULL_RTX;
2558 /* Pack all the operands to the lower-numbered entries. */
2559 for (i = 0, j = 0; j < n_ops; j++)
2564 /* Sort the operations based on swap_commutative_operands_p. */
2565 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2567 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2569 && GET_CODE (ops[1].op) == CONST_INT
2570 && CONSTANT_P (ops[0].op)
2572 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2574 /* We suppressed creation of trivial CONST expressions in the
2575 combination loop to avoid recursion. Create one manually now.
2576 The combination loop should have ensured that there is exactly
2577 one CONST_INT, and the sort will have ensured that it is last
2578 in the array and that any other constant will be next-to-last. */
2581 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2582 && CONSTANT_P (ops[n_ops - 2].op))
2584 rtx value = ops[n_ops - 1].op;
2585 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2586 value = neg_const_int (mode, value);
2587 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2591 /* Count the number of CONSTs that we generated. */
2593 for (i = 0; i < n_ops; i++)
2594 if (GET_CODE (ops[i].op) == CONST)
2597 /* Give up if we didn't reduce the number of operands we had. Make
2598 sure we count a CONST as two operands. If we have the same
2599 number of operands, but have made more CONSTs than before, this
2600 is also an improvement, so accept it. */
2602 && (n_ops + n_consts > input_ops
2603 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2606 /* Put a non-negated operand first, if possible. */
2608 for (i = 0; i < n_ops && ops[i].neg; i++)
2611 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2620 /* Now make the result by performing the requested operations. */
2622 for (i = 1; i < n_ops; i++)
2623 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2624 mode, result, ops[i].op);
2629 /* Like simplify_binary_operation except used for relational operators.
2630 MODE is the mode of the result. If MODE is VOIDmode, both operands must
2633 CMP_MODE specifies the mode in which the comparison is done, so it is
2634 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2635 the operands or, if both are VOIDmode, the operands are compared in
2636 "infinite precision". */
2638 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2639 enum machine_mode cmp_mode, rtx op0, rtx op1)
2641 rtx tem, trueop0, trueop1;
2643 if (cmp_mode == VOIDmode)
2644 cmp_mode = GET_MODE (op0);
2645 if (cmp_mode == VOIDmode)
2646 cmp_mode = GET_MODE (op1);
2648 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2651 #ifdef FLOAT_STORE_FLAG_VALUE
2652 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2654 if (tem == const0_rtx)
2655 return CONST0_RTX (mode);
2656 else
2658 REAL_VALUE_TYPE val;
2659 val = FLOAT_STORE_FLAG_VALUE (mode);
2660 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2668 /* For the following tests, ensure const0_rtx is op1. */
2669 if (swap_commutative_operands_p (op0, op1)
2670 || (op0 == const0_rtx && op1 != const0_rtx))
2671 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2673 /* If op0 is a compare, extract the comparison arguments from it. */
2674 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2675 return simplify_relational_operation (code, mode, VOIDmode,
2676 XEXP (op0, 0), XEXP (op0, 1));
2678 if (mode == VOIDmode
2679 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2683 trueop0 = avoid_constant_pool_reference (op0);
2684 trueop1 = avoid_constant_pool_reference (op1);
2685 return simplify_relational_operation_1 (code, mode, cmp_mode,
2689 /* This part of simplify_relational_operation is only used when CMP_MODE
2690 is not in class MODE_CC (i.e. it is a real comparison).
2692 MODE is the mode of the result, while CMP_MODE specifies the mode
2693 in which the comparison is done, so it is the mode of the operands. */
2695 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2696 enum machine_mode cmp_mode, rtx op0, rtx op1)
2698 if (GET_CODE (op1) == CONST_INT)
2700 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2702 /* If op0 is a comparison, extract the comparison arguments from it. */
2705 if (GET_MODE (op0) == cmp_mode)
2706 return simplify_rtx (op0);
2708 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2709 XEXP (op0, 0), XEXP (op0, 1));
2711 else if (code == EQ)
2713 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
2715 return simplify_gen_relational (new, mode, VOIDmode,
2716 XEXP (op0, 0), XEXP (op0, 1));
2724 /* Check if the given comparison (done in the given MODE) is actually a
2725 tautology or a contradiction.
2726 If no simplification is possible, this function returns zero.
2727 Otherwise, it returns either const_true_rtx or const0_rtx. */
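/* For example, with X in QImode, (gt X (const_int 127)) folds to
   const0_rtx via the mode-bounds checks below: 127 is the largest
   signed QImode value, so x > max can never hold.  */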
2730 simplify_const_relational_operation (enum rtx_code code,
2731 enum machine_mode mode,
2734 int equal, op0lt, op0ltu, op1lt, op1ltu;
2739 if (mode == VOIDmode
2740 && (GET_MODE (op0) != VOIDmode
2741 || GET_MODE (op1) != VOIDmode))
2744 /* If op0 is a compare, extract the comparison arguments from it. */
2745 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2746 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2748 /* We can't simplify MODE_CC values since we don't know what the
2749 actual comparison is. */
2750 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2753 /* Make sure the constant is second. */
2754 if (swap_commutative_operands_p (op0, op1))
2756 tem = op0, op0 = op1, op1 = tem;
2757 code = swap_condition (code);
2760 trueop0 = avoid_constant_pool_reference (op0);
2761 trueop1 = avoid_constant_pool_reference (op1);
2763 /* For integer comparisons of A and B maybe we can simplify A - B and can
2764 then simplify a comparison of that with zero. If A and B are both either
2765 a register or a CONST_INT, this can't help; testing for these cases will
2766 prevent infinite recursion here and speed things up.
2768 If CODE is an unsigned comparison, then we can never do this optimization,
2769 because it gives an incorrect result if the subtraction wraps around zero.
2770 ANSI C defines unsigned operations such that they never overflow, and
2771 thus such cases cannot be ignored; but we cannot do it even for
2772 signed comparisons in languages such as Java, so test flag_wrapv. */
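/* An example of the wrap-around hazard: in QImode, 0 is unsigned-less-than
   0xff, yet 0 - 0xff wraps to 1 and 1 < 0 is false, so rewriting the
   unsigned comparison as a signed test against zero would fold to the
   wrong answer.  */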
2774 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2775 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2776 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2777 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2778 /* We cannot do this for == or != if tem is a nonzero address. */
2779 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2780 && code != GTU && code != GEU && code != LTU && code != LEU)
2781 return simplify_const_relational_operation (signed_condition (code),
2782 mode, tem, const0_rtx);
2784 if (flag_unsafe_math_optimizations && code == ORDERED)
2785 return const_true_rtx;
2787 if (flag_unsafe_math_optimizations && code == UNORDERED)
2790 /* For modes without NaNs, if the two operands are equal, we know the
2791 result unless they have side effects. */
2792 if (! HONOR_NANS (GET_MODE (trueop0))
2793 && rtx_equal_p (trueop0, trueop1)
2794 && ! side_effects_p (trueop0))
2795 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2797 /* If the operands are floating-point constants, see if we can fold the comparison. */
2799 else if (GET_CODE (trueop0) == CONST_DOUBLE
2800 && GET_CODE (trueop1) == CONST_DOUBLE
2801 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2803 REAL_VALUE_TYPE d0, d1;
2805 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2806 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2808 /* Comparisons are unordered iff at least one of the values is NaN. */
2809 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2819 return const_true_rtx;
2832 equal = REAL_VALUES_EQUAL (d0, d1);
2833 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2834 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2837 /* Otherwise, see if the operands are both integers. */
2838 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2839 && (GET_CODE (trueop0) == CONST_DOUBLE
2840 || GET_CODE (trueop0) == CONST_INT)
2841 && (GET_CODE (trueop1) == CONST_DOUBLE
2842 || GET_CODE (trueop1) == CONST_INT))
2844 int width = GET_MODE_BITSIZE (mode);
2845 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2846 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2848 /* Get the two words comprising each integer constant. */
2849 if (GET_CODE (trueop0) == CONST_DOUBLE)
2851 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2852 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2856 l0u = l0s = INTVAL (trueop0);
2857 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2860 if (GET_CODE (trueop1) == CONST_DOUBLE)
2862 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2863 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2867 l1u = l1s = INTVAL (trueop1);
2868 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2871 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2872 we have to sign or zero-extend the values. */
2873 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2875 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2876 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2878 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2879 l0s |= ((HOST_WIDE_INT) (-1) << width);
2881 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2882 l1s |= ((HOST_WIDE_INT) (-1) << width);
2884 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2885 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2887 equal = (h0u == h1u && l0u == l1u);
2888 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2889 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2890 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2891 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
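/* For instance, comparing DImode constants -1 and 1 on a 32-bit host
   gives h0s == -1 and h1s == 0, so op0lt is set; viewed unsigned,
   h0u == 0xffffffff exceeds h1u == 0, so op1ltu is set instead.  */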
2894 /* Otherwise, there are some code-specific tests we can make. */
2897 /* Optimize comparisons with upper and lower bounds. */
2898 if (INTEGRAL_MODE_P (mode)
2899 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2912 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
2919 /* x >= min is always true. */
2920 if (rtx_equal_p (trueop1, mmin))
2921 tem = const_true_rtx;
2927 /* x <= max is always true. */
2928 if (rtx_equal_p (trueop1, mmax))
2929 tem = const_true_rtx;
2934 /* x > max is always false. */
2935 if (rtx_equal_p (trueop1, mmax))
2941 /* x < min is always false. */
2942 if (rtx_equal_p (trueop1, mmin))
2949 if (tem == const0_rtx
2950 || tem == const_true_rtx)
2957 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2962 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2963 return const_true_rtx;
2967 /* Optimize abs(x) < 0.0. */
2968 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2970 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2972 if (GET_CODE (tem) == ABS)
2978 /* Optimize abs(x) >= 0.0. */
2979 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2981 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2983 if (GET_CODE (tem) == ABS)
2984 return const_true_rtx;
2989 /* Optimize ! (abs(x) < 0.0). */
2990 if (trueop1 == CONST0_RTX (mode))
2992 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2994 if (GET_CODE (tem) == ABS)
2995 return const_true_rtx;
3006 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set as appropriate. */
3012 return equal ? const_true_rtx : const0_rtx;
3015 return ! equal ? const_true_rtx : const0_rtx;
3018 return op0lt ? const_true_rtx : const0_rtx;
3021 return op1lt ? const_true_rtx : const0_rtx;
3023 return op0ltu ? const_true_rtx : const0_rtx;
3025 return op1ltu ? const_true_rtx : const0_rtx;
3028 return equal || op0lt ? const_true_rtx : const0_rtx;
3031 return equal || op1lt ? const_true_rtx : const0_rtx;
3033 return equal || op0ltu ? const_true_rtx : const0_rtx;
3035 return equal || op1ltu ? const_true_rtx : const0_rtx;
3037 return const_true_rtx;
3045 /* Simplify CODE, an operation with result mode MODE and three operands,
3046 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3047 a constant. Return 0 if no simplification is possible. */
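/* For example, (if_then_else (const_int 1) (reg A) (reg B)) folds to
   (reg A) below, and a ZERO_EXTRACT whose three operands are all
   CONST_INTs folds to the extracted constant field.  */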
3050 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3051 enum machine_mode op0_mode, rtx op0, rtx op1,
3054 unsigned int width = GET_MODE_BITSIZE (mode);
3056 /* VOIDmode means "infinite" precision. */
3058 width = HOST_BITS_PER_WIDE_INT;
3064 if (GET_CODE (op0) == CONST_INT
3065 && GET_CODE (op1) == CONST_INT
3066 && GET_CODE (op2) == CONST_INT
3067 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3068 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3070 /* Extracting a bit-field from a constant */
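/* E.g. on a target where BITS_BIG_ENDIAN is false,
   (zero_extract:SI (const_int 0x76) (const_int 4) (const_int 1))
   shifts 0x76 right by 1 and masks to 4 bits, giving (const_int 11).  */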
3071 HOST_WIDE_INT val = INTVAL (op0);
3073 if (BITS_BIG_ENDIAN)
3074 val >>= (GET_MODE_BITSIZE (op0_mode)
3075 - INTVAL (op2) - INTVAL (op1));
3077 val >>= INTVAL (op2);
3079 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3081 /* First zero-extend. */
3082 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3083 /* If desired, propagate sign bit. */
3084 if (code == SIGN_EXTRACT
3085 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3086 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3089 /* Clear the bits that don't belong in our mode,
3090 unless they and our sign bit are all one.
3091 So we get either a reasonable negative value or a reasonable
3092 unsigned value for this mode. */
3093 if (width < HOST_BITS_PER_WIDE_INT
3094 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3095 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3096 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3098 return GEN_INT (val);
3103 if (GET_CODE (op0) == CONST_INT)
3104 return op0 != const0_rtx ? op1 : op2;
3106 /* Convert c ? a : a into "a". */
3107 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3110 /* Convert a != b ? a : b into "a". */
3111 if (GET_CODE (op0) == NE
3112 && ! side_effects_p (op0)
3113 && ! HONOR_NANS (mode)
3114 && ! HONOR_SIGNED_ZEROS (mode)
3115 && ((rtx_equal_p (XEXP (op0, 0), op1)
3116 && rtx_equal_p (XEXP (op0, 1), op2))
3117 || (rtx_equal_p (XEXP (op0, 0), op2)
3118 && rtx_equal_p (XEXP (op0, 1), op1))))
3121 /* Convert a == b ? a : b into "b". */
3122 if (GET_CODE (op0) == EQ
3123 && ! side_effects_p (op0)
3124 && ! HONOR_NANS (mode)
3125 && ! HONOR_SIGNED_ZEROS (mode)
3126 && ((rtx_equal_p (XEXP (op0, 0), op1)
3127 && rtx_equal_p (XEXP (op0, 1), op2))
3128 || (rtx_equal_p (XEXP (op0, 0), op2)
3129 && rtx_equal_p (XEXP (op0, 1), op1))))
3132 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3134 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3135 ? GET_MODE (XEXP (op0, 1))
3136 : GET_MODE (XEXP (op0, 0)));
3139 /* Look for happy constants in op1 and op2. */
3140 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3142 HOST_WIDE_INT t = INTVAL (op1);
3143 HOST_WIDE_INT f = INTVAL (op2);
3145 if (t == STORE_FLAG_VALUE && f == 0)
3146 code = GET_CODE (op0);
3147 else if (t == 0 && f == STORE_FLAG_VALUE)
3150 tmp = reversed_comparison_code (op0, NULL_RTX);
3158 return simplify_gen_relational (code, mode, cmp_mode,
3159 XEXP (op0, 0), XEXP (op0, 1));
3162 if (cmp_mode == VOIDmode)
3163 cmp_mode = op0_mode;
3164 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3165 cmp_mode, XEXP (op0, 0),
3168 /* See if any simplifications were possible. */
3171 if (GET_CODE (temp) == CONST_INT)
3172 return temp == const0_rtx ? op2 : op1;
3174 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3180 if (GET_MODE (op0) != mode
3181 || GET_MODE (op1) != mode
3182 || !VECTOR_MODE_P (mode))
3184 op2 = avoid_constant_pool_reference (op2);
3185 if (GET_CODE (op2) == CONST_INT)
3187 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3188 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3189 int mask = (1 << n_elts) - 1;
3191 if (!(INTVAL (op2) & mask))
3193 if ((INTVAL (op2) & mask) == mask)
3196 op0 = avoid_constant_pool_reference (op0);
3197 op1 = avoid_constant_pool_reference (op1);
3198 if (GET_CODE (op0) == CONST_VECTOR
3199 && GET_CODE (op1) == CONST_VECTOR)
3201 rtvec v = rtvec_alloc (n_elts);
3204 for (i = 0; i < n_elts; i++)
3205 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3206 ? CONST_VECTOR_ELT (op0, i)
3207 : CONST_VECTOR_ELT (op1, i));
3208 return gen_rtx_CONST_VECTOR (mode, v);
3220 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3221 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3223 Works by unpacking OP into a collection of 8-bit values
3224 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3225 and then repacking them again for OUTERMODE. */
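/* For instance, a SUBREG:HI of (const_int 0x12345678) at byte 0 unpacks
   the constant into the little-endian byte array {0x78, 0x56, 0x34, 0x12};
   on a little-endian target the first two bytes are then repacked,
   giving (const_int 0x5678).  */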
3228 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3229 enum machine_mode innermode, unsigned int byte)
3231 /* We support up to 512-bit values (for V8DFmode). */
3235 value_mask = (1 << value_bit) - 1
3237 unsigned char value[max_bitsize / value_bit];
3246 rtvec result_v = NULL;
3247 enum mode_class outer_class;
3248 enum machine_mode outer_submode;
3250 /* Some ports misuse CCmode. */
3251 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3254 /* Unpack the value. */
3256 if (GET_CODE (op) == CONST_VECTOR)
3258 num_elem = CONST_VECTOR_NUNITS (op);
3259 elems = &CONST_VECTOR_ELT (op, 0);
3260 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3266 elem_bitsize = max_bitsize;
3269 if (BITS_PER_UNIT % value_bit != 0)
3270 abort (); /* Too complicated; reducing value_bit may help. */
3271 if (elem_bitsize % BITS_PER_UNIT != 0)
3272 abort (); /* I don't know how to handle endianness of sub-units. */
3274 for (elem = 0; elem < num_elem; elem++)
3277 rtx el = elems[elem];
3279 /* Vectors are kept in target memory order. (This is probably a mistake.) */
3282 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3283 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3285 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3286 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3287 unsigned bytele = (subword_byte % UNITS_PER_WORD
3288 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3289 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3292 switch (GET_CODE (el))
3296 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3298 *vp++ = INTVAL (el) >> i;
3299 /* CONST_INTs are always logically sign-extended. */
3300 for (; i < elem_bitsize; i += value_bit)
3301 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3305 if (GET_MODE (el) == VOIDmode)
3307 /* If this triggers, someone should have generated a
3308 CONST_INT instead. */
3309 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3312 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3313 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3314 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3317 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3320 /* It shouldn't matter what's done here, so fill it with zero. */
3322 for (; i < max_bitsize; i += value_bit)
3325 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3327 long tmp[max_bitsize / 32];
3328 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3330 if (bitsize > elem_bitsize)
3332 if (bitsize % value_bit != 0)
3335 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3338 /* real_to_target produces its result in words affected by
3339 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3340 and use WORDS_BIG_ENDIAN instead; see the documentation
3341 of SUBREG in rtl.texi. */
3342 for (i = 0; i < bitsize; i += value_bit)
3345 if (WORDS_BIG_ENDIAN)
3346 ibase = bitsize - 1 - i;
3349 *vp++ = tmp[ibase / 32] >> i % 32;
3352 /* It shouldn't matter what's done here, so fill it with zero. */
3354 for (; i < elem_bitsize; i += value_bit)
3366 /* Now, pick the right byte to start with. */
3367 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3368 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3369 will already have offset 0. */
3370 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3372 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3374 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3375 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3376 byte = (subword_byte % UNITS_PER_WORD
3377 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3380 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3381 so if it's become negative it will instead be very large.) */
3382 if (byte >= GET_MODE_SIZE (innermode))
3385 /* Convert from bytes to chunks of size value_bit. */
3386 value_start = byte * (BITS_PER_UNIT / value_bit);
3388 /* Re-pack the value. */
3390 if (VECTOR_MODE_P (outermode))
3392 num_elem = GET_MODE_NUNITS (outermode);
3393 result_v = rtvec_alloc (num_elem);
3394 elems = &RTVEC_ELT (result_v, 0);
3395 outer_submode = GET_MODE_INNER (outermode);
3401 outer_submode = outermode;
3404 outer_class = GET_MODE_CLASS (outer_submode);
3405 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3407 if (elem_bitsize % value_bit != 0)
3409 if (elem_bitsize + value_start * value_bit > max_bitsize)
3412 for (elem = 0; elem < num_elem; elem++)
3416 /* Vectors are stored in target memory order. (This is probably a mistake.) */
3419 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3420 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3422 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3423 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3424 unsigned bytele = (subword_byte % UNITS_PER_WORD
3425 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3426 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3429 switch (outer_class)
3432 case MODE_PARTIAL_INT:
3434 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3437 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3439 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3440 for (; i < elem_bitsize; i += value_bit)
3441 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3442 << (i - HOST_BITS_PER_WIDE_INT));
3444 /* immed_double_const doesn't call trunc_int_for_mode. I don't know why. */
3446 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3447 elems[elem] = gen_int_mode (lo, outer_submode);
3449 elems[elem] = immed_double_const (lo, hi, outer_submode);
3456 long tmp[max_bitsize / 32];
3458 /* real_from_target wants its input in words affected by
3459 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3460 and use WORDS_BIG_ENDIAN instead; see the documentation
3461 of SUBREG in rtl.texi. */
3462 for (i = 0; i < max_bitsize / 32; i++)
3464 for (i = 0; i < elem_bitsize; i += value_bit)
3467 if (WORDS_BIG_ENDIAN)
3468 ibase = elem_bitsize - 1 - i;
3471 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3474 real_from_target (&r, tmp, outer_submode);
3475 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3483 if (VECTOR_MODE_P (outermode))
3484 return gen_rtx_CONST_VECTOR (outermode, result_v);
3489 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3490 Return 0 if no simplifications are possible. */
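/* For example, simplify_subreg (QImode, GEN_INT (0x1234), SImode, 0)
   on a little-endian target returns (const_int 0x34), the low byte of
   the SImode constant, via simplify_immed_subreg below.  */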
3492 simplify_subreg (enum machine_mode outermode, rtx op,
3493 enum machine_mode innermode, unsigned int byte)
3495 /* Little bit of sanity checking. */
3496 if (innermode == VOIDmode || outermode == VOIDmode
3497 || innermode == BLKmode || outermode == BLKmode)
3500 if (GET_MODE (op) != innermode
3501 && GET_MODE (op) != VOIDmode)
3504 if (byte % GET_MODE_SIZE (outermode)
3505 || byte >= GET_MODE_SIZE (innermode))
3508 if (outermode == innermode && !byte)
3511 if (GET_CODE (op) == CONST_INT
3512 || GET_CODE (op) == CONST_DOUBLE
3513 || GET_CODE (op) == CONST_VECTOR)
3514 return simplify_immed_subreg (outermode, op, innermode, byte);
3516 /* Changing mode twice with SUBREG => just change it once,
3517 or not at all if changing back to the starting mode of OP. */
3518 if (GET_CODE (op) == SUBREG)
3520 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3521 int final_offset = byte + SUBREG_BYTE (op);
3524 if (outermode == innermostmode
3525 && byte == 0 && SUBREG_BYTE (op) == 0)
3526 return SUBREG_REG (op);
3528 /* SUBREG_BYTE represents the offset, as if the value were stored
3529 in memory. An irritating exception is a paradoxical subreg, where
3530 we define SUBREG_BYTE to be 0. On big-endian machines, this
3531 value should be negative. For a moment, undo this exception. */
3532 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3534 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3535 if (WORDS_BIG_ENDIAN)
3536 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3537 if (BYTES_BIG_ENDIAN)
3538 final_offset += difference % UNITS_PER_WORD;
3540 if (SUBREG_BYTE (op) == 0
3541 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3543 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3544 if (WORDS_BIG_ENDIAN)
3545 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3546 if (BYTES_BIG_ENDIAN)
3547 final_offset += difference % UNITS_PER_WORD;
3550 /* See whether resulting subreg will be paradoxical. */
3551 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3553 /* In nonparadoxical subregs we can't handle negative offsets. */
3554 if (final_offset < 0)
3556 /* Bail out in case resulting subreg would be incorrect. */
3557 if (final_offset % GET_MODE_SIZE (outermode)
3558 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3564 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3566 /* In a paradoxical subreg, see if we are still looking at the lower part.
3567 If so, our SUBREG_BYTE will be 0. */
3568 if (WORDS_BIG_ENDIAN)
3569 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3570 if (BYTES_BIG_ENDIAN)
3571 offset += difference % UNITS_PER_WORD;
3572 if (offset == final_offset)
3578 /* Recurse for further possible simplifications. */
3579 new = simplify_subreg (outermode, SUBREG_REG (op),
3580 GET_MODE (SUBREG_REG (op)),
3584 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3587 /* SUBREG of a hard register => just change the register number
3588 and/or mode. If the hard register is not valid in that mode,
3589 suppress this simplification. If the hard register is the stack,
3590 frame, or argument pointer, leave this as a SUBREG. */
3593 && (! REG_FUNCTION_VALUE_P (op)
3594 || ! rtx_equal_function_value_matters)
3595 && REGNO (op) < FIRST_PSEUDO_REGISTER
3596 #ifdef CANNOT_CHANGE_MODE_CLASS
3597 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3598 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3599 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3601 && ((reload_completed && !frame_pointer_needed)
3602 || (REGNO (op) != FRAME_POINTER_REGNUM
3603 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3604 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3607 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3608 && REGNO (op) != ARG_POINTER_REGNUM
3610 && REGNO (op) != STACK_POINTER_REGNUM
3611 && subreg_offset_representable_p (REGNO (op), innermode,
3614 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3615 int final_regno = subreg_hard_regno (tem, 0);
3617 /* ??? We do allow it if the current REG is not valid for
3618 its mode. This is a kludge to work around how float/complex
3619 arguments are passed on 32-bit SPARC and should be fixed. */
3620 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3621 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3623 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3625 /* Propagate original regno. We don't have any way to specify
3626 the offset inside original regno, so do so only for lowpart.
3627 The information is used only by alias analysis, which cannot
3628 grok a partial register anyway. */
3630 if (subreg_lowpart_offset (outermode, innermode) == byte)
3631 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3636 /* If we have a SUBREG of a register that we are replacing and we are
3637 replacing it with a MEM, make a new MEM and try replacing the
3638 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3639 or if we would be widening it. */
3641 if (GET_CODE (op) == MEM
3642 && ! mode_dependent_address_p (XEXP (op, 0))
3643 /* Allow splitting of volatile memory references in case we don't
3644 have an instruction to move the whole thing. */
3645 && (! MEM_VOLATILE_P (op)
3646 || ! have_insn_for (SET, innermode))
3647 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3648 return adjust_address_nv (op, outermode, byte);
3650 /* Handle complex values represented as CONCAT
3651 of real and imaginary part. */
3652 if (GET_CODE (op) == CONCAT)
3654 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3655 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3656 unsigned int final_offset;
3659 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3660 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3663 /* We can at least simplify it by referring directly to the relevant part. */
3665 return gen_rtx_SUBREG (outermode, part, final_offset);
3668 /* Optimize SUBREG truncations of zero and sign extended values. */
3669 if ((GET_CODE (op) == ZERO_EXTEND
3670 || GET_CODE (op) == SIGN_EXTEND)
3671 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3673 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3675 /* If we're requesting the lowpart of a zero or sign extension,
3676 there are three possibilities. If the outermode is the same
3677 as the origmode, we can omit both the extension and the subreg.
3678 If the outermode is not larger than the origmode, we can apply
3679 the truncation without the extension. Finally, if the outermode
3680 is larger than the origmode, but both are integer modes, we
3681 can just extend to the appropriate mode. */
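/* E.g. on a little-endian target, (subreg:HI (zero_extend:SI (reg:HI R)) 0)
   is simply (reg:HI R), while (subreg:QI (zero_extend:SI (reg:HI R)) 0)
   becomes (subreg:QI (reg:HI R) 0), dropping the now-redundant extension.  */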
3684 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3685 if (outermode == origmode)
3686 return XEXP (op, 0);
3687 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3688 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3689 subreg_lowpart_offset (outermode,
3691 if (SCALAR_INT_MODE_P (outermode))
3692 return simplify_gen_unary (GET_CODE (op), outermode,
3693 XEXP (op, 0), origmode);
3696 /* A SUBREG resulting from a zero extension may fold to zero if
3697 it extracts bits lying above the ZERO_EXTEND's source bits. */
3698 if (GET_CODE (op) == ZERO_EXTEND
3699 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3700 return CONST0_RTX (outermode);
3706 /* Make a SUBREG operation or equivalent if it folds. */
3709 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3710 enum machine_mode innermode, unsigned int byte)
3713 /* Little bit of sanity checking. */
3714 if (innermode == VOIDmode || outermode == VOIDmode
3715 || innermode == BLKmode || outermode == BLKmode)
3718 if (GET_MODE (op) != innermode
3719 && GET_MODE (op) != VOIDmode)
3722 if (byte % GET_MODE_SIZE (outermode)
3723 || byte >= GET_MODE_SIZE (innermode))
3726 if (GET_CODE (op) == QUEUED)
3729 new = simplify_subreg (outermode, op, innermode, byte);
3733 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3736 return gen_rtx_SUBREG (outermode, op, byte);
3738 /* Simplify X, an rtx expression.
3740 Return the simplified expression or NULL if no simplifications can be made.
3743 This is the preferred entry point into the simplification routines;
3744 however, we still allow passes to call the more specific routines.
3746 Right now GCC has three (yes, three) major bodies of RTL simplification
3747 code that need to be unified.
3749 1. fold_rtx in cse.c. This code uses various CSE specific
3750 information to aid in RTL simplification.
3752 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3753 it uses combine specific information to aid in RTL
3756 3. The routines in this file.
3759 Long term we want to only have one body of simplification code; to
3760 get to that state I recommend the following steps:
3762 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3763 which do not rely on pass-dependent state into these routines.
3765 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3766 use this routine whenever possible.
3768 3. Allow for pass dependent state to be provided to these
3769 routines and add simplifications based on the pass dependent
3770 state. Remove code from cse.c & combine.c that becomes redundant.
3773 It will take time, but ultimately the compiler will be easier to
3774 maintain and improve. It's totally silly that when we add a
3775 simplification it needs to be added to 4 places (3 for RTL
3776 simplification and 1 for tree simplification). */
3779 simplify_rtx (rtx x)
3781 enum rtx_code code = GET_CODE (x);
3782 enum machine_mode mode = GET_MODE (x);
3784 switch (GET_RTX_CLASS (code))
3787 return simplify_unary_operation (code, mode,
3788 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3789 case RTX_COMM_ARITH:
3790 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3791 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3793 /* Fall through.... */
3796 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3799 case RTX_BITFIELD_OPS:
3800 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3801 XEXP (x, 0), XEXP (x, 1),
3805 case RTX_COMM_COMPARE:
3806 return simplify_relational_operation (code, mode,
3807 ((GET_MODE (XEXP (x, 0))
3809 ? GET_MODE (XEXP (x, 0))
3810 : GET_MODE (XEXP (x, 1))),
3816 return simplify_gen_subreg (mode, SUBREG_REG (x),
3817 GET_MODE (SUBREG_REG (x)),
3824 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3825 if (GET_CODE (XEXP (x, 0)) == HIGH
3826 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))