/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
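/* For example, with a 32-bit HOST_WIDE_INT the 64-bit value -5 is the
   pair (low, high) == (0xfffffffb, HWI_SIGN_EXTEND (0xfffffffb))
   == (0xfffffffb, -1), whereas 5 is (0x00000005, 0).  */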
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
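/* Example of neg_const_int: in QImode the most negative value is -128;
   its negation, +128, does not fit in 8 bits, so gen_int_mode truncates
   it right back to (const_int -128).  */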
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
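/* Example of mode_signbit_p: for a 32-bit MODE it accepts exactly those
   constants whose low 32 bits are 0x80000000, e.g. the canonical
   (const_int -2147483648) on a host with a 64-bit HOST_WIDE_INT.  */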
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
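/* Example of simplify_gen_binary: (PLUS, SImode, x, const0_rtx) folds
   to X itself, while a non-foldable pair such as two distinct pseudo
   registers yields the rtx (plus:SI x y).  */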
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
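/* Example of simplify_replace_rtx: replacing register R2 with
   (const_int 0) in (plus:SI r1 r2) returns just R1, because the
   rebuilt PLUS is fed back through simplify_gen_binary on the way
   out.  */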
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (trueop) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (trueop)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (trueop)));
        }
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;
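          /* For example, arg0 == 12 (binary 1100): 12 & -12 == 4 and
             exact_log2 (4) == 2, so FFS yields 3, the 1-based position
             of the least significant set bit.  */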
        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;
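          /* The two loops above use the classic arg0 &= arg0 - 1 step,
             which clears the lowest set bit each iteration; e.g.
             arg0 == 11 (binary 1011) takes three iterations, giving
             POPCOUNT 3 and PARITY 1.  */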
        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        default:
          return 0;
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1, hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (trueop));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
          }
          break;
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (COMPARISON_P (op)
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == PLUS
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && mode_signbit_p (mode, XEXP (op, 1))
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }
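          /* For example, in QImode, (not (ashift 1 X)) becomes
             (rotate (const_int -2) X): ~1 is the all-ones pattern with
             the low bit clear, rotated left by the same count.  */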
          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && COMPARISON_P (op)
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);
          break;
        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (LSHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == LSHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          break;
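          /* For instance, with C == 31 in SImode: (lshiftrt X 31) is
             0 or 1 and (ashiftrt X 31) is 0 or -1, so negating either
             form yields the other.  */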
        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && REG_P (SUBREG_REG (op))
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && REG_P (SUBREG_REG (op))
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
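/* Example of simplify_associative_operation: (plus (plus x 3) y) is
   canonicalized to (plus (plus x y) 3), and (plus (plus x 3) 4) folds
   further to (plus x 7) via the nested simplify_binary_operation
   calls.  */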
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (trueop0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (trueop1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && CONSTANT_P (trueop0) && CONSTANT_P (trueop1))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (trueop0) != CONST_VECTOR);
          gcc_assert (GET_CODE (trueop1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = trueop0;
          RTVEC_ELT (v, 1) = trueop1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (trueop0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (trueop1));
          unsigned i;

          gcc_assert (GET_CODE (trueop0) == CONST_VECTOR);
          gcc_assert (GET_CODE (trueop1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (trueop0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (trueop1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          bool inexact;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
          real_convert (&f0, mode, &f0);
          real_convert (&f1, mode, &f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */

          if ((flag_rounding_math
               || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
                   && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return NULL_RTX;

          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
        }
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */
        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             something more expensive than we had before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  rtx orig = gen_rtx_PLUS (mode, op0, op1);
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                         ? tem : orig;
                }
            }
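          /* For example, (plus (mult x 3) x) collapses here to
             (mult x 4), with coeff0 == 3 and coeff1 == 1, provided the
             MULT form is no more expensive by rtx_cost.  */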
          /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == XOR
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (plus_minus_operand_p (op0)
                  || plus_minus_operand_p (op1))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;
        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (REG_P (xop00) && REG_P (xop10)
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);
          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             something more expensive than we had before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  rtx orig = gen_rtx_MINUS (mode, op0, op1);
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                         ? tem : orig;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* (-x - c) may be simplified as (-c - x).  */
          if (GET_CODE (op0) == NEG
              && (GET_CODE (op1) == CONST_INT
                  || GET_CODE (op1) == CONST_DOUBLE))
            {
              tem = simplify_unary_operation (NEG, mode, op1, mode);
              if (tem)
                return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (plus_minus_operand_p (op0)
                  || plus_minus_operand_p (op1))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;
        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1))
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
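          /* For example, (mult x 8) becomes (ashift x 3), since
             exact_log2 (8) == 3.  */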
          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;
        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;

          /* Canonicalize XOR of the most significant bit to PLUS.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (PLUS, mode, op0, op1);
          /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == PLUS
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, XEXP (op0, 1)))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          /* If we are turning off bits already known off in OP0, we need
             not do an AND.  */
          if (GET_CODE (trueop1) == CONST_INT
              && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;

          /* Transform (and (extend X) C) into (zero_extend (and X C)) if
             there are no nonzero bits of C outside of X's mode.  */
          if ((GET_CODE (op0) == SIGN_EXTEND
               || GET_CODE (op0) == ZERO_EXTEND)
              && GET_CODE (trueop1) == CONST_INT
              && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
                  & INTVAL (trueop1)) == 0)
            {
              enum machine_mode imode = GET_MODE (XEXP (op0, 0));
              tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                         gen_int_mode (INTVAL (trueop1),
                                                       imode));
              return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
            }

          /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
             ((A & N) + B) & M -> (A + B) & M
             Similarly if (N & M) == 0,
             ((A | N) + B) & M -> (A + B) & M
             and for - instead of + and/or ^ instead of |.  */
          if (GET_CODE (trueop1) == CONST_INT
              && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && ~INTVAL (trueop1)
              && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
            {
              rtx pmop[2];
              int which;

              pmop[0] = XEXP (op0, 0);
              pmop[1] = XEXP (op0, 1);

              for (which = 0; which < 2; which++)
                {
                  tem = pmop[which];
                  switch (GET_CODE (tem))
                    {
                    case AND:
                      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                          && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                             == INTVAL (trueop1))
                        pmop[which] = XEXP (tem, 0);
                      break;
                    case IOR:
                    case XOR:
                      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                          && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                        pmop[which] = XEXP (tem, 0);
                      break;
                    default:
                      break;
                    }
                }

              if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
                {
                  tem = simplify_gen_binary (GET_CODE (op0), mode,
                                             pmop[0], pmop[1]);
                  return simplify_gen_binary (code, mode, tem, op1);
                }
            }
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case UDIV:
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x/1 is x.  */
          if (trueop1 == const1_rtx)
            {
              /* Handle narrowing UDIV.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              return op0;
            }
          /* Convert divide by power of two into shift.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
          break;
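          /* For example, (udiv x 8) becomes (lshiftrt x 3); this holds
             only for unsigned division, since a signed divide must round
             towards zero.  */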
        case DIV:
          /* Handle floating point and integers separately.  */
          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              /* Maybe change 0.0 / x to 0.0.  This transformation isn't
                 safe for modes with NaNs, since 0.0 / 0.0 will then be
                 NaN rather than 0.0.  Nor is it safe for modes with signed
                 zeros, since dividing 0 by a negative number gives -0.0  */
              if (trueop0 == CONST0_RTX (mode)
                  && !HONOR_NANS (mode)
                  && !HONOR_SIGNED_ZEROS (mode)
                  && ! side_effects_p (op1))
                return op0;
              /* x/1.0 is x.  */
              if (trueop1 == CONST1_RTX (mode)
                  && !HONOR_SNANS (mode))
                return op0;

              if (GET_CODE (trueop1) == CONST_DOUBLE
                  && trueop1 != CONST0_RTX (mode))
                {
                  REAL_VALUE_TYPE d;
                  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

                  /* x/-1.0 is -x.  */
                  if (REAL_VALUES_EQUAL (d, dconstm1)
                      && !HONOR_SNANS (mode))
                    return simplify_gen_unary (NEG, mode, op0, mode);

                  /* Change FP division by a constant into multiplication.
                     Only do this with -funsafe-math-optimizations.  */
                  if (flag_unsafe_math_optimizations
                      && !REAL_VALUES_EQUAL (d, dconst0))
                    {
                      REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                      tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                      return simplify_gen_binary (MULT, mode, op0, tem);
                    }
                }
            }
          else
            {
              /* 0/x is 0 (or x&0 if x has side-effects).  */
              if (trueop0 == const0_rtx)
                return side_effects_p (op1)
                       ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                       : const0_rtx;
              /* x/1 is x.  */
              if (trueop1 == const1_rtx)
                {
                  /* Handle narrowing DIV.  */
                  rtx x = gen_lowpart_common (mode, op0);
                  if (x)
                    return x;
                  if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                    return gen_lowpart_SUBREG (mode, op0);
                  return op0;
                }
              /* x/-1 is -x.  */
              if (trueop1 == constm1_rtx)
                {
                  rtx x = gen_lowpart_common (mode, op0);
                  if (!x)
                    x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                        ? gen_lowpart_SUBREG (mode, op0) : op0;
                  return simplify_gen_unary (NEG, mode, x, mode);
                }
            }
          break;
        case UMOD:
          /* 0%x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x%1 is 0 (or x&0 if x has side-effects).  */
          if (trueop1 == const1_rtx)
            return side_effects_p (op0)
                   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
                   : const0_rtx;
          /* Implement modulus by power of two as AND.  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));
          break;
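          /* For example, (umod x 8) becomes (and x 7), keeping just the
             low three bits; again this holds only for the unsigned
             operation.  */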
        case MOD:
          /* 0%x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
          if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
            return side_effects_p (op0)
                   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
                   : const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
              gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
              gcc_assert (GET_CODE (trueop1) == PARALLEL);
              gcc_assert (XVECLEN (trueop1, 0) == 1);
              gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                          (trueop1, 0, 0)));
            }
          else
            {
              gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
              gcc_assert (GET_MODE_INNER (mode)
                          == GET_MODE_INNER (GET_MODE (trueop0)));
              gcc_assert (GET_CODE (trueop1) == PARALLEL);

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      gcc_assert (GET_CODE (x) == CONST_INT);
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                           INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          return 0;
        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            gcc_assert (VECTOR_MODE_P (mode));
            gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                        == GET_MODE_SIZE (mode));

            if (VECTOR_MODE_P (op0_mode))
              gcc_assert (GET_MODE_INNER (mode)
                          == GET_MODE_INNER (op0_mode));
            else
              gcc_assert (GET_MODE_INNER (mode) == op0_mode);

            if (VECTOR_MODE_P (op1_mode))
              gcc_assert (GET_MODE_INNER (mode)
                          == GET_MODE_INNER (op1_mode));
            else
              gcc_assert (GET_MODE_INNER (mode) == op1_mode);

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
          }
          return 0;

        default:
          break;
        }

      return 0;
    }
2304 /* Get the integer argument values in two forms:
2305 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2307 arg0 = INTVAL (trueop0);
2308 arg1 = INTVAL (trueop1);
2310 if (width < HOST_BITS_PER_WIDE_INT)
2312 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2313 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2316 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2317 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2320 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2321 arg1s |= ((HOST_WIDE_INT) (-1) << width);
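/* Worked example (illustrative): with width == 8, a trueop1 of
   (const_int 255) gives arg1 == 0xff in the zero-extended view but
   arg1s == -1 in the sign-extended view, so DIV below divides by -1
   while UDIV divides by 255. */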
2329 /* Compute the value of the arithmetic. */
2334 val = arg0s + arg1s;
2338 val = arg0s - arg1s;
2342 val = arg0s * arg1s;
2347 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2350 val = arg0s / arg1s;
2355 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2358 val = arg0s % arg1s;
2363 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2366 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2371 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2374 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2392 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
2393 value is in range. We can't return any old value for out-of-range
2394 arguments because either the middle-end (via shift_truncation_mask)
2395 or the back-end might be relying on target-specific knowledge.
2396 Nor can we rely on shift_truncation_mask, since the shift might
2397 not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
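/* For instance (illustrative), with SHIFT_COUNT_TRUNCATED and a 32-bit
   mode, a shift count of 33 is reduced to 33 % 32 == 1, matching what
   a truncating hardware shifter would do; without SHIFT_COUNT_TRUNCATED
   we refuse to fold an out-of-range count at all. */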
2398 if (SHIFT_COUNT_TRUNCATED)
2399 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2400 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2403 val = (code == ASHIFT
2404 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2405 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2407 /* Sign-extend the result for arithmetic right shifts. */
2408 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2409 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
2417 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2418 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2426 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2427 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2431 /* Do nothing here. */
2435 val = arg0s <= arg1s ? arg0s : arg1s;
2439 val = ((unsigned HOST_WIDE_INT) arg0
2440 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2444 val = arg0s > arg1s ? arg0s : arg1s;
2448 val = ((unsigned HOST_WIDE_INT) arg0
2449 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2456 /* ??? There are simplifications that can be done. */
2463 val = trunc_int_for_mode (val, mode);
2465 return GEN_INT (val);
2468 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2471 Rather than test for specific cases, we do this by a brute-force method
2472 and do all possible simplifications until no more changes occur. Then
2473 we rebuild the operation.
2475 If FORCE is true, then always generate the rtx. This is used to
2476 canonicalize stuff emitted from simplify_gen_binary. Note that this
2477 can still fail if the rtx is too complex. It won't fail just because
2478 the result is not 'simpler' than the input, however. */
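/* A sketch of the effect (illustrative): simplifying
   (plus (plus (reg) (const_int 4)) (const_int -4)) decomposes the
   expression into the operand array { (reg), 4, -4 }, the constants
   cancel in the pairwise loop below, and the result is rebuilt as
   just (reg). */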
2480 struct simplify_plus_minus_op_data
2487 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2489 const struct simplify_plus_minus_op_data *d1 = p1;
2490 const struct simplify_plus_minus_op_data *d2 = p2;
2492 return (commutative_operand_precedence (d2->op)
2493 - commutative_operand_precedence (d1->op));
2497 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2500 struct simplify_plus_minus_op_data ops[8];
2502 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2506 memset (ops, 0, sizeof ops);
2508 /* Set up the two operands and then expand them until nothing has been
2509 changed. If we run out of room in our array, give up; this should
2510 almost never happen. */
2515 ops[1].neg = (code == MINUS);
2521 for (i = 0; i < n_ops; i++)
2523 rtx this_op = ops[i].op;
2524 int this_neg = ops[i].neg;
2525 enum rtx_code this_code = GET_CODE (this_op);
2534 ops[n_ops].op = XEXP (this_op, 1);
2535 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2538 ops[i].op = XEXP (this_op, 0);
2544 ops[i].op = XEXP (this_op, 0);
2545 ops[i].neg = ! this_neg;
2551 && GET_CODE (XEXP (this_op, 0)) == PLUS
2552 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2553 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2555 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2556 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2557 ops[n_ops].neg = this_neg;
2565 /* ~a -> (-a - 1) */
2568 ops[n_ops].op = constm1_rtx;
2569 ops[n_ops++].neg = this_neg;
2570 ops[i].op = XEXP (this_op, 0);
2571 ops[i].neg = !this_neg;
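/* e.g. (not (reg)) entering with this_neg == 0 becomes the pair
   { (reg), neg = 1 } and { (const_int -1), neg = 0 }, i.e. -reg - 1
   (illustrative). */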
2579 ops[i].op = neg_const_int (mode, this_op);
2592 /* If we only have two operands, we can't do anything. */
2593 if (n_ops <= 2 && !force)
2596 /* Count the number of CONSTs we didn't split above. */
2597 for (i = 0; i < n_ops; i++)
2598 if (GET_CODE (ops[i].op) == CONST)
2601 /* Now simplify each pair of operands until nothing changes. The first
2602 time through just simplify constants against each other. */
2609 for (i = 0; i < n_ops - 1; i++)
2610 for (j = i + 1; j < n_ops; j++)
2612 rtx lhs = ops[i].op, rhs = ops[j].op;
2613 int lneg = ops[i].neg, rneg = ops[j].neg;
2615 if (lhs != 0 && rhs != 0
2616 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2618 enum rtx_code ncode = PLUS;
2624 tem = lhs, lhs = rhs, rhs = tem;
2626 else if (swap_commutative_operands_p (lhs, rhs))
2627 tem = lhs, lhs = rhs, rhs = tem;
2629 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2631 /* Reject "simplifications" that just wrap the two
2632 arguments in a CONST. Failure to do so can result
2633 in infinite recursion with simplify_binary_operation
2634 when it calls us to simplify CONST operations. */
2636 && ! (GET_CODE (tem) == CONST
2637 && GET_CODE (XEXP (tem, 0)) == ncode
2638 && XEXP (XEXP (tem, 0), 0) == lhs
2639 && XEXP (XEXP (tem, 0), 1) == rhs)
2640 /* Don't allow -x + -1 -> ~x simplifications in the
2641 first pass. This gives us a chance to combine
2642 the -1 with other constants. */
2644 && GET_CODE (tem) == NOT
2645 && XEXP (tem, 0) == rhs))
2648 if (GET_CODE (tem) == NEG)
2649 tem = XEXP (tem, 0), lneg = !lneg;
2650 if (GET_CODE (tem) == CONST_INT && lneg)
2651 tem = neg_const_int (mode, tem), lneg = 0;
2655 ops[j].op = NULL_RTX;
2665 /* Pack all the operands to the lower-numbered entries. */
2666 for (i = 0, j = 0; j < n_ops; j++)
2671 /* Sort the operations based on swap_commutative_operands_p. */
2672 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2674 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2676 && GET_CODE (ops[1].op) == CONST_INT
2677 && CONSTANT_P (ops[0].op)
2679 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2681 /* We suppressed creation of trivial CONST expressions in the
2682 combination loop to avoid recursion. Create one manually now.
2683 The combination loop should have ensured that there is exactly
2684 one CONST_INT, and the sort will have ensured that it is last
2685 in the array and that any other constant will be next-to-last. */
2688 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2689 && CONSTANT_P (ops[n_ops - 2].op))
2691 rtx value = ops[n_ops - 1].op;
2692 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2693 value = neg_const_int (mode, value);
2694 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2698 /* Count the number of CONSTs that we generated. */
2700 for (i = 0; i < n_ops; i++)
2701 if (GET_CODE (ops[i].op) == CONST)
2704 /* Give up if we didn't reduce the number of operands we had. Make
2705 sure we count a CONST as two operands. If we have the same
2706 number of operands, but have made more CONSTs than before, this
2707 is also an improvement, so accept it. */
2709 && (n_ops + n_consts > input_ops
2710 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2713 /* Put a non-negated operand first, if possible. */
2715 for (i = 0; i < n_ops && ops[i].neg; i++)
2718 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2727 /* Now make the result by performing the requested operations. */
2729 for (i = 1; i < n_ops; i++)
2730 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2731 mode, result, ops[i].op);
2736 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2738 plus_minus_operand_p (rtx x)
2740 return GET_CODE (x) == PLUS
2741 || GET_CODE (x) == MINUS
2742 || (GET_CODE (x) == CONST
2743 && GET_CODE (XEXP (x, 0)) == PLUS
2744 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2745 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2748 /* Like simplify_binary_operation except used for relational operators.
2749 MODE is the mode of the result. If MODE is VOIDmode, the operands must
2750 not both be VOIDmode.
2752 CMP_MODE specifies the mode in which the comparison is done, so it is
2753 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2754 the operands or, if both are VOIDmode, the operands are compared in
2755 "infinite precision". */
2757 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2758 enum machine_mode cmp_mode, rtx op0, rtx op1)
2760 rtx tem, trueop0, trueop1;
2762 if (cmp_mode == VOIDmode)
2763 cmp_mode = GET_MODE (op0);
2764 if (cmp_mode == VOIDmode)
2765 cmp_mode = GET_MODE (op1);
2767 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2770 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2772 if (tem == const0_rtx)
2773 return CONST0_RTX (mode);
2774 #ifdef FLOAT_STORE_FLAG_VALUE
2776 REAL_VALUE_TYPE val;
2777 val = FLOAT_STORE_FLAG_VALUE (mode);
2778 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2784 if (VECTOR_MODE_P (mode))
2786 if (tem == const0_rtx)
2787 return CONST0_RTX (mode);
2788 #ifdef VECTOR_STORE_FLAG_VALUE
2793 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2794 if (val == NULL_RTX)
2796 if (val == const1_rtx)
2797 return CONST1_RTX (mode);
2799 units = GET_MODE_NUNITS (mode);
2800 v = rtvec_alloc (units);
2801 for (i = 0; i < units; i++)
2802 RTVEC_ELT (v, i) = val;
2803 return gen_rtx_raw_CONST_VECTOR (mode, v);
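/* Illustrative: if VECTOR_STORE_FLAG_VALUE is (const_int -1) for
   V4SImode, a true comparison is materialized here as
   (const_vector:V4SI [-1 -1 -1 -1]). */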
2813 /* For the following tests, ensure const0_rtx is op1. */
2814 if (swap_commutative_operands_p (op0, op1)
2815 || (op0 == const0_rtx && op1 != const0_rtx))
2816 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2818 /* If op0 is a compare, extract the comparison arguments from it. */
2819 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2820 return simplify_relational_operation (code, mode, VOIDmode,
2821 XEXP (op0, 0), XEXP (op0, 1));
2823 if (mode == VOIDmode
2824 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2828 trueop0 = avoid_constant_pool_reference (op0);
2829 trueop1 = avoid_constant_pool_reference (op1);
2830 return simplify_relational_operation_1 (code, mode, cmp_mode,
2834 /* This part of simplify_relational_operation is only used when CMP_MODE
2835 is not in class MODE_CC (i.e. it is a real comparison).
2837 MODE is the mode of the result, while CMP_MODE specifies the mode in
2838 which the comparison is done, so it is the mode of the operands.
2841 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2842 enum machine_mode cmp_mode, rtx op0, rtx op1)
2844 enum rtx_code op0code = GET_CODE (op0);
2846 if (GET_CODE (op1) == CONST_INT)
2848 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2850 /* If op0 is a comparison, extract the comparison arguments from it. */
2853 if (GET_MODE (op0) == cmp_mode)
2854 return simplify_rtx (op0);
2856 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2857 XEXP (op0, 0), XEXP (op0, 1));
2859 else if (code == EQ)
2861 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2862 if (new_code != UNKNOWN)
2863 return simplify_gen_relational (new_code, mode, VOIDmode,
2864 XEXP (op0, 0), XEXP (op0, 1));
2869 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
2870 if ((code == EQ || code == NE)
2871 && (op0code == PLUS || op0code == MINUS)
2873 && CONSTANT_P (XEXP (op0, 1))
2874 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2876 rtx x = XEXP (op0, 0);
2877 rtx c = XEXP (op0, 1);
2879 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2881 return simplify_gen_relational (code, mode, cmp_mode, x, c);
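/* e.g. (eq (plus x (const_int 3)) (const_int 5)) becomes
   (eq x (const_int 2)) (illustrative). */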
2884 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
2885 the same as (zero_extract:SI FOO (const_int 1) BAR). */
2887 && op1 == const0_rtx
2888 && GET_MODE_CLASS (mode) == MODE_INT
2889 && cmp_mode != VOIDmode
2890 && cmp_mode != BImode
2891 && nonzero_bits (op0, cmp_mode) == 1
2892 && STORE_FLAG_VALUE == 1)
2893 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
2894 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
2895 : lowpart_subreg (mode, op0, cmp_mode);
2900 /* Check if the given comparison (done in the given MODE) is actually a
2901 tautology or a contradiction.
2902 If no simplification is possible, this function returns zero.
2903 Otherwise, it returns either const_true_rtx or const0_rtx. */
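/* For example (illustrative), (gt (const_int 2) (const_int 1)) is a
   tautology and folds to const_true_rtx, while (ltu x (const_int 0))
   is a contradiction and folds to const0_rtx. */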
2906 simplify_const_relational_operation (enum rtx_code code,
2907 enum machine_mode mode,
2910 int equal, op0lt, op0ltu, op1lt, op1ltu;
2915 gcc_assert (mode != VOIDmode
2916 || (GET_MODE (op0) == VOIDmode
2917 && GET_MODE (op1) == VOIDmode));
2919 /* If op0 is a compare, extract the comparison arguments from it. */
2920 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2921 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2923 /* We can't simplify MODE_CC values since we don't know what the
2924 actual comparison is. */
2925 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2928 /* Make sure the constant is second. */
2929 if (swap_commutative_operands_p (op0, op1))
2931 tem = op0, op0 = op1, op1 = tem;
2932 code = swap_condition (code);
2935 trueop0 = avoid_constant_pool_reference (op0);
2936 trueop1 = avoid_constant_pool_reference (op1);
2938 /* For integer comparisons of A and B, maybe we can simplify A - B and
2939 then simplify a comparison of that with zero. If A and B are both either
2940 a register or a CONST_INT, this can't help; testing for these cases will
2941 prevent infinite recursion here and speed things up.
2943 If CODE is an unsigned comparison, then we can never do this optimization,
2944 because it gives an incorrect result if the subtraction wraps around zero.
2945 ANSI C defines unsigned operations such that they never overflow, and
2946 thus such cases cannot be ignored; but we cannot do it even for
2947 signed comparisons for languages such as Java, so test flag_wrapv. */
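/* Illustrative: for (eq (plus x (const_int 1)) x), simplifying the
   MINUS of the operands yields (const_int 1), and comparing that
   against zero folds the whole test to const0_rtx. */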
2949 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2950 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2951 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2952 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2953 /* We cannot do this for == or != if tem is a nonzero address. */
2954 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2955 && code != GTU && code != GEU && code != LTU && code != LEU)
2956 return simplify_const_relational_operation (signed_condition (code),
2957 mode, tem, const0_rtx);
2959 if (flag_unsafe_math_optimizations && code == ORDERED)
2960 return const_true_rtx;
2962 if (flag_unsafe_math_optimizations && code == UNORDERED)
2965 /* For modes without NaNs, if the two operands are equal, we know the
2966 result except if they have side-effects. */
2967 if (! HONOR_NANS (GET_MODE (trueop0))
2968 && rtx_equal_p (trueop0, trueop1)
2969 && ! side_effects_p (trueop0))
2970 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2972 /* If the operands are floating-point constants, see if we can fold
2974 else if (GET_CODE (trueop0) == CONST_DOUBLE
2975 && GET_CODE (trueop1) == CONST_DOUBLE
2976 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2978 REAL_VALUE_TYPE d0, d1;
2980 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2981 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2983 /* Comparisons are unordered iff at least one of the values is NaN. */
2984 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2994 return const_true_rtx;
3007 equal = REAL_VALUES_EQUAL (d0, d1);
3008 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3009 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3012 /* Otherwise, see if the operands are both integers. */
3013 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3014 && (GET_CODE (trueop0) == CONST_DOUBLE
3015 || GET_CODE (trueop0) == CONST_INT)
3016 && (GET_CODE (trueop1) == CONST_DOUBLE
3017 || GET_CODE (trueop1) == CONST_INT))
3019 int width = GET_MODE_BITSIZE (mode);
3020 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3021 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3023 /* Get the two words comprising each integer constant. */
3024 if (GET_CODE (trueop0) == CONST_DOUBLE)
3026 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3027 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3031 l0u = l0s = INTVAL (trueop0);
3032 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3035 if (GET_CODE (trueop1) == CONST_DOUBLE)
3037 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3038 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3042 l1u = l1s = INTVAL (trueop1);
3043 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3046 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3047 we have to sign or zero-extend the values. */
3048 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3050 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3051 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3053 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3054 l0s |= ((HOST_WIDE_INT) (-1) << width);
3056 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3057 l1s |= ((HOST_WIDE_INT) (-1) << width);
3059 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3060 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3062 equal = (h0u == h1u && l0u == l1u);
3063 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3064 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3065 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3066 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
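/* Worked example (illustrative, HOST_BITS_PER_WIDE_INT == 32, width 64):
   trueop0 == -1 gives (h0s, l0u) == (-1, 0xffffffff) and trueop1 == 0
   gives (h1s, l1u) == (0, 0), so op0lt is set (signed -1 < 0) while
   op0ltu is clear (-1 is the maximal unsigned value). */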
3069 /* Otherwise, there are some code-specific tests we can make. */
3072 /* Optimize comparisons with upper and lower bounds. */
3073 if (SCALAR_INT_MODE_P (mode)
3074 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3087 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3094 /* x >= min is always true. */
3095 if (rtx_equal_p (trueop1, mmin))
3096 tem = const_true_rtx;
3102 /* x <= max is always true. */
3103 if (rtx_equal_p (trueop1, mmax))
3104 tem = const_true_rtx;
3109 /* x > max is always false. */
3110 if (rtx_equal_p (trueop1, mmax))
3116 /* x < min is always false. */
3117 if (rtx_equal_p (trueop1, mmin))
3124 if (tem == const0_rtx
3125 || tem == const_true_rtx)
3132 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3137 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3138 return const_true_rtx;
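/* e.g. (ne (symbol_ref "foo") (const_int 0)) folds to const_true_rtx
   when nonzero_address_p can prove the symbol's address is never zero
   (illustrative). */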
3142 /* Optimize abs(x) < 0.0. */
3143 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3145 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3147 if (GET_CODE (tem) == ABS)
3153 /* Optimize abs(x) >= 0.0. */
3154 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3156 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3158 if (GET_CODE (tem) == ABS)
3159 return const_true_rtx;
3164 /* Optimize ! (abs(x) < 0.0). */
3165 if (trueop1 == CONST0_RTX (mode))
3167 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3169 if (GET_CODE (tem) == ABS)
3170 return const_true_rtx;
3181 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3187 return equal ? const_true_rtx : const0_rtx;
3190 return ! equal ? const_true_rtx : const0_rtx;
3193 return op0lt ? const_true_rtx : const0_rtx;
3196 return op1lt ? const_true_rtx : const0_rtx;
3198 return op0ltu ? const_true_rtx : const0_rtx;
3200 return op1ltu ? const_true_rtx : const0_rtx;
3203 return equal || op0lt ? const_true_rtx : const0_rtx;
3206 return equal || op1lt ? const_true_rtx : const0_rtx;
3208 return equal || op0ltu ? const_true_rtx : const0_rtx;
3210 return equal || op1ltu ? const_true_rtx : const0_rtx;
3212 return const_true_rtx;
3220 /* Simplify CODE, an operation with result mode MODE and three operands,
3221 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3222 a constant. Return 0 if no simplification is possible. */
3225 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3226 enum machine_mode op0_mode, rtx op0, rtx op1,
3229 unsigned int width = GET_MODE_BITSIZE (mode);
3231 /* VOIDmode means "infinite" precision. */
3233 width = HOST_BITS_PER_WIDE_INT;
3239 if (GET_CODE (op0) == CONST_INT
3240 && GET_CODE (op1) == CONST_INT
3241 && GET_CODE (op2) == CONST_INT
3242 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3243 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3245 /* Extracting a bit-field from a constant */
3246 HOST_WIDE_INT val = INTVAL (op0);
3248 if (BITS_BIG_ENDIAN)
3249 val >>= (GET_MODE_BITSIZE (op0_mode)
3250 - INTVAL (op2) - INTVAL (op1));
3252 val >>= INTVAL (op2);
3254 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3256 /* First zero-extend. */
3257 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3258 /* If desired, propagate sign bit. */
3259 if (code == SIGN_EXTRACT
3260 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3261 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3264 /* Clear the bits that don't belong in our mode,
3265 unless they and our sign bit are all one.
3266 So we get either a reasonable negative value or a reasonable
3267 unsigned value for this mode. */
3268 if (width < HOST_BITS_PER_WIDE_INT
3269 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3270 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3271 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3273 return gen_int_mode (val, mode);
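/* Worked example (illustrative, BITS_BIG_ENDIAN == 0):
   (zero_extract (const_int 0x5a) (const_int 4) (const_int 0)) masks the
   low four bits to give 0xa, while the SIGN_EXTRACT form propagates the
   set bit 3 and yields -6. */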
3278 if (GET_CODE (op0) == CONST_INT)
3279 return op0 != const0_rtx ? op1 : op2;
3281 /* Convert c ? a : a into "a". */
3282 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3285 /* Convert a != b ? a : b into "a". */
3286 if (GET_CODE (op0) == NE
3287 && ! side_effects_p (op0)
3288 && ! HONOR_NANS (mode)
3289 && ! HONOR_SIGNED_ZEROS (mode)
3290 && ((rtx_equal_p (XEXP (op0, 0), op1)
3291 && rtx_equal_p (XEXP (op0, 1), op2))
3292 || (rtx_equal_p (XEXP (op0, 0), op2)
3293 && rtx_equal_p (XEXP (op0, 1), op1))))
3296 /* Convert a == b ? a : b into "b". */
3297 if (GET_CODE (op0) == EQ
3298 && ! side_effects_p (op0)
3299 && ! HONOR_NANS (mode)
3300 && ! HONOR_SIGNED_ZEROS (mode)
3301 && ((rtx_equal_p (XEXP (op0, 0), op1)
3302 && rtx_equal_p (XEXP (op0, 1), op2))
3303 || (rtx_equal_p (XEXP (op0, 0), op2)
3304 && rtx_equal_p (XEXP (op0, 1), op1))))
3307 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3309 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3310 ? GET_MODE (XEXP (op0, 1))
3311 : GET_MODE (XEXP (op0, 0)));
3314 /* Look for happy constants in op1 and op2. */
3315 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3317 HOST_WIDE_INT t = INTVAL (op1);
3318 HOST_WIDE_INT f = INTVAL (op2);
3320 if (t == STORE_FLAG_VALUE && f == 0)
3321 code = GET_CODE (op0);
3322 else if (t == 0 && f == STORE_FLAG_VALUE)
3325 tmp = reversed_comparison_code (op0, NULL_RTX);
3333 return simplify_gen_relational (code, mode, cmp_mode,
3334 XEXP (op0, 0), XEXP (op0, 1));
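/* e.g. with STORE_FLAG_VALUE == 1,
   (if_then_else (lt x y) (const_int 1) (const_int 0)) collapses to the
   comparison (lt x y) itself, and the 0/1-swapped form uses the
   reversed comparison instead (illustrative). */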
3337 if (cmp_mode == VOIDmode)
3338 cmp_mode = op0_mode;
3339 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3340 cmp_mode, XEXP (op0, 0),
3343 /* See if any simplifications were possible. */
3346 if (GET_CODE (temp) == CONST_INT)
3347 return temp == const0_rtx ? op2 : op1;
3349 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3355 gcc_assert (GET_MODE (op0) == mode);
3356 gcc_assert (GET_MODE (op1) == mode);
3357 gcc_assert (VECTOR_MODE_P (mode));
3358 op2 = avoid_constant_pool_reference (op2);
3359 if (GET_CODE (op2) == CONST_INT)
3361 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3362 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3363 int mask = (1 << n_elts) - 1;
3365 if (!(INTVAL (op2) & mask))
3367 if ((INTVAL (op2) & mask) == mask)
3370 op0 = avoid_constant_pool_reference (op0);
3371 op1 = avoid_constant_pool_reference (op1);
3372 if (GET_CODE (op0) == CONST_VECTOR
3373 && GET_CODE (op1) == CONST_VECTOR)
3375 rtvec v = rtvec_alloc (n_elts);
3378 for (i = 0; i < n_elts; i++)
3379 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3380 ? CONST_VECTOR_ELT (op0, i)
3381 : CONST_VECTOR_ELT (op1, i));
3382 return gen_rtx_CONST_VECTOR (mode, v);
3394 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3395 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3397 Works by unpacking OP into a collection of 8-bit values
3398 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3399 and then repacking them again for OUTERMODE. */
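/* Illustrative example: on a little-endian target, evaluating
   (subreg:QI (const_int 0x1234) 0) with HImode inner mode unpacks the
   value into the byte array { 0x34, 0x12 }, selects byte 0, and
   repacks it as (const_int 0x34). */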
3402 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3403 enum machine_mode innermode, unsigned int byte)
3405 /* We support up to 512-bit values (for V8DFmode). */
3409 value_mask = (1 << value_bit) - 1
3411 unsigned char value[max_bitsize / value_bit];
3420 rtvec result_v = NULL;
3421 enum mode_class outer_class;
3422 enum machine_mode outer_submode;
3424 /* Some ports misuse CCmode. */
3425 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3428 /* We have no way to represent a complex constant at the rtl level. */
3429 if (COMPLEX_MODE_P (outermode))
3432 /* Unpack the value. */
3434 if (GET_CODE (op) == CONST_VECTOR)
3436 num_elem = CONST_VECTOR_NUNITS (op);
3437 elems = &CONST_VECTOR_ELT (op, 0);
3438 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3444 elem_bitsize = max_bitsize;
3446 /* If this asserts, it is too complicated; reducing value_bit may help. */
3447 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3448 /* I don't know how to handle endianness of sub-units. */
3449 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3451 for (elem = 0; elem < num_elem; elem++)
3454 rtx el = elems[elem];
3456 /* Vectors are kept in target memory order. (This is probably
3459 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3460 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3462 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3463 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3464 unsigned bytele = (subword_byte % UNITS_PER_WORD
3465 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3466 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3469 switch (GET_CODE (el))
3473 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3475 *vp++ = INTVAL (el) >> i;
3476 /* CONST_INTs are always logically sign-extended. */
3477 for (; i < elem_bitsize; i += value_bit)
3478 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3482 if (GET_MODE (el) == VOIDmode)
3484 /* If this triggers, someone should have generated a
3485 CONST_INT instead. */
3486 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3488 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3489 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3490 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3493 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3496 /* It shouldn't matter what's done here, so fill it with
3498 for (; i < max_bitsize; i += value_bit)
3503 long tmp[max_bitsize / 32];
3504 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3506 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3507 gcc_assert (bitsize <= elem_bitsize);
3508 gcc_assert (bitsize % value_bit == 0);
3510 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3513 /* real_to_target produces its result in words affected by
3514 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3515 and use WORDS_BIG_ENDIAN instead; see the documentation
3516 of SUBREG in rtl.texi. */
3517 for (i = 0; i < bitsize; i += value_bit)
3520 if (WORDS_BIG_ENDIAN)
3521 ibase = bitsize - 1 - i;
3524 *vp++ = tmp[ibase / 32] >> i % 32;
3527 /* It shouldn't matter what's done here, so fill it with
3529 for (; i < elem_bitsize; i += value_bit)
3539 /* Now, pick the right byte to start with. */
3540 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3541 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3542 will already have offset 0. */
3543 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3545 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3547 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3548 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3549 byte = (subword_byte % UNITS_PER_WORD
3550 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
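/* Illustrative: on a fully big-endian 32-bit target, the QImode lowpart
   of an SImode value has byte == 3 in memory order; ibyte becomes
   4 - 1 - 3 == 0, renumbering it to the least-significant position. */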
3553 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3554 so if it's become negative it will instead be very large.) */
3555 gcc_assert (byte < GET_MODE_SIZE (innermode));
3557 /* Convert from bytes to chunks of size value_bit. */
3558 value_start = byte * (BITS_PER_UNIT / value_bit);
3560 /* Re-pack the value. */
3562 if (VECTOR_MODE_P (outermode))
3564 num_elem = GET_MODE_NUNITS (outermode);
3565 result_v = rtvec_alloc (num_elem);
3566 elems = &RTVEC_ELT (result_v, 0);
3567 outer_submode = GET_MODE_INNER (outermode);
3573 outer_submode = outermode;
3576 outer_class = GET_MODE_CLASS (outer_submode);
3577 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3579 gcc_assert (elem_bitsize % value_bit == 0);
3580 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3582 for (elem = 0; elem < num_elem; elem++)
3586 /* Vectors are stored in target memory order. (This is probably
3589 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3590 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3592 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3593 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3594 unsigned bytele = (subword_byte % UNITS_PER_WORD
3595 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3596 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3599 switch (outer_class)
3602 case MODE_PARTIAL_INT:
3604 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3607 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3609 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3610 for (; i < elem_bitsize; i += value_bit)
3611 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3612 << (i - HOST_BITS_PER_WIDE_INT));
3614 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3616 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3617 elems[elem] = gen_int_mode (lo, outer_submode);
3619 elems[elem] = immed_double_const (lo, hi, outer_submode);
3626 long tmp[max_bitsize / 32];
3628 /* real_from_target wants its input in words affected by
3629 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3630 and use WORDS_BIG_ENDIAN instead; see the documentation
3631 of SUBREG in rtl.texi. */
3632 for (i = 0; i < max_bitsize / 32; i++)
3634 for (i = 0; i < elem_bitsize; i += value_bit)
3637 if (WORDS_BIG_ENDIAN)
3638 ibase = elem_bitsize - 1 - i;
3641 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3644 real_from_target (&r, tmp, outer_submode);
3645 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3653 if (VECTOR_MODE_P (outermode))
3654 return gen_rtx_CONST_VECTOR (outermode, result_v);
3659 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3660 Return 0 if no simplifications are possible. */
3662 simplify_subreg (enum machine_mode outermode, rtx op,
3663 enum machine_mode innermode, unsigned int byte)
3665 /* Little bit of sanity checking. */
3666 gcc_assert (innermode != VOIDmode);
3667 gcc_assert (outermode != VOIDmode);
3668 gcc_assert (innermode != BLKmode);
3669 gcc_assert (outermode != BLKmode);
3671 gcc_assert (GET_MODE (op) == innermode
3672 || GET_MODE (op) == VOIDmode);
3674 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3675 gcc_assert (byte < GET_MODE_SIZE (innermode));
3677 if (outermode == innermode && !byte)
3680 if (GET_CODE (op) == CONST_INT
3681 || GET_CODE (op) == CONST_DOUBLE
3682 || GET_CODE (op) == CONST_VECTOR)
3683 return simplify_immed_subreg (outermode, op, innermode, byte);
3685 /* Changing mode twice with SUBREG => just change it once,
3686 or not at all if changing back to op's starting mode. */
3687 if (GET_CODE (op) == SUBREG)
3689 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3690 int final_offset = byte + SUBREG_BYTE (op);
3693 if (outermode == innermostmode
3694 && byte == 0 && SUBREG_BYTE (op) == 0)
3695 return SUBREG_REG (op);
3697 /* The SUBREG_BYTE represents the offset, as if the value were stored
3698 in memory. The irritating exception is the paradoxical subreg,
3699 where we define SUBREG_BYTE to be 0; on big-endian machines this
3700 value should really be negative. For a moment, undo this exception. */
3701 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3703 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3704 if (WORDS_BIG_ENDIAN)
3705 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3706 if (BYTES_BIG_ENDIAN)
3707 final_offset += difference % UNITS_PER_WORD;
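/* Illustrative: when the requested (subreg:DI ... 0) is paradoxical
   over an SImode inner value on a word- and byte-big-endian 32-bit
   target, difference == 4 - 8 == -4, so the notional SUBREG_BYTE of 0
   is undone to an effective offset of -4 here. */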
3709 if (SUBREG_BYTE (op) == 0
3710 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3712 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3713 if (WORDS_BIG_ENDIAN)
3714 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3715 if (BYTES_BIG_ENDIAN)
3716 final_offset += difference % UNITS_PER_WORD;
3719 /* See whether resulting subreg will be paradoxical. */
3720 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3722 /* In nonparadoxical subregs we can't handle negative offsets. */
3723 if (final_offset < 0)
3725 /* Bail out in case resulting subreg would be incorrect. */
3726 if (final_offset % GET_MODE_SIZE (outermode)
3727 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3733 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3735 /* In a paradoxical subreg, see if we are still looking at the lower part.
3736 If so, our SUBREG_BYTE will be 0. */
3737 if (WORDS_BIG_ENDIAN)
3738 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3739 if (BYTES_BIG_ENDIAN)
3740 offset += difference % UNITS_PER_WORD;
3741 if (offset == final_offset)
3747 /* Recurse for further possible simplifications. */
3748 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3752 if (validate_subreg (outermode, innermostmode,
3753 SUBREG_REG (op), final_offset))
3754 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3758 /* SUBREG of a hard register => just change the register number
3759 and/or mode. If the hard register is not valid in that mode,
3760 suppress this simplification. If the hard register is the stack,
3761 frame, or argument pointer, leave this as a SUBREG. */
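/* Illustrative example (register numbering is target-specific): on a
   32-bit target where (reg:DI 0) occupies hard registers 0 and 1,
   (subreg:SI (reg:DI 0) 4) can fold directly to (reg:SI 1) when that
   register is valid in SImode. */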
3764 && REGNO (op) < FIRST_PSEUDO_REGISTER
3765 #ifdef CANNOT_CHANGE_MODE_CLASS
3766 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3767 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3768 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3770 && ((reload_completed && !frame_pointer_needed)
3771 || (REGNO (op) != FRAME_POINTER_REGNUM
3772 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3773 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3776 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3777 && REGNO (op) != ARG_POINTER_REGNUM
3779 && REGNO (op) != STACK_POINTER_REGNUM
3780 && subreg_offset_representable_p (REGNO (op), innermode,
3783 unsigned int regno = REGNO (op);
3784 unsigned int final_regno
3785 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3787 /* ??? We do allow it if the current REG is not valid for
3788 its mode. This is a kludge to work around how float/complex
3789 arguments are passed on 32-bit SPARC and should be fixed. */
3790 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3791 || ! HARD_REGNO_MODE_OK (regno, innermode))
3793 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3795 /* Propagate original regno. We don't have any way to specify
3796 the offset inside the original regno, so do so only for the lowpart.
3797 The information is used only by alias analysis, which cannot
3798 grok partial registers anyway. */
3800 if (subreg_lowpart_offset (outermode, innermode) == byte)
3801 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3806 /* If we have a SUBREG of a register that we are replacing and we are
3807 replacing it with a MEM, make a new MEM and try replacing the
3808 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3809 or if we would be widening it. */
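/* e.g. (subreg:QI (mem:SI addr) 3) can become the narrower reference
   (mem:QI (plus addr 3)) via adjust_address_nv, provided addr is not
   mode-dependent (illustrative). */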
3812 && ! mode_dependent_address_p (XEXP (op, 0))
3813 /* Allow splitting of volatile memory references in case we don't
3814 have an instruction to move the whole thing. */
3815 && (! MEM_VOLATILE_P (op)
3816 || ! have_insn_for (SET, innermode))
3817 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3818 return adjust_address_nv (op, outermode, byte);
3820 /* Handle complex values represented as CONCAT
3821 of the real and imaginary parts. */
3822 if (GET_CODE (op) == CONCAT)
3824 unsigned int inner_size, final_offset;
3827 inner_size = GET_MODE_UNIT_SIZE (innermode);
3828 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3829 final_offset = byte % inner_size;
3830 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3833 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3836 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3837 return gen_rtx_SUBREG (outermode, part, final_offset);
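/* Illustrative, assuming 4-byte SFmode: for a complex value
   (concat:SC (reg:SF r) (reg:SF i)), a byte offset of 4 selects the
   imaginary part, so (subreg:SF (concat ...) 4) folds to (reg:SF i). */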
3841 /* Optimize SUBREG truncations of zero and sign extended values. */
3842 if ((GET_CODE (op) == ZERO_EXTEND
3843 || GET_CODE (op) == SIGN_EXTEND)
3844 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3846 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3848 /* If we're requesting the lowpart of a zero or sign extension,
3849 there are three possibilities. If the outermode is the same
3850 as the origmode, we can omit both the extension and the subreg.
3851 If the outermode is not larger than the origmode, we can apply
3852 the truncation without the extension. Finally, if the outermode
3853 is larger than the origmode, but both are integer modes, we
3854 can just extend to the appropriate mode. */
3857 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3858 if (outermode == origmode)
3859 return XEXP (op, 0);
3860 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3861 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3862 subreg_lowpart_offset (outermode,
3864 if (SCALAR_INT_MODE_P (outermode))
3865 return simplify_gen_unary (GET_CODE (op), outermode,
3866 XEXP (op, 0), origmode);
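/* e.g. (subreg:HI (zero_extend:SI (reg:HI)) 0) folds to (reg:HI);
   (subreg:QI (zero_extend:SI (reg:HI)) 0) becomes the lowpart subreg
   of (reg:HI); and a wider integral outermode re-extends the source
   directly (illustrative). */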
3869 /* A SUBREG resulting from a zero extension may fold to zero if
3870 it extracts higher bits than the ZERO_EXTEND's source provides. */
3871 if (GET_CODE (op) == ZERO_EXTEND
3872 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3873 return CONST0_RTX (outermode);
3876 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
3877 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
3878 the outer subreg is effectively a truncation to the original mode. */
3879 if ((GET_CODE (op) == LSHIFTRT
3880 || GET_CODE (op) == ASHIFTRT)
3881 && SCALAR_INT_MODE_P (outermode)
3882 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
3883 to avoid the possibility that an outer LSHIFTRT shifts by more
3884 than the sign extension's sign_bit_copies and introduces zeros
3885 into the high bits of the result. */
3886 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
3887 && GET_CODE (XEXP (op, 1)) == CONST_INT
3888 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
3889 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3890 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3891 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3892 return simplify_gen_binary (ASHIFTRT, outermode,
3893 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3895 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
3896 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
3897 the outer subreg is effectively a truncation to the original mode. */
3898 if ((GET_CODE (op) == LSHIFTRT
3899 || GET_CODE (op) == ASHIFTRT)
3900 && SCALAR_INT_MODE_P (outermode)
3901 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3902 && GET_CODE (XEXP (op, 1)) == CONST_INT
3903 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3904 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3905 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3906 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3907 return simplify_gen_binary (LSHIFTRT, outermode,
3908 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3910 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
3911 (ashift:QI (x:QI) C), where C is a suitable small constant and
3912 the outer subreg is effectively a truncation to the original mode. */
3913 if (GET_CODE (op) == ASHIFT
3914 && SCALAR_INT_MODE_P (outermode)
3915 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3916 && GET_CODE (XEXP (op, 1)) == CONST_INT
3917 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3918 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
3919 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3920 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3921 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3922 return simplify_gen_binary (ASHIFT, outermode,
3923 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3928 /* Make a SUBREG operation or equivalent if it folds. */
3931 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3932 enum machine_mode innermode, unsigned int byte)
3936 newx = simplify_subreg (outermode, op, innermode, byte);
3940 if (GET_CODE (op) == SUBREG
3941 || GET_CODE (op) == CONCAT
3942 || GET_MODE (op) == VOIDmode)
3945 if (validate_subreg (outermode, innermode, op, byte))
3946 return gen_rtx_SUBREG (outermode, op, byte);
3951 /* Simplify X, an rtx expression.
3953 Return the simplified expression or NULL if no simplifications
3956 This is the preferred entry point into the simplification routines;
3957 however, we still allow passes to call the more specific routines.
3959 Right now GCC has three (yes, three) major bodies of RTL simplification
3960 code that need to be unified.
3962 1. fold_rtx in cse.c. This code uses various CSE specific
3963 information to aid in RTL simplification.
3965 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3966 it uses combine specific information to aid in RTL
3969 3. The routines in this file.
3972 Long term we want to only have one body of simplification code; to
3973 get to that state I recommend the following steps:
3975 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3976 which are not pass dependent state into these routines.
3978 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3979 use this routine whenever possible.
3981 3. Allow for pass dependent state to be provided to these
3982 routines and add simplifications based on the pass dependent
3983 state. Remove code from cse.c & combine.c that becomes
3986 It will take time, but ultimately the compiler will be easier to
3987 maintain and improve. It's totally silly that when we add a
3988 simplification it needs to be added in four places (three for RTL
3989 simplification and one for tree simplification). */
3992 simplify_rtx (rtx x)
3994 enum rtx_code code = GET_CODE (x);
3995 enum machine_mode mode = GET_MODE (x);
3997 switch (GET_RTX_CLASS (code))
4000 return simplify_unary_operation (code, mode,
4001 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4002 case RTX_COMM_ARITH:
4003 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4004 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4006 /* Fall through.... */
4009 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4012 case RTX_BITFIELD_OPS:
4013 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4014 XEXP (x, 0), XEXP (x, 1),
4018 case RTX_COMM_COMPARE:
4019 return simplify_relational_operation (code, mode,
4020 ((GET_MODE (XEXP (x, 0))
4022 ? GET_MODE (XEXP (x, 0))
4023 : GET_MODE (XEXP (x, 1))),
4029 return simplify_gen_subreg (mode, SUBREG_REG (x),
4030 GET_MODE (SUBREG_REG (x)),
4037 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4038 if (GET_CODE (XEXP (x, 0)) == HIGH
4039 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))