/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
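/* For example (illustrative values, assuming a 64-bit HOST_WIDE_INT):
   the pair (low, HWI_SIGN_EXTEND (low)) represents LOW as a signed
   double-word value, so

     HWI_SIGN_EXTEND (0x7fffffffffffffff) ==  0   (low is non-negative)
     HWI_SIGN_EXTEND (0xffffffffffffffff) == -1   (low looks negative)

   matching the high word that a real sign extension would produce.  */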
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
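/* Worked example (illustrative): in QImode the most negative value is
   -128.  Negating it overflows, and gen_int_mode truncates the result
   back into the mode, so neg_const_int (QImode, -128) yields -128
   rather than the unrepresentable +128.  */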
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
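/* For instance (illustrative, 32-bit SImode): the sign bit is bit 31,
   so mode_signbit_p returns true exactly for the SImode constant whose
   only set bit is bit 31 (mask value 0x80000000) and false for every
   other constant.  */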
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
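/* Usage sketch (hypothetical operands): simplify_gen_binary (PLUS,
   SImode, x, const0_rtx) folds to X itself, while a pair that cannot
   fold, such as two unrelated pseudo registers, simply comes back as
   the newly formed rtx (plus:SI x y).  */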
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
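/* Usage sketch (hypothetical rtxen): given x = (plus:SI (reg:SI 100)
   (const_int 4)), simplify_replace_rtx (x, (reg:SI 100), (const_int 8))
   substitutes in the operand, rebuilds the PLUS through
   simplify_gen_binary, and lets the constants fold to (const_int 12).  */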
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (trueop) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (trueop)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (trueop)));
        }
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
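  /* For example (illustrative): applying NEG elementwise to the V2SI
     constant vector {1, -2} through the loop above yields a new
     CONST_VECTOR {-1, 2}; if any element fails to fold, the whole
     simplification is abandoned.  */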
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
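  /* Worked example (illustrative) of the bit trick used by the FFS and
     CTZ cases above: for arg0 == 0x18 (binary 11000), arg0 & -arg0
     isolates the lowest set bit, 0x8, so FFS yields
     exact_log2 (0x8) + 1 == 4 while CTZ yields 3.  */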
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1, hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (trueop));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
          }
          break;
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }
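  /* Worked example (illustrative): folding (fix:SI (const_double:DF 1e10))
     fails the signed-upper-bound test above, so the result saturates to
     2147483647, matching the middle-end's constant-folding semantics.  */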
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (COMPARISON_P (op)
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == PLUS
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && mode_signbit_p (mode, XEXP (op, 1))
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && COMPARISON_P (op)
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);
          break;
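          /* For instance (assuming 32-bit SImode and STORE_FLAG_VALUE
             of -1): (not (ashiftrt x 31)) folds to (ge x (const_int 0)).
             The arithmetic shift yields 0 or -1 according to the sign of
             x, and NOT swaps exactly those two values.  */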
        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (LSHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == LSHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          break;
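          /* For example (assuming 32-bit SImode): (neg (lshiftrt x 31))
             becomes (ashiftrt x 31).  The logical shift isolates the
             sign bit as 0 or 1; negating that gives 0 or -1, which is
             exactly what the arithmetic shift computes directly.  */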
        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && REG_P (SUBREG_REG (op))
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && REG_P (SUBREG_REG (op))
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
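/* Example (illustrative): with CODE == PLUS, "(plus (plus x 1) 2)"
   first tries "(plus 1 2)", which folds to 3, so the second attempt
   above rebuilds the whole expression as "(plus x 3)".  */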
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (trueop0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (trueop1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && CONSTANT_P (trueop0) && CONSTANT_P (trueop1))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (trueop0) != CONST_VECTOR);
          gcc_assert (GET_CODE (trueop1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = trueop0;
          RTVEC_ELT (v, 1) = trueop1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (trueop0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (trueop1));
          unsigned i;

          gcc_assert (GET_CODE (trueop0) == CONST_VECTOR);
          gcc_assert (GET_CODE (trueop1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (trueop0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (trueop1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
          f0 = real_value_truncate (mode, f0);
          f1 = real_value_truncate (mode, f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

          value = real_value_truncate (mode, value);
          return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
        }
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
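  /* Worked example (illustrative): on a host with a 32-bit
     HOST_WIDE_INT, folding a DImode PLUS of 0xffffffff and 1 calls
     add_double on (l1 = 0xffffffff, h1 = 0) and (l2 = 1, h2 = 0); the
     carry out of the low word yields (lv = 0, hv = 1), i.e. 2^32.  */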
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             something more expensive than we had before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  rtx orig = gen_rtx_PLUS (mode, op0, op1);
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                         ? tem : 0;
                }
            }
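          /* For instance (illustrative): x*4 + x matches with
             coeff0 == 4 and coeff1 == 1, so the pair is rewritten as
             x*5 -- and kept only if rtx_cost says the multiply is no
             more expensive than the original plus.  */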
          /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == XOR
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (plus_minus_operand_p (op0)
                  || plus_minus_operand_p (op1))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;
        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (REG_P (xop00) && REG_P (xop10)
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;
        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             something more expensive than we had before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  rtx orig = gen_rtx_MINUS (mode, op0, op1);
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                         ? tem : 0;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* (-x - c) may be simplified as (-c - x).  */
          if (GET_CODE (op0) == NEG
              && (GET_CODE (op1) == CONST_INT
                  || GET_CODE (op1) == CONST_DOUBLE))
            {
              tem = simplify_unary_operation (NEG, mode, op1, mode);
              if (tem)
                return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (plus_minus_operand_p (op0)
                  || plus_minus_operand_p (op1))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;
        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1))
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
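          /* E.g. (illustrative): x * 8 becomes (ashift x 3), since
             exact_log2 (8) == 3.  */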
          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;
        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;

          /* Canonicalize XOR of the most significant bit to PLUS.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (PLUS, mode, op0, op1);
          /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == PLUS
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, XEXP (op0, 1)))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          /* If we are turning off bits already known off in OP0, we need
             not do an AND.  */
          if (GET_CODE (trueop1) == CONST_INT
              && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
             ((A & N) + B) & M -> (A + B) & M
             Similarly if (N & M) == 0,
             ((A | N) + B) & M -> (A + B) & M
             and for - instead of + and/or ^ instead of |.  */
          if (GET_CODE (trueop1) == CONST_INT
              && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && ~INTVAL (trueop1)
              && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
            {
              rtx pmop[2];
              int which;

              pmop[0] = XEXP (op0, 0);
              pmop[1] = XEXP (op0, 1);

              for (which = 0; which < 2; which++)
                {
                  tem = pmop[which];
                  switch (GET_CODE (tem))
                    {
                    case AND:
                      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                          && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                             == INTVAL (trueop1))
                        pmop[which] = XEXP (tem, 0);
                      break;
                    case IOR:
                    case XOR:
                      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                          && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                        pmop[which] = XEXP (tem, 0);
                      break;
                    default:
                      break;
                    }
                }

              if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
                {
                  tem = simplify_gen_binary (GET_CODE (op0), mode,
                                             pmop[0], pmop[1]);
                  return simplify_gen_binary (code, mode, tem, op1);
                }
            }
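          /* Worked example (illustrative): with M == 0xff and N == 0xff,
             ((a & 0xff) + b) & 0xff simplifies to (a + b) & 0xff; bits
             of A above the mask can only influence result bits that the
             outer AND discards anyway.  */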
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case UDIV:
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x/1 is x.  */
          if (trueop1 == const1_rtx)
            {
              /* Handle narrowing UDIV.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              return op0;
            }
          /* Convert divide by power of two into shift.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
          break;
        case DIV:
          /* Handle floating point and integers separately.  */
          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              /* Maybe change 0.0 / x to 0.0.  This transformation isn't
                 safe for modes with NaNs, since 0.0 / 0.0 will then be
                 NaN rather than 0.0.  Nor is it safe for modes with signed
                 zeros, since dividing 0 by a negative number gives -0.0  */
              if (trueop0 == CONST0_RTX (mode)
                  && !HONOR_NANS (mode)
                  && !HONOR_SIGNED_ZEROS (mode)
                  && ! side_effects_p (op1))
                return op0;
              /* x/1.0 is x.  */
              if (trueop1 == CONST1_RTX (mode)
                  && !HONOR_SNANS (mode))
                return op0;

              if (GET_CODE (trueop1) == CONST_DOUBLE
                  && trueop1 != CONST0_RTX (mode))
                {
                  REAL_VALUE_TYPE d;
                  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

                  /* x/-1.0 is -x.  */
                  if (REAL_VALUES_EQUAL (d, dconstm1)
                      && !HONOR_SNANS (mode))
                    return simplify_gen_unary (NEG, mode, op0, mode);

                  /* Change FP division by a constant into multiplication.
                     Only do this with -funsafe-math-optimizations.  */
                  if (flag_unsafe_math_optimizations
                      && !REAL_VALUES_EQUAL (d, dconst0))
                    {
                      REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                      tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                      return simplify_gen_binary (MULT, mode, op0, tem);
                    }
                }
            }
          else
            {
              /* 0/x is 0 (or x&0 if x has side-effects).  */
              if (trueop0 == const0_rtx)
                return side_effects_p (op1)
                       ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                       : const0_rtx;
              /* x/1 is x.  */
              if (trueop1 == const1_rtx)
                {
                  /* Handle narrowing DIV.  */
                  rtx x = gen_lowpart_common (mode, op0);
                  if (x)
                    return x;
                  if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                    return gen_lowpart_SUBREG (mode, op0);
                  return op0;
                }
              /* x/-1 is -x.  */
              if (trueop1 == constm1_rtx)
                {
                  rtx x = gen_lowpart_common (mode, op0);
                  if (!x)
                    x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                        ? gen_lowpart_SUBREG (mode, op0) : op0;
                  return simplify_gen_unary (NEG, mode, x, mode);
                }
            }
          break;
        case UMOD:
          /* 0%x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x%1 is 0 (or x&0 if x has side-effects).  */
          if (trueop1 == const1_rtx)
            return side_effects_p (op0)
                   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
                   : const0_rtx;
          /* Implement modulus by power of two as AND.  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));
          break;
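          /* E.g. (illustrative, unsigned): x % 8 becomes x & 7, since
             for a power-of-two modulus the remainder is just the low
             bits.  */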
        case MOD:
          /* 0%x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
          if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
            return side_effects_p (op0)
                   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
                   : const0_rtx;
          break;
        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;
        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
              gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
              gcc_assert (GET_CODE (trueop1) == PARALLEL);
              gcc_assert (XVECLEN (trueop1, 0) == 1);
              gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                          (trueop1, 0, 0)));
            }
          else
            {
              gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
              gcc_assert (GET_MODE_INNER (mode)
                          == GET_MODE_INNER (GET_MODE (trueop0)));
              gcc_assert (GET_CODE (trueop1) == PARALLEL);

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      gcc_assert (GET_CODE (x) == CONST_INT);
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                           INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          break;
        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            gcc_assert (VECTOR_MODE_P (mode));
            gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                        == GET_MODE_SIZE (mode));

            if (VECTOR_MODE_P (op0_mode))
              gcc_assert (GET_MODE_INNER (mode)
                          == GET_MODE_INNER (op0_mode));
            else
              gcc_assert (GET_MODE_INNER (mode) == op0_mode);

            if (VECTOR_MODE_P (op1_mode))
              gcc_assert (GET_MODE_INNER (mode)
                          == GET_MODE_INNER (op1_mode));
            else
              gcc_assert (GET_MODE_INNER (mode) == op1_mode);

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
          }
          break;

        default:
          break;
        }

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
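  /* Worked example (illustrative): for QImode (width == 8) and a raw
     value of 0xff, the two forms are arg0 == 255 (zero-extended) and
     arg0s == -1 (sign-extended); the unsigned operators below use the
     former, the signed ones the latter.  */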
2298 /* Compute the value of the arithmetic. */
2303 val = arg0s + arg1s;
2307 val = arg0s - arg1s;
2311 val = arg0s * arg1s;
2316 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2319 val = arg0s / arg1s;
2324 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2327 val = arg0s % arg1s;
2332 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2335 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2340 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2343 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2361 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
2362 value is in range. We can't return any old value for out-of-range
2363 arguments because either the middle-end (via shift_truncation_mask)
2364 or the back-end might be relying on target-specific knowledge.
2365 Nor can we rely on shift_truncation_mask, since the shift might
2366 not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
2367 if (SHIFT_COUNT_TRUNCATED)
2368 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
2369 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
2372 val = (code == ASHIFT
2373 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
2374 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
2376 /* Sign-extend the result for arithmetic right shifts. */
2377 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
2378 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
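/* Illustrative sketch, not part of GCC: why the OR above is needed.
   C's >> on an unsigned operand is a logical shift, so emulating an
   arithmetic right shift must refill the vacated high bits with copies
   of the sign bit.  Hypothetical helper; assumes X is already
   sign-extended to the host word and 0 < N < WIDTH <= 64.  Bits set
   above WIDTH are assumed to be truncated away afterward, as
   trunc_int_for_mode does below.  */
static long long
arith_shift_right (long long x, int n, int width)
{
  unsigned long long r = (unsigned long long) x >> n;  /* logical shift */
  if (x < 0)
    r |= ~0ULL << (width - n);        /* refill with sign bits */
  return (long long) r;
}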
2386 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2387 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2395 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2396 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
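/* Illustrative sketch, not part of GCC: a rotate in a WIDTH-bit mode is
   just the pair of shifts above OR'd together.  Hypothetical helper;
   assumes X fits in WIDTH bits and 0 < N < WIDTH < 64.  */
static unsigned long long
rotate_left_width (unsigned long long x, int n, int width)
{
  unsigned long long mask = (1ULL << width) - 1;
  return ((x << n) | (x >> (width - n))) & mask;
}
/* A right rotate by N is a left rotate by WIDTH - N, which is exactly
   how the ROTATERT case above differs from the ROTATE case.  */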
2400 /* Do nothing here. */
2404 val = arg0s <= arg1s ? arg0s : arg1s;
2408 val = ((unsigned HOST_WIDE_INT) arg0
2409 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2413 val = arg0s > arg1s ? arg0s : arg1s;
2417 val = ((unsigned HOST_WIDE_INT) arg0
2418 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2425 /* ??? There are simplifications that can be done. */
2432 val = trunc_int_for_mode (val, mode);
2434 return GEN_INT (val);
2437 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2440 Rather than test for specific cases, we do this by a brute-force method
2441 and do all possible simplifications until no more changes occur. Then
2442 we rebuild the operation.
2444 If FORCE is true, then always generate the rtx. This is used to
2445 canonicalize stuff emitted from simplify_gen_binary. Note that this
2446 can still fail if the rtx is too complex. It won't fail just because
2447 the result is not 'simpler' than the input, however. */
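/* Illustrative sketch, not part of GCC: the flattening step of the
   brute-force method, restated on a hypothetical expression tree.  A
   MINUS contributes its second operand with the negation flag flipped,
   and a NEG flips the flag of its sole operand; the caller must supply
   an OUT array large enough, mirroring the fixed-size ops[] below.  */
enum sketch_kind { SK_LEAF, SK_ADD, SK_SUB, SK_NEG };
struct sketch_expr { enum sketch_kind kind; struct sketch_expr *l, *r; };
struct sketch_term { struct sketch_expr *op; int neg; };

static int
sketch_flatten (struct sketch_expr *e, int neg,
                struct sketch_term *out, int n)
{
  switch (e->kind)
    {
    case SK_ADD:
      n = sketch_flatten (e->l, neg, out, n);
      return sketch_flatten (e->r, neg, out, n);
    case SK_SUB:                      /* (a - b): b's flag is flipped */
      n = sketch_flatten (e->l, neg, out, n);
      return sketch_flatten (e->r, !neg, out, n);
    case SK_NEG:                      /* (neg a): flip a's flag */
      return sketch_flatten (e->l, !neg, out, n);
    default:                          /* leaf: record (operand, neg) */
      out[n].op = e, out[n].neg = neg;
      return n + 1;
    }
}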
2449 struct simplify_plus_minus_op_data
2456 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2458 const struct simplify_plus_minus_op_data *d1 = p1;
2459 const struct simplify_plus_minus_op_data *d2 = p2;
2461 return (commutative_operand_precedence (d2->op)
2462 - commutative_operand_precedence (d1->op));
2466 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2469 struct simplify_plus_minus_op_data ops[8];
2471 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2475 memset (ops, 0, sizeof ops);
2477 /* Set up the two operands and then expand them until nothing has been
2478 changed. If we run out of room in our array, give up; this should
2479 almost never happen. */
2484 ops[1].neg = (code == MINUS);
2490 for (i = 0; i < n_ops; i++)
2492 rtx this_op = ops[i].op;
2493 int this_neg = ops[i].neg;
2494 enum rtx_code this_code = GET_CODE (this_op);
2503 ops[n_ops].op = XEXP (this_op, 1);
2504 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2507 ops[i].op = XEXP (this_op, 0);
2513 ops[i].op = XEXP (this_op, 0);
2514 ops[i].neg = ! this_neg;
2520 && GET_CODE (XEXP (this_op, 0)) == PLUS
2521 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2522 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2524 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2525 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2526 ops[n_ops].neg = this_neg;
2534 /* ~a -> (-a - 1) */
2537 ops[n_ops].op = constm1_rtx;
2538 ops[n_ops++].neg = this_neg;
2539 ops[i].op = XEXP (this_op, 0);
2540 ops[i].neg = !this_neg;
2548 ops[i].op = neg_const_int (mode, this_op);
2561 /* If we only have two operands, we can't do anything. */
2562 if (n_ops <= 2 && !force)
2565 /* Count the number of CONSTs we didn't split above. */
2566 for (i = 0; i < n_ops; i++)
2567 if (GET_CODE (ops[i].op) == CONST)
2570 /* Now simplify each pair of operands until nothing changes. The first
2571 time through just simplify constants against each other. */
2578 for (i = 0; i < n_ops - 1; i++)
2579 for (j = i + 1; j < n_ops; j++)
2581 rtx lhs = ops[i].op, rhs = ops[j].op;
2582 int lneg = ops[i].neg, rneg = ops[j].neg;
2584 if (lhs != 0 && rhs != 0
2585 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2587 enum rtx_code ncode = PLUS;
2593 tem = lhs, lhs = rhs, rhs = tem;
2595 else if (swap_commutative_operands_p (lhs, rhs))
2596 tem = lhs, lhs = rhs, rhs = tem;
2598 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2600 /* Reject "simplifications" that just wrap the two
2601 arguments in a CONST. Failure to do so can result
2602 in infinite recursion with simplify_binary_operation
2603 when it calls us to simplify CONST operations. */
2605 && ! (GET_CODE (tem) == CONST
2606 && GET_CODE (XEXP (tem, 0)) == ncode
2607 && XEXP (XEXP (tem, 0), 0) == lhs
2608 && XEXP (XEXP (tem, 0), 1) == rhs)
2609 /* Don't allow -x + -1 -> ~x simplifications in the
2610 first pass. This allows us the chance to combine
2611 the -1 with other constants. */
2613 && GET_CODE (tem) == NOT
2614 && XEXP (tem, 0) == rhs))
2617 if (GET_CODE (tem) == NEG)
2618 tem = XEXP (tem, 0), lneg = !lneg;
2619 if (GET_CODE (tem) == CONST_INT && lneg)
2620 tem = neg_const_int (mode, tem), lneg = 0;
2624 ops[j].op = NULL_RTX;
2634 /* Pack all the operands to the lower-numbered entries. */
2635 for (i = 0, j = 0; j < n_ops; j++)
2640 /* Sort the operations based on swap_commutative_operands_p. */
2641 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2643 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2645 && GET_CODE (ops[1].op) == CONST_INT
2646 && CONSTANT_P (ops[0].op)
2648 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2650 /* We suppressed creation of trivial CONST expressions in the
2651 combination loop to avoid recursion. Create one manually now.
2652 The combination loop should have ensured that there is exactly
2653 one CONST_INT, and the sort will have ensured that it is last
2654 in the array and that any other constant will be next-to-last. */
2657 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2658 && CONSTANT_P (ops[n_ops - 2].op))
2660 rtx value = ops[n_ops - 1].op;
2661 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2662 value = neg_const_int (mode, value);
2663 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2667 /* Count the number of CONSTs that we generated. */
2669 for (i = 0; i < n_ops; i++)
2670 if (GET_CODE (ops[i].op) == CONST)
2673 /* Give up if we didn't reduce the number of operands we had. Make
2674 sure we count a CONST as two operands. If we have the same
2675 number of operands, but have made more CONSTs than before, this
2676 is also an improvement, so accept it. */
2678 && (n_ops + n_consts > input_ops
2679 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2682 /* Put a non-negated operand first, if possible. */
2684 for (i = 0; i < n_ops && ops[i].neg; i++)
2687 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2696 /* Now make the result by performing the requested operations. */
2698 for (i = 1; i < n_ops; i++)
2699 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2700 mode, result, ops[i].op);
2705 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2707 plus_minus_operand_p (rtx x)
2709 return GET_CODE (x) == PLUS
2710 || GET_CODE (x) == MINUS
2711 || (GET_CODE (x) == CONST
2712 && GET_CODE (XEXP (x, 0)) == PLUS
2713 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2714 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2717 /* Like simplify_binary_operation except used for relational operators.
2718 MODE is the mode of the result. If MODE is VOIDmode, the operands
2719 must not both be VOIDmode as well.
2721 CMP_MODE specifies the mode in which the comparison is done, so it is
2722 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2723 the operands or, if both are VOIDmode, the operands are compared in
2724 "infinite precision". */
2726 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2727 enum machine_mode cmp_mode, rtx op0, rtx op1)
2729 rtx tem, trueop0, trueop1;
2731 if (cmp_mode == VOIDmode)
2732 cmp_mode = GET_MODE (op0);
2733 if (cmp_mode == VOIDmode)
2734 cmp_mode = GET_MODE (op1);
2736 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2739 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2741 if (tem == const0_rtx)
2742 return CONST0_RTX (mode);
2743 #ifdef FLOAT_STORE_FLAG_VALUE
2745 REAL_VALUE_TYPE val;
2746 val = FLOAT_STORE_FLAG_VALUE (mode);
2747 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2753 if (VECTOR_MODE_P (mode))
2755 if (tem == const0_rtx)
2756 return CONST0_RTX (mode);
2757 #ifdef VECTOR_STORE_FLAG_VALUE
2762 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2763 if (val == NULL_RTX)
2765 if (val == const1_rtx)
2766 return CONST1_RTX (mode);
2768 units = GET_MODE_NUNITS (mode);
2769 v = rtvec_alloc (units);
2770 for (i = 0; i < units; i++)
2771 RTVEC_ELT (v, i) = val;
2772 return gen_rtx_raw_CONST_VECTOR (mode, v);
2782 /* For the following tests, ensure const0_rtx is op1. */
2783 if (swap_commutative_operands_p (op0, op1)
2784 || (op0 == const0_rtx && op1 != const0_rtx))
2785 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2787 /* If op0 is a compare, extract the comparison arguments from it. */
2788 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2789 return simplify_relational_operation (code, mode, VOIDmode,
2790 XEXP (op0, 0), XEXP (op0, 1));
2792 if (mode == VOIDmode
2793 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2797 trueop0 = avoid_constant_pool_reference (op0);
2798 trueop1 = avoid_constant_pool_reference (op1);
2799 return simplify_relational_operation_1 (code, mode, cmp_mode,
2803 /* This part of simplify_relational_operation is only used when CMP_MODE
2804 is not in class MODE_CC (i.e. it is a real comparison).
2806 MODE is the mode of the result, while CMP_MODE specifies the mode in
2807 which the comparison is done, so it is the mode of the operands. */
2810 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2811 enum machine_mode cmp_mode, rtx op0, rtx op1)
2813 enum rtx_code op0code = GET_CODE (op0);
2815 if (GET_CODE (op1) == CONST_INT)
2817 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2819 /* If op0 is a comparison, extract the comparison arguments from it. */
2822 if (GET_MODE (op0) == cmp_mode)
2823 return simplify_rtx (op0);
2825 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2826 XEXP (op0, 0), XEXP (op0, 1));
2828 else if (code == EQ)
2830 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2831 if (new_code != UNKNOWN)
2832 return simplify_gen_relational (new_code, mode, VOIDmode,
2833 XEXP (op0, 0), XEXP (op0, 1));
2838 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
2839 if ((code == EQ || code == NE)
2840 && (op0code == PLUS || op0code == MINUS)
2842 && CONSTANT_P (XEXP (op0, 1))
2843 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2845 rtx x = XEXP (op0, 0);
2846 rtx c = XEXP (op0, 1);
2848 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS, cmp_mode, op1, c);
2850 return simplify_gen_relational (code, mode, cmp_mode, x, c);
2856 /* Check if the given comparison (done in the given MODE) is actually a
2857 tautology or a contradiction.
2858 If no simplification is possible, this function returns zero.
2859 Otherwise, it returns either const_true_rtx or const0_rtx. */
2862 simplify_const_relational_operation (enum rtx_code code,
2863 enum machine_mode mode, rtx op0, rtx op1)
2866 int equal, op0lt, op0ltu, op1lt, op1ltu;
2871 gcc_assert (mode != VOIDmode
2872 || (GET_MODE (op0) == VOIDmode
2873 && GET_MODE (op1) == VOIDmode));
2875 /* If op0 is a compare, extract the comparison arguments from it. */
2876 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2877 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2879 /* We can't simplify MODE_CC values since we don't know what the
2880 actual comparison is. */
2881 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2884 /* Make sure the constant is second. */
2885 if (swap_commutative_operands_p (op0, op1))
2887 tem = op0, op0 = op1, op1 = tem;
2888 code = swap_condition (code);
2891 trueop0 = avoid_constant_pool_reference (op0);
2892 trueop1 = avoid_constant_pool_reference (op1);
2894 /* For integer comparisons of A and B maybe we can simplify A - B and can
2895 then simplify a comparison of that with zero. If A and B are both either
2896 a register or a CONST_INT, this can't help; testing for these cases will
2897 prevent infinite recursion here and speed things up.
2899 If CODE is an unsigned comparison, then we can never do this optimization,
2900 because it gives an incorrect result if the subtraction wraps around zero.
2901 ANSI C defines unsigned operations such that they never overflow, and
2902 thus such cases cannot be ignored; but we cannot do it even for
2903 signed comparisons for languages such as Java, so test flag_wrapv. */
2905 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2906 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2907 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2908 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2909 /* We cannot do this for == or != if tem is a nonzero address. */
2910 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2911 && code != GTU && code != GEU && code != LTU && code != LEU)
2912 return simplify_const_relational_operation (signed_condition (code),
2913 mode, tem, const0_rtx);
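/* Illustrative sketch, not part of GCC: why the transformation above is
   never applied to unsigned comparisons.  Hypothetical helper using an
   8-bit mode; the trick happens to work for 1 <u 2, but 1 <u 255 is
   true while the wrapped difference reports the opposite.  */
static int
bogus_ltu_via_minus (unsigned char a, unsigned char b)
{
  signed char diff = (signed char) (unsigned char) (a - b);
  /* a = 1, b = 2:   diff = -1, returns 1 (correct).
     a = 1, b = 255: 1 - 255 wraps to 2, returns 0 (wrong).  */
  return diff < 0;
}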
2915 if (flag_unsafe_math_optimizations && code == ORDERED)
2916 return const_true_rtx;
2918 if (flag_unsafe_math_optimizations && code == UNORDERED)
2921 /* For modes without NaNs, if the two operands are equal, we know the
2922 result except if they have side-effects. */
2923 if (! HONOR_NANS (GET_MODE (trueop0))
2924 && rtx_equal_p (trueop0, trueop1)
2925 && ! side_effects_p (trueop0))
2926 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2928 /* If the operands are floating-point constants, see if we can fold the result. */
2930 else if (GET_CODE (trueop0) == CONST_DOUBLE
2931 && GET_CODE (trueop1) == CONST_DOUBLE
2932 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2934 REAL_VALUE_TYPE d0, d1;
2936 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2937 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2939 /* Comparisons are unordered iff at least one of the values is NaN. */
2940 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2950 return const_true_rtx;
2963 equal = REAL_VALUES_EQUAL (d0, d1);
2964 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2965 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2968 /* Otherwise, see if the operands are both integers. */
2969 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2970 && (GET_CODE (trueop0) == CONST_DOUBLE
2971 || GET_CODE (trueop0) == CONST_INT)
2972 && (GET_CODE (trueop1) == CONST_DOUBLE
2973 || GET_CODE (trueop1) == CONST_INT))
2975 int width = GET_MODE_BITSIZE (mode);
2976 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2977 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2979 /* Get the two words comprising each integer constant. */
2980 if (GET_CODE (trueop0) == CONST_DOUBLE)
2982 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2983 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2987 l0u = l0s = INTVAL (trueop0);
2988 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2991 if (GET_CODE (trueop1) == CONST_DOUBLE)
2993 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2994 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2998 l1u = l1s = INTVAL (trueop1);
2999 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3002 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3003 we have to sign or zero-extend the values. */
3004 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3006 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3007 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3009 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3010 l0s |= ((HOST_WIDE_INT) (-1) << width);
3012 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3013 l1s |= ((HOST_WIDE_INT) (-1) << width);
3015 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3016 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3018 equal = (h0u == h1u && l0u == l1u);
3019 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3020 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3021 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3022 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
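/* Illustrative sketch, not part of GCC: the double-word orderings
   above, restated on a hypothetical struct holding signed and unsigned
   views of the high word plus the unsigned low word.  The low words
   always compare unsigned; signedness matters only in the high word.  */
struct dword { long long hs; unsigned long long hu; unsigned long long lo; };

static int
dword_lt (struct dword a, struct dword b)     /* signed, like OP0LT */
{
  return a.hs < b.hs || (a.hs == b.hs && a.lo < b.lo);
}

static int
dword_ltu (struct dword a, struct dword b)    /* unsigned, like OP0LTU */
{
  return a.hu < b.hu || (a.hu == b.hu && a.lo < b.lo);
}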
3025 /* Otherwise, there are some code-specific tests we can make. */
3028 /* Optimize comparisons with upper and lower bounds. */
3029 if (SCALAR_INT_MODE_P (mode)
3030 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3043 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3050 /* x >= min is always true. */
3051 if (rtx_equal_p (trueop1, mmin))
3052 tem = const_true_rtx;
3058 /* x <= max is always true. */
3059 if (rtx_equal_p (trueop1, mmax))
3060 tem = const_true_rtx;
3065 /* x > max is always false. */
3066 if (rtx_equal_p (trueop1, mmax))
3072 /* x < min is always false. */
3073 if (rtx_equal_p (trueop1, mmin))
3080 if (tem == const0_rtx
3081 || tem == const_true_rtx)
3088 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3093 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3094 return const_true_rtx;
3098 /* Optimize abs(x) < 0.0. */
3099 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3101 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0) : trueop0;
3103 if (GET_CODE (tem) == ABS)
3109 /* Optimize abs(x) >= 0.0. */
3110 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3112 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0) : trueop0;
3114 if (GET_CODE (tem) == ABS)
3115 return const_true_rtx;
3120 /* Optimize ! (abs(x) < 0.0). */
3121 if (trueop1 == CONST0_RTX (mode))
3123 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0) : trueop0;
3125 if (GET_CODE (tem) == ABS)
3126 return const_true_rtx;
3137 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set as appropriate. */
3143 return equal ? const_true_rtx : const0_rtx;
3146 return ! equal ? const_true_rtx : const0_rtx;
3149 return op0lt ? const_true_rtx : const0_rtx;
3152 return op1lt ? const_true_rtx : const0_rtx;
3154 return op0ltu ? const_true_rtx : const0_rtx;
3156 return op1ltu ? const_true_rtx : const0_rtx;
3159 return equal || op0lt ? const_true_rtx : const0_rtx;
3162 return equal || op1lt ? const_true_rtx : const0_rtx;
3164 return equal || op0ltu ? const_true_rtx : const0_rtx;
3166 return equal || op1ltu ? const_true_rtx : const0_rtx;
3168 return const_true_rtx;
3176 /* Simplify CODE, an operation with result mode MODE and three operands,
3177 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3178 a constant. Return 0 if no simplification is possible. */
3181 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3182 enum machine_mode op0_mode, rtx op0, rtx op1,
3185 unsigned int width = GET_MODE_BITSIZE (mode);
3187 /* VOIDmode means "infinite" precision. */
3189 width = HOST_BITS_PER_WIDE_INT;
3195 if (GET_CODE (op0) == CONST_INT
3196 && GET_CODE (op1) == CONST_INT
3197 && GET_CODE (op2) == CONST_INT
3198 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3199 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3201 /* Extracting a bit-field from a constant */
3202 HOST_WIDE_INT val = INTVAL (op0);
3204 if (BITS_BIG_ENDIAN)
3205 val >>= (GET_MODE_BITSIZE (op0_mode)
3206 - INTVAL (op2) - INTVAL (op1));
3208 val >>= INTVAL (op2);
3210 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3212 /* First zero-extend. */
3213 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3214 /* If desired, propagate sign bit. */
3215 if (code == SIGN_EXTRACT
3216 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3217 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3220 /* Clear the bits that don't belong in our mode,
3221 unless they and our sign bit are all one.
3222 So we get either a reasonable negative value or a reasonable
3223 unsigned value for this mode. */
3224 if (width < HOST_BITS_PER_WIDE_INT
3225 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3226 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3227 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3229 return gen_int_mode (val, mode);
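/* Illustrative sketch, not part of GCC: the extraction above as a
   standalone helper.  Hypothetical name; LEN and POS play the roles of
   OP1 and OP2, SIGNED_P selects SIGN_EXTRACT over ZERO_EXTRACT, and
   BITS_BIG_ENDIAN is assumed to be 0.  Assumes 0 < LEN, 0 <= POS and
   LEN + POS <= 64.  */
static long long
extract_bit_field (long long val, int len, int pos, int signed_p)
{
  val >>= pos;                                    /* field down to bit 0 */
  if (len != 64)
    {
      val &= (1LL << len) - 1;                    /* first zero-extend */
      if (signed_p && (val & (1LL << (len - 1)))) /* propagate sign bit */
        val |= ~((1LL << len) - 1);
    }
  return val;
}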
3234 if (GET_CODE (op0) == CONST_INT)
3235 return op0 != const0_rtx ? op1 : op2;
3237 /* Convert c ? a : a into "a". */
3238 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3241 /* Convert a != b ? a : b into "a". */
3242 if (GET_CODE (op0) == NE
3243 && ! side_effects_p (op0)
3244 && ! HONOR_NANS (mode)
3245 && ! HONOR_SIGNED_ZEROS (mode)
3246 && ((rtx_equal_p (XEXP (op0, 0), op1)
3247 && rtx_equal_p (XEXP (op0, 1), op2))
3248 || (rtx_equal_p (XEXP (op0, 0), op2)
3249 && rtx_equal_p (XEXP (op0, 1), op1))))
3252 /* Convert a == b ? a : b into "b". */
3253 if (GET_CODE (op0) == EQ
3254 && ! side_effects_p (op0)
3255 && ! HONOR_NANS (mode)
3256 && ! HONOR_SIGNED_ZEROS (mode)
3257 && ((rtx_equal_p (XEXP (op0, 0), op1)
3258 && rtx_equal_p (XEXP (op0, 1), op2))
3259 || (rtx_equal_p (XEXP (op0, 0), op2)
3260 && rtx_equal_p (XEXP (op0, 1), op1))))
3263 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3265 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3266 ? GET_MODE (XEXP (op0, 1))
3267 : GET_MODE (XEXP (op0, 0)));
3270 /* Look for happy constants in op1 and op2. */
3271 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3273 HOST_WIDE_INT t = INTVAL (op1);
3274 HOST_WIDE_INT f = INTVAL (op2);
3276 if (t == STORE_FLAG_VALUE && f == 0)
3277 code = GET_CODE (op0);
3278 else if (t == 0 && f == STORE_FLAG_VALUE)
3281 tmp = reversed_comparison_code (op0, NULL_RTX);
3289 return simplify_gen_relational (code, mode, cmp_mode,
3290 XEXP (op0, 0), XEXP (op0, 1));
3293 if (cmp_mode == VOIDmode)
3294 cmp_mode = op0_mode;
3295 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3296 cmp_mode, XEXP (op0, 0), XEXP (op0, 1));
3299 /* See if any simplifications were possible. */
3302 if (GET_CODE (temp) == CONST_INT)
3303 return temp == const0_rtx ? op2 : op1;
3305 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3311 gcc_assert (GET_MODE (op0) == mode);
3312 gcc_assert (GET_MODE (op1) == mode);
3313 gcc_assert (VECTOR_MODE_P (mode));
3314 op2 = avoid_constant_pool_reference (op2);
3315 if (GET_CODE (op2) == CONST_INT)
3317 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3318 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3319 int mask = (1 << n_elts) - 1;
3321 if (!(INTVAL (op2) & mask))
3323 if ((INTVAL (op2) & mask) == mask)
3326 op0 = avoid_constant_pool_reference (op0);
3327 op1 = avoid_constant_pool_reference (op1);
3328 if (GET_CODE (op0) == CONST_VECTOR
3329 && GET_CODE (op1) == CONST_VECTOR)
3331 rtvec v = rtvec_alloc (n_elts);
3334 for (i = 0; i < n_elts; i++)
3335 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3336 ? CONST_VECTOR_ELT (op0, i)
3337 : CONST_VECTOR_ELT (op1, i));
3338 return gen_rtx_CONST_VECTOR (mode, v);
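/* Illustrative sketch, not part of GCC: the constant VEC_MERGE fold
   above, restated on plain arrays.  Hypothetical helper with `int'
   elements; bit I of MASK selects element I of OP0, otherwise OP1.  */
static void
vec_merge_const (const int *op0, const int *op1, int mask,
                 int *dst, unsigned n_elts)
{
  unsigned i;
  for (i = 0; i < n_elts; i++)
    dst[i] = (mask & (1 << i)) ? op0[i] : op1[i];
}
/* An all-zero mask therefore yields OP1 and an all-ones mask yields
   OP0, which is what the two early returns above exploit.  */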
3350 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3351 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3353 Works by unpacking OP into a collection of 8-bit values
3354 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3355 and then repacking them again for OUTERMODE. */
3358 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3359 enum machine_mode innermode, unsigned int byte)
3361 /* We support up to 512-bit values (for V8DFmode). */
3365 value_mask = (1 << value_bit) - 1
3367 unsigned char value[max_bitsize / value_bit];
3376 rtvec result_v = NULL;
3377 enum mode_class outer_class;
3378 enum machine_mode outer_submode;
3380 /* Some ports misuse CCmode. */
3381 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3384 /* We have no way to represent a complex constant at the rtl level. */
3385 if (COMPLEX_MODE_P (outermode))
3388 /* Unpack the value. */
3390 if (GET_CODE (op) == CONST_VECTOR)
3392 num_elem = CONST_VECTOR_NUNITS (op);
3393 elems = &CONST_VECTOR_ELT (op, 0);
3394 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3400 elem_bitsize = max_bitsize;
3402 /* If this asserts, it is too complicated; reducing value_bit may help. */
3403 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3404 /* I don't know how to handle endianness of sub-units. */
3405 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3407 for (elem = 0; elem < num_elem; elem++)
3410 rtx el = elems[elem];
3412 /* Vectors are kept in target memory order. (This is probably a mistake.) */
3415 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3416 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3418 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3419 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3420 unsigned bytele = (subword_byte % UNITS_PER_WORD
3421 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3422 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3425 switch (GET_CODE (el))
3429 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3431 *vp++ = INTVAL (el) >> i;
3432 /* CONST_INTs are always logically sign-extended. */
3433 for (; i < elem_bitsize; i += value_bit)
3434 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3438 if (GET_MODE (el) == VOIDmode)
3440 /* If this triggers, someone should have generated a
3441 CONST_INT instead. */
3442 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3444 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3445 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3446 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3449 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3452 /* It shouldn't matter what's done here, so fill it with zero. */
3454 for (; i < max_bitsize; i += value_bit)
3459 long tmp[max_bitsize / 32];
3460 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3462 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3463 gcc_assert (bitsize <= elem_bitsize);
3464 gcc_assert (bitsize % value_bit == 0);
3466 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el), GET_MODE (el));
3469 /* real_to_target produces its result in words affected by
3470 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3471 and use WORDS_BIG_ENDIAN instead; see the documentation
3472 of SUBREG in rtl.texi. */
3473 for (i = 0; i < bitsize; i += value_bit)
3476 if (WORDS_BIG_ENDIAN)
3477 ibase = bitsize - 1 - i;
3480 *vp++ = tmp[ibase / 32] >> i % 32;
3483 /* It shouldn't matter what's done here, so fill it with zero. */
3485 for (; i < elem_bitsize; i += value_bit)
3495 /* Now, pick the right byte to start with. */
3496 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3497 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3498 will already have offset 0. */
3499 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3501 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode) - byte);
3503 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3504 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3505 byte = (subword_byte % UNITS_PER_WORD
3506 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3509 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3510 so if it's become negative it will instead be very large.) */
3511 gcc_assert (byte < GET_MODE_SIZE (innermode));
3513 /* Convert from bytes to chunks of size value_bit. */
3514 value_start = byte * (BITS_PER_UNIT / value_bit);
3516 /* Re-pack the value. */
3518 if (VECTOR_MODE_P (outermode))
3520 num_elem = GET_MODE_NUNITS (outermode);
3521 result_v = rtvec_alloc (num_elem);
3522 elems = &RTVEC_ELT (result_v, 0);
3523 outer_submode = GET_MODE_INNER (outermode);
3529 outer_submode = outermode;
3532 outer_class = GET_MODE_CLASS (outer_submode);
3533 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3535 gcc_assert (elem_bitsize % value_bit == 0);
3536 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3538 for (elem = 0; elem < num_elem; elem++)
3542 /* Vectors are stored in target memory order. (This is probably a mistake.) */
3545 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3546 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3548 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3549 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3550 unsigned bytele = (subword_byte % UNITS_PER_WORD
3551 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3552 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3555 switch (outer_class)
3558 case MODE_PARTIAL_INT:
3560 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3563 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3565 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3566 for (; i < elem_bitsize; i += value_bit)
3567 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3568 << (i - HOST_BITS_PER_WIDE_INT));
3570 /* immed_double_const doesn't call trunc_int_for_mode. I don't know why. */
3572 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3573 elems[elem] = gen_int_mode (lo, outer_submode);
3575 elems[elem] = immed_double_const (lo, hi, outer_submode);
3582 long tmp[max_bitsize / 32];
3584 /* real_from_target wants its input in words affected by
3585 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3586 and use WORDS_BIG_ENDIAN instead; see the documentation
3587 of SUBREG in rtl.texi. */
3588 for (i = 0; i < max_bitsize / 32; i++)
3590 for (i = 0; i < elem_bitsize; i += value_bit)
3593 if (WORDS_BIG_ENDIAN)
3594 ibase = elem_bitsize - 1 - i;
3597 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3600 real_from_target (&r, tmp, outer_submode);
3601 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3609 if (VECTOR_MODE_P (outermode))
3610 return gen_rtx_CONST_VECTOR (outermode, result_v);
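/* Illustrative sketch, not part of GCC: the unpack/select idea above on
   a concrete scalar.  Hypothetical helper; BYTE is assumed to have
   already been renumbered so that byte 0 is least significant, as the
   code above arranges.  E.g. selecting byte 0 of 0x12345678 yields
   0x78.  */
static unsigned char
qi_subreg_of_si (unsigned int x, unsigned int byte)
{
  unsigned char bytes[4];
  unsigned int i;
  for (i = 0; i < 4; i++)             /* unpack, little-endian */
    bytes[i] = (x >> (8 * i)) & 0xff;
  return bytes[byte];                 /* select; repacking is trivial
                                         for a one-byte QI result */
}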
3615 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3616 Return 0 if no simplifications are possible. */
3618 simplify_subreg (enum machine_mode outermode, rtx op,
3619 enum machine_mode innermode, unsigned int byte)
3621 /* Little bit of sanity checking. */
3622 gcc_assert (innermode != VOIDmode);
3623 gcc_assert (outermode != VOIDmode);
3624 gcc_assert (innermode != BLKmode);
3625 gcc_assert (outermode != BLKmode);
3627 gcc_assert (GET_MODE (op) == innermode
3628 || GET_MODE (op) == VOIDmode);
3630 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3631 gcc_assert (byte < GET_MODE_SIZE (innermode));
3633 if (outermode == innermode && !byte)
3636 if (GET_CODE (op) == CONST_INT
3637 || GET_CODE (op) == CONST_DOUBLE
3638 || GET_CODE (op) == CONST_VECTOR)
3639 return simplify_immed_subreg (outermode, op, innermode, byte);
3641 /* Changing mode twice with SUBREG => just change it once,
3642 or not at all if changing back to op's starting mode. */
3643 if (GET_CODE (op) == SUBREG)
3645 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3646 int final_offset = byte + SUBREG_BYTE (op);
3649 if (outermode == innermostmode
3650 && byte == 0 && SUBREG_BYTE (op) == 0)
3651 return SUBREG_REG (op);
3653 /* The SUBREG_BYTE represents the offset, as if the value were stored
3654 in memory. An irritating exception is the paradoxical subreg, where
3655 we define SUBREG_BYTE to be 0. On big endian machines, this
3656 value should be negative. For a moment, undo this exception. */
3657 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3659 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3660 if (WORDS_BIG_ENDIAN)
3661 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3662 if (BYTES_BIG_ENDIAN)
3663 final_offset += difference % UNITS_PER_WORD;
3665 if (SUBREG_BYTE (op) == 0
3666 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3668 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3669 if (WORDS_BIG_ENDIAN)
3670 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3671 if (BYTES_BIG_ENDIAN)
3672 final_offset += difference % UNITS_PER_WORD;
3675 /* See whether the resulting subreg will be paradoxical. */
3676 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3678 /* In nonparadoxical subregs we can't handle negative offsets. */
3679 if (final_offset < 0)
3681 /* Bail out in case the resulting subreg would be incorrect. */
3682 if (final_offset % GET_MODE_SIZE (outermode)
3683 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3689 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3691 /* In a paradoxical subreg, see if we are still looking at the lower
3692 part. If so, our SUBREG_BYTE will be 0. */
3693 if (WORDS_BIG_ENDIAN)
3694 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3695 if (BYTES_BIG_ENDIAN)
3696 offset += difference % UNITS_PER_WORD;
3697 if (offset == final_offset)
3703 /* Recurse for further possible simplifications. */
3704 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode, final_offset);
3708 if (validate_subreg (outermode, innermostmode,
3709 SUBREG_REG (op), final_offset))
3710 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3714 /* SUBREG of a hard register => just change the register number
3715 and/or mode. If the hard register is not valid in that mode,
3716 suppress this simplification. If the hard register is the stack,
3717 frame, or argument pointer, leave this as a SUBREG. */
3720 && REGNO (op) < FIRST_PSEUDO_REGISTER
3721 #ifdef CANNOT_CHANGE_MODE_CLASS
3722 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3723 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3724 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3726 && ((reload_completed && !frame_pointer_needed)
3727 || (REGNO (op) != FRAME_POINTER_REGNUM
3728 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3729 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3732 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3733 && REGNO (op) != ARG_POINTER_REGNUM
3735 && REGNO (op) != STACK_POINTER_REGNUM
3736 && subreg_offset_representable_p (REGNO (op), innermode,
3739 unsigned int regno = REGNO (op);
3740 unsigned int final_regno
3741 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3743 /* ??? We do allow it if the current REG is not valid for
3744 its mode. This is a kludge to work around how float/complex
3745 arguments are passed on 32-bit SPARC and should be fixed. */
3746 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3747 || ! HARD_REGNO_MODE_OK (regno, innermode))
3749 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3751 /* Propagate the original regno. We don't have any way to specify
3752 the offset inside the original regno, so do so only for the lowpart.
3753 The information is used only by alias analysis, which cannot
3754 grok partial registers anyway. */
3756 if (subreg_lowpart_offset (outermode, innermode) == byte)
3757 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3762 /* If we have a SUBREG of a register that we are replacing and we are
3763 replacing it with a MEM, make a new MEM and try replacing the
3764 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3765 or if we would be widening it. */
3768 && ! mode_dependent_address_p (XEXP (op, 0))
3769 /* Allow splitting of volatile memory references in case we don't
3770 have an instruction to move the whole thing. */
3771 && (! MEM_VOLATILE_P (op)
3772 || ! have_insn_for (SET, innermode))
3773 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3774 return adjust_address_nv (op, outermode, byte);
3776 /* Handle complex values represented as CONCAT
3777 of a real part and an imaginary part. */
3778 if (GET_CODE (op) == CONCAT)
3780 unsigned int inner_size, final_offset;
3783 inner_size = GET_MODE_UNIT_SIZE (innermode);
3784 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3785 final_offset = byte % inner_size;
3786 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3789 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3792 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3793 return gen_rtx_SUBREG (outermode, part, final_offset);
3797 /* Optimize SUBREG truncations of zero and sign extended values. */
3798 if ((GET_CODE (op) == ZERO_EXTEND
3799 || GET_CODE (op) == SIGN_EXTEND)
3800 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3802 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3804 /* If we're requesting the lowpart of a zero or sign extension,
3805 there are three possibilities. If the outermode is the same
3806 as the origmode, we can omit both the extension and the subreg.
3807 If the outermode is not larger than the origmode, we can apply
3808 the truncation without the extension. Finally, if the outermode
3809 is larger than the origmode, but both are integer modes, we
3810 can just extend to the appropriate mode. */
3813 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3814 if (outermode == origmode)
3815 return XEXP (op, 0);
3816 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3817 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3818 subreg_lowpart_offset (outermode, origmode));
3820 if (SCALAR_INT_MODE_P (outermode))
3821 return simplify_gen_unary (GET_CODE (op), outermode,
3822 XEXP (op, 0), origmode);
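/* Illustrative sketch, not part of GCC: the three lowpart cases above,
   with hypothetical C types standing in for modes (QI = uint8_t,
   HI = uint16_t, SI = uint32_t) and zero extensions into SI.  */
#include <stdint.h>

static uint8_t
lowpart_same_mode (uint8_t x)     /* outermode == origmode */
{
  return (uint8_t) (uint32_t) x;  /* == x: drop subreg and extension */
}

static uint8_t
lowpart_narrower (uint16_t x)     /* outermode smaller than origmode */
{
  return (uint8_t) (uint32_t) x;  /* == (uint8_t) x: truncate, no extend */
}

static uint16_t
lowpart_wider (uint8_t x)         /* outermode larger than origmode */
{
  return (uint16_t) (uint32_t) x; /* == (uint16_t) x: just re-extend */
}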
3825 /* A SUBREG resulting from a zero extension may fold to zero if
3826 it extracts bits higher than the ZERO_EXTEND's source bits. */
3827 if (GET_CODE (op) == ZERO_EXTEND
3828 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3829 return CONST0_RTX (outermode);
3832 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
3833 (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
3834 the outer subreg is effectively a truncation to the original mode. */
3835 if ((GET_CODE (op) == LSHIFTRT
3836 || GET_CODE (op) == ASHIFTRT)
3837 && SCALAR_INT_MODE_P (outermode)
3838 /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
3839 to avoid the possibility that an outer LSHIFTRT shifts by more
3840 than the sign extension's sign_bit_copies and introduces zeros
3841 into the high bits of the result. */
3842 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
3843 && GET_CODE (XEXP (op, 1)) == CONST_INT
3844 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
3845 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3846 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3847 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3848 return simplify_gen_binary (ASHIFTRT, outermode,
3849 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3851 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
3852 (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
3853 the outer subreg is effectively a truncation to the original mode. */
3854 if ((GET_CODE (op) == LSHIFTRT
3855 || GET_CODE (op) == ASHIFTRT)
3856 && SCALAR_INT_MODE_P (outermode)
3857 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3858 && GET_CODE (XEXP (op, 1)) == CONST_INT
3859 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3860 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3861 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3862 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3863 return simplify_gen_binary (LSHIFTRT, outermode,
3864 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
3866 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
3867 (ashift:QI (x:QI) C), where C is a suitable small constant and
3868 the outer subreg is effectively a truncation to the original mode. */
3869 if (GET_CODE (op) == ASHIFT
3870 && SCALAR_INT_MODE_P (outermode)
3871 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
3872 && GET_CODE (XEXP (op, 1)) == CONST_INT
3873 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
3874 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
3875 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
3876 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
3877 && subreg_lsb_1 (outermode, innermode, byte) == 0)
3878 return simplify_gen_binary (ASHIFT, outermode,
3879 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
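/* Illustrative sketch, not part of GCC: the shift folds above, with
   hypothetical C types standing in for modes (QI = uint8_t,
   SI = uint32_t).  Truncating a shifted extension of x:QI back to QI
   gives the same bits as shifting x directly, provided C is small
   enough.  */
#include <stdint.h>

static uint8_t
narrowed_lshiftrt (uint8_t x, int c)    /* assumes c < 8 */
{
  /* (subreg:QI (lshiftrt:SI (zero_extend:SI x) c) 0) */
  return (uint8_t) ((uint32_t) x >> c); /* == (uint8_t) (x >> c) */
}

static uint8_t
narrowed_ashift (uint8_t x, int c)      /* assumes c < 8 */
{
  /* (subreg:QI (ashift:SI (zero_extend:SI x) c) 0) */
  return (uint8_t) ((uint32_t) x << c); /* == (uint8_t) (x << c) */
}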
3884 /* Make a SUBREG operation or equivalent if it folds. */
3887 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3888 enum machine_mode innermode, unsigned int byte)
3892 newx = simplify_subreg (outermode, op, innermode, byte);
3896 if (GET_CODE (op) == SUBREG
3897 || GET_CODE (op) == CONCAT
3898 || GET_MODE (op) == VOIDmode)
3901 if (validate_subreg (outermode, innermode, op, byte))
3902 return gen_rtx_SUBREG (outermode, op, byte);
3907 /* Simplify X, an rtx expression.
3909 Return the simplified expression or NULL if no simplifications can be made.
3912 This is the preferred entry point into the simplification routines;
3913 however, we still allow passes to call the more specific routines.
3915 Right now GCC has three (yes, three) major bodies of RTL simplification
3916 code that need to be unified.
3918 1. fold_rtx in cse.c. This code uses various CSE specific
3919 information to aid in RTL simplification.
3921 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3922 it uses combine specific information to aid in RTL
3925 3. The routines in this file.
3928 Long term we want to only have one body of simplification code; to
3929 get to that state I recommend the following steps:
3931 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3932 which are not pass dependent state into these routines.
3934 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3935 use this routine whenever possible.
3937 3. Allow for pass dependent state to be provided to these
3938 routines and add simplifications based on the pass dependent
3939 state. Remove code from cse.c & combine.c that becomes
3942 It will take time, but ultimately the compiler will be easier to
3943 maintain and improve. It's totally silly that when we add a
3944 simplification it needs to be added to 4 places (3 for RTL
3945 simplification and 1 for tree simplification). */
3948 simplify_rtx (rtx x)
3950 enum rtx_code code = GET_CODE (x);
3951 enum machine_mode mode = GET_MODE (x);
3953 switch (GET_RTX_CLASS (code))
3956 return simplify_unary_operation (code, mode,
3957 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3958 case RTX_COMM_ARITH:
3959 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3960 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3962 /* Fall through.... */
3965 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3968 case RTX_BITFIELD_OPS:
3969 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3970 XEXP (x, 0), XEXP (x, 1),
3974 case RTX_COMM_COMPARE:
3975 return simplify_relational_operation (code, mode,
3976 ((GET_MODE (XEXP (x, 0))
3978 ? GET_MODE (XEXP (x, 0))
3979 : GET_MODE (XEXP (x, 1))),
3985 return simplify_gen_subreg (mode, SUBREG_REG (x),
3986 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
3993 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3994 if (GET_CODE (XEXP (x, 0)) == HIGH
3995 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))