1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
42 /* Simplification and canonicalization of RTL. */
44 /* Much code operates on (low, high) pairs; the low value is an
45 unsigned wide int, the high value a signed wide int. We
46 occasionally need to sign extend from low to high as if low were a
48 #define HWI_SIGN_EXTEND(low) \
49 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
51 static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
52 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
54 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
55 enum machine_mode, rtx,
58 /* Negate a CONST_INT rtx, truncating (because a conversion from a
59 maximally negative number can overflow). */
61 neg_const_int (mode, i)
62 enum machine_mode mode;
65 return gen_int_mode (- INTVAL (i), mode);
69 /* Make a binary operation by properly ordering the operands and
70 seeing if the expression folds. */
73 simplify_gen_binary (code, mode, op0, op1)
75 enum machine_mode mode;
80 /* Put complex operands first and constants second if commutative. */
81 if (GET_RTX_CLASS (code) == 'c'
82 && swap_commutative_operands_p (op0, op1))
83 tem = op0, op0 = op1, op1 = tem;
85 /* If this simplifies, do it. */
86 tem = simplify_binary_operation (code, mode, op0, op1);
90 /* Handle addition and subtraction specially. Otherwise, just form
93 if (code == PLUS || code == MINUS)
95 tem = simplify_plus_minus (code, mode, op0, op1, 1);
100 return gen_rtx_fmt_ee (code, mode, op0, op1);
103 /* If X is a MEM referencing the constant pool, return the real value.
104 Otherwise return X. */
106 avoid_constant_pool_reference (x)
110 enum machine_mode cmode;
112 if (GET_CODE (x) != MEM)
116 if (GET_CODE (addr) == LO_SUM)
117 addr = XEXP (addr, 1);
119 if (GET_CODE (addr) != SYMBOL_REF
120 || ! CONSTANT_POOL_ADDRESS_P (addr))
123 c = get_pool_constant (addr);
124 cmode = get_pool_mode (addr);
126 /* If we're accessing the constant in a different mode than it was
127 originally stored, attempt to fix that up via subreg simplifications.
128 If that fails we have no choice but to return the original memory. */
129 if (cmode != GET_MODE (x))
131 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
138 /* Make a unary operation by first seeing if it folds and otherwise making
139 the specified operation. */
142 simplify_gen_unary (code, mode, op, op_mode)
144 enum machine_mode mode;
146 enum machine_mode op_mode;
150 /* If this simplifies, use it. */
151 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
154 return gen_rtx_fmt_e (code, mode, op);
157 /* Likewise for ternary operations. */
160 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
162 enum machine_mode mode, op0_mode;
167 /* If this simplifies, use it. */
168 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
172 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
175 /* Likewise, for relational operations.
176 CMP_MODE specifies mode comparison is done in.
180 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
182 enum machine_mode mode;
183 enum machine_mode cmp_mode;
188 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
191 /* For the following tests, ensure const0_rtx is op1. */
192 if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
193 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
195 /* If op0 is a compare, extract the comparison arguments from it. */
196 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
197 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
199 /* If op0 is a comparison, extract the comparison arguments form it. */
200 if (code == NE && op1 == const0_rtx
201 && GET_RTX_CLASS (GET_CODE (op0)) == '<')
203 else if (code == EQ && op1 == const0_rtx)
205 /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'. */
206 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
216 /* Put complex operands first and constants second. */
217 if (swap_commutative_operands_p (op0, op1))
218 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
220 return gen_rtx_fmt_ee (code, mode, op0, op1);
223 /* Replace all occurrences of OLD in X with NEW and try to simplify the
224 resulting RTX. Return a new RTX which is as simplified as possible. */
227 simplify_replace_rtx (x, old, new)
232 enum rtx_code code = GET_CODE (x);
233 enum machine_mode mode = GET_MODE (x);
235 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
236 to build a new expression substituting recursively. If we can't do
237 anything, return our input. */
242 switch (GET_RTX_CLASS (code))
246 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
247 rtx op = (XEXP (x, 0) == old
248 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
250 return simplify_gen_unary (code, mode, op, op_mode);
256 simplify_gen_binary (code, mode,
257 simplify_replace_rtx (XEXP (x, 0), old, new),
258 simplify_replace_rtx (XEXP (x, 1), old, new));
261 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
262 ? GET_MODE (XEXP (x, 0))
263 : GET_MODE (XEXP (x, 1)));
264 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
265 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
268 simplify_gen_relational (code, mode,
271 : GET_MODE (op0) != VOIDmode
280 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
281 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
284 simplify_gen_ternary (code, mode,
289 simplify_replace_rtx (XEXP (x, 1), old, new),
290 simplify_replace_rtx (XEXP (x, 2), old, new));
294 /* The only case we try to handle is a SUBREG. */
298 exp = simplify_gen_subreg (GET_MODE (x),
299 simplify_replace_rtx (SUBREG_REG (x),
301 GET_MODE (SUBREG_REG (x)),
310 return replace_equiv_address_nv (x,
311 simplify_replace_rtx (XEXP (x, 0),
313 else if (code == LO_SUM)
315 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
316 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
318 /* (lo_sum (high x) x) -> x */
319 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
322 return gen_rtx_LO_SUM (mode, op0, op1);
324 else if (code == REG)
326 if (REG_P (old) && REGNO (x) == REGNO (old))
338 /* Try to simplify a unary operation CODE whose output mode is to be
339 MODE with input operand OP whose mode was originally OP_MODE.
340 Return zero if no simplification can be made. */
342 simplify_unary_operation (code, mode, op, op_mode)
344 enum machine_mode mode;
346 enum machine_mode op_mode;
348 unsigned int width = GET_MODE_BITSIZE (mode);
349 rtx trueop = avoid_constant_pool_reference (op);
351 /* The order of these tests is critical so that, for example, we don't
352 check the wrong mode (input vs. output) for a conversion operation,
353 such as FIX. At some point, this should be simplified. */
355 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
356 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
358 HOST_WIDE_INT hv, lv;
361 if (GET_CODE (trueop) == CONST_INT)
362 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
364 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
366 REAL_VALUE_FROM_INT (d, lv, hv, mode);
367 d = real_value_truncate (mode, d);
368 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
370 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
371 && (GET_CODE (trueop) == CONST_DOUBLE
372 || GET_CODE (trueop) == CONST_INT))
374 HOST_WIDE_INT hv, lv;
377 if (GET_CODE (trueop) == CONST_INT)
378 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
380 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
382 if (op_mode == VOIDmode)
384 /* We don't know how to interpret negative-looking numbers in
385 this case, so don't try to fold those. */
389 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
392 hv = 0, lv &= GET_MODE_MASK (op_mode);
394 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
395 d = real_value_truncate (mode, d);
396 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
399 if (GET_CODE (trueop) == CONST_INT
400 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
402 HOST_WIDE_INT arg0 = INTVAL (trueop);
416 val = (arg0 >= 0 ? arg0 : - arg0);
420 /* Don't use ffs here. Instead, get low order bit and then its
421 number. If arg0 is zero, this will return 0, as desired. */
422 arg0 &= GET_MODE_MASK (mode);
423 val = exact_log2 (arg0 & (- arg0)) + 1;
427 arg0 &= GET_MODE_MASK (mode);
428 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
432 arg0 &= GET_MODE_MASK (mode);
434 ? GET_MODE_BITSIZE (mode)
435 : exact_log2 (arg0 & -arg0);
439 arg0 &= GET_MODE_MASK (mode);
442 val++, arg0 &= arg0 - 1;
446 arg0 &= GET_MODE_MASK (mode);
449 val++, arg0 &= arg0 - 1;
458 /* When zero-extending a CONST_INT, we need to know its
460 if (op_mode == VOIDmode)
462 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
464 /* If we were really extending the mode,
465 we would have to distinguish between zero-extension
466 and sign-extension. */
467 if (width != GET_MODE_BITSIZE (op_mode))
471 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
472 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
478 if (op_mode == VOIDmode)
480 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
482 /* If we were really extending the mode,
483 we would have to distinguish between zero-extension
484 and sign-extension. */
485 if (width != GET_MODE_BITSIZE (op_mode))
489 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
492 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
494 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
495 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
512 val = trunc_int_for_mode (val, mode);
514 return GEN_INT (val);
517 /* We can do some operations on integer CONST_DOUBLEs. Also allow
518 for a DImode operation on a CONST_INT. */
519 else if (GET_MODE (trueop) == VOIDmode
520 && width <= HOST_BITS_PER_WIDE_INT * 2
521 && (GET_CODE (trueop) == CONST_DOUBLE
522 || GET_CODE (trueop) == CONST_INT))
524 unsigned HOST_WIDE_INT l1, lv;
525 HOST_WIDE_INT h1, hv;
527 if (GET_CODE (trueop) == CONST_DOUBLE)
528 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
530 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
540 neg_double (l1, h1, &lv, &hv);
545 neg_double (l1, h1, &lv, &hv);
557 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
560 lv = exact_log2 (l1 & -l1) + 1;
566 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
568 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
569 - HOST_BITS_PER_WIDE_INT;
577 lv = GET_MODE_BITSIZE (mode);
579 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
582 lv = exact_log2 (l1 & -l1);
605 /* This is just a change-of-mode, so do nothing. */
610 if (op_mode == VOIDmode)
613 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
617 lv = l1 & GET_MODE_MASK (op_mode);
621 if (op_mode == VOIDmode
622 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
626 lv = l1 & GET_MODE_MASK (op_mode);
627 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
628 && (lv & ((HOST_WIDE_INT) 1
629 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
630 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
632 hv = HWI_SIGN_EXTEND (lv);
643 return immed_double_const (lv, hv, mode);
646 else if (GET_CODE (trueop) == CONST_DOUBLE
647 && GET_MODE_CLASS (mode) == MODE_FLOAT)
649 REAL_VALUE_TYPE d, t;
650 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
655 if (HONOR_SNANS (mode) && real_isnan (&d))
657 real_sqrt (&t, mode, &d);
661 d = REAL_VALUE_ABS (d);
664 d = REAL_VALUE_NEGATE (d);
667 d = real_value_truncate (mode, d);
670 /* All this does is change the mode. */
673 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
679 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
682 else if (GET_CODE (trueop) == CONST_DOUBLE
683 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
684 && GET_MODE_CLASS (mode) == MODE_INT
685 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
689 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
692 case FIX: i = REAL_VALUE_FIX (d); break;
693 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
697 return gen_int_mode (i, mode);
700 /* This was formerly used only for non-IEEE float.
701 eggert@twinsun.com says it is safe for IEEE also. */
704 enum rtx_code reversed;
705 /* There are some simplifications we can do even if the operands
710 /* (not (not X)) == X. */
711 if (GET_CODE (op) == NOT)
714 /* (not (eq X Y)) == (ne X Y), etc. */
715 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
716 && ((reversed = reversed_comparison_code (op, NULL_RTX))
718 return gen_rtx_fmt_ee (reversed,
719 op_mode, XEXP (op, 0), XEXP (op, 1));
723 /* (neg (neg X)) == X. */
724 if (GET_CODE (op) == NEG)
729 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
730 becomes just the MINUS if its mode is MODE. This allows
731 folding switch statements on machines using casesi (such as
733 if (GET_CODE (op) == TRUNCATE
734 && GET_MODE (XEXP (op, 0)) == mode
735 && GET_CODE (XEXP (op, 0)) == MINUS
736 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
737 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
740 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
741 if (! POINTERS_EXTEND_UNSIGNED
742 && mode == Pmode && GET_MODE (op) == ptr_mode
744 || (GET_CODE (op) == SUBREG
745 && GET_CODE (SUBREG_REG (op)) == REG
746 && REG_POINTER (SUBREG_REG (op))
747 && GET_MODE (SUBREG_REG (op)) == Pmode)))
748 return convert_memory_address (Pmode, op);
752 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
754 if (POINTERS_EXTEND_UNSIGNED > 0
755 && mode == Pmode && GET_MODE (op) == ptr_mode
757 || (GET_CODE (op) == SUBREG
758 && GET_CODE (SUBREG_REG (op)) == REG
759 && REG_POINTER (SUBREG_REG (op))
760 && GET_MODE (SUBREG_REG (op)) == Pmode)))
761 return convert_memory_address (Pmode, op);
773 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
774 and OP1. Return 0 if no simplification is possible.
776 Don't use this for relational operations such as EQ or LT.
777 Use simplify_relational_operation instead. */
779 simplify_binary_operation (code, mode, op0, op1)
781 enum machine_mode mode;
784 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
786 unsigned int width = GET_MODE_BITSIZE (mode);
788 rtx trueop0 = avoid_constant_pool_reference (op0);
789 rtx trueop1 = avoid_constant_pool_reference (op1);
791 /* Relational operations don't work here. We must know the mode
792 of the operands in order to do the comparison correctly.
793 Assuming a full word can give incorrect results.
794 Consider comparing 128 with -128 in QImode. */
796 if (GET_RTX_CLASS (code) == '<')
799 /* Make sure the constant is second. */
800 if (GET_RTX_CLASS (code) == 'c'
801 && swap_commutative_operands_p (trueop0, trueop1))
803 tem = op0, op0 = op1, op1 = tem;
804 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
807 if (GET_MODE_CLASS (mode) == MODE_FLOAT
808 && GET_CODE (trueop0) == CONST_DOUBLE
809 && GET_CODE (trueop1) == CONST_DOUBLE
810 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
812 REAL_VALUE_TYPE f0, f1, value;
814 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
815 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
816 f0 = real_value_truncate (mode, f0);
817 f1 = real_value_truncate (mode, f1);
820 && !MODE_HAS_INFINITIES (mode)
821 && REAL_VALUES_EQUAL (f1, dconst0))
824 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
826 value = real_value_truncate (mode, value);
827 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
830 /* We can fold some multi-word operations. */
831 if (GET_MODE_CLASS (mode) == MODE_INT
832 && width == HOST_BITS_PER_WIDE_INT * 2
833 && (GET_CODE (trueop0) == CONST_DOUBLE
834 || GET_CODE (trueop0) == CONST_INT)
835 && (GET_CODE (trueop1) == CONST_DOUBLE
836 || GET_CODE (trueop1) == CONST_INT))
838 unsigned HOST_WIDE_INT l1, l2, lv;
839 HOST_WIDE_INT h1, h2, hv;
841 if (GET_CODE (trueop0) == CONST_DOUBLE)
842 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
844 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
846 if (GET_CODE (trueop1) == CONST_DOUBLE)
847 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
849 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
854 /* A - B == A + (-B). */
855 neg_double (l2, h2, &lv, &hv);
858 /* .. fall through ... */
861 add_double (l1, h1, l2, h2, &lv, &hv);
865 mul_double (l1, h1, l2, h2, &lv, &hv);
868 case DIV: case MOD: case UDIV: case UMOD:
869 /* We'd need to include tree.h to do this and it doesn't seem worth
874 lv = l1 & l2, hv = h1 & h2;
878 lv = l1 | l2, hv = h1 | h2;
882 lv = l1 ^ l2, hv = h1 ^ h2;
888 && ((unsigned HOST_WIDE_INT) l1
889 < (unsigned HOST_WIDE_INT) l2)))
898 && ((unsigned HOST_WIDE_INT) l1
899 > (unsigned HOST_WIDE_INT) l2)))
906 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
908 && ((unsigned HOST_WIDE_INT) l1
909 < (unsigned HOST_WIDE_INT) l2)))
916 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
918 && ((unsigned HOST_WIDE_INT) l1
919 > (unsigned HOST_WIDE_INT) l2)))
925 case LSHIFTRT: case ASHIFTRT:
927 case ROTATE: case ROTATERT:
928 #ifdef SHIFT_COUNT_TRUNCATED
929 if (SHIFT_COUNT_TRUNCATED)
930 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
933 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
936 if (code == LSHIFTRT || code == ASHIFTRT)
937 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
939 else if (code == ASHIFT)
940 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
941 else if (code == ROTATE)
942 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
943 else /* code == ROTATERT */
944 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
951 return immed_double_const (lv, hv, mode);
954 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
955 || width > HOST_BITS_PER_WIDE_INT || width == 0)
957 /* Even if we can't compute a constant result,
958 there are some cases worth simplifying. */
963 /* Maybe simplify x + 0 to x. The two expressions are equivalent
964 when x is NaN, infinite, or finite and nonzero. They aren't
965 when x is -0 and the rounding mode is not towards -infinity,
966 since (-0) + 0 is then 0. */
967 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
970 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
971 transformations are safe even for IEEE. */
972 if (GET_CODE (op0) == NEG)
973 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
974 else if (GET_CODE (op1) == NEG)
975 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
978 if (INTEGRAL_MODE_P (mode)
979 && GET_CODE (op0) == NOT
980 && trueop1 == const1_rtx)
981 return gen_rtx_NEG (mode, XEXP (op0, 0));
983 /* Handle both-operands-constant cases. We can only add
984 CONST_INTs to constants since the sum of relocatable symbols
985 can't be handled by most assemblers. Don't add CONST_INT
986 to CONST_INT since overflow won't be computed properly if wider
987 than HOST_BITS_PER_WIDE_INT. */
989 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
990 && GET_CODE (op1) == CONST_INT)
991 return plus_constant (op0, INTVAL (op1));
992 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
993 && GET_CODE (op0) == CONST_INT)
994 return plus_constant (op1, INTVAL (op0));
996 /* See if this is something like X * C - X or vice versa or
997 if the multiplication is written as a shift. If so, we can
998 distribute and make a new multiply, shift, or maybe just
999 have X (if C is 2 in the example above). But don't make
1000 real multiply if we didn't have one before. */
1002 if (! FLOAT_MODE_P (mode))
1004 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1005 rtx lhs = op0, rhs = op1;
1008 if (GET_CODE (lhs) == NEG)
1009 coeff0 = -1, lhs = XEXP (lhs, 0);
1010 else if (GET_CODE (lhs) == MULT
1011 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1013 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1016 else if (GET_CODE (lhs) == ASHIFT
1017 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1018 && INTVAL (XEXP (lhs, 1)) >= 0
1019 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1021 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1022 lhs = XEXP (lhs, 0);
1025 if (GET_CODE (rhs) == NEG)
1026 coeff1 = -1, rhs = XEXP (rhs, 0);
1027 else if (GET_CODE (rhs) == MULT
1028 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1030 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1033 else if (GET_CODE (rhs) == ASHIFT
1034 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1035 && INTVAL (XEXP (rhs, 1)) >= 0
1036 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1038 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1039 rhs = XEXP (rhs, 0);
1042 if (rtx_equal_p (lhs, rhs))
1044 tem = simplify_gen_binary (MULT, mode, lhs,
1045 GEN_INT (coeff0 + coeff1));
1046 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1050 /* If one of the operands is a PLUS or a MINUS, see if we can
1051 simplify this by the associative law.
1052 Don't use the associative law for floating point.
1053 The inaccuracy makes it nonassociative,
1054 and subtle programs can break if operations are associated. */
1056 if (INTEGRAL_MODE_P (mode)
1057 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1058 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1059 || (GET_CODE (op0) == CONST
1060 && GET_CODE (XEXP (op0, 0)) == PLUS)
1061 || (GET_CODE (op1) == CONST
1062 && GET_CODE (XEXP (op1, 0)) == PLUS))
1063 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1069 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1070 using cc0, in which case we want to leave it as a COMPARE
1071 so we can distinguish it from a register-register-copy.
1073 In IEEE floating point, x-0 is not the same as x. */
1075 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1076 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1077 && trueop1 == CONST0_RTX (mode))
1081 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1082 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1083 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1084 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1086 rtx xop00 = XEXP (op0, 0);
1087 rtx xop10 = XEXP (op1, 0);
1090 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1092 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1093 && GET_MODE (xop00) == GET_MODE (xop10)
1094 && REGNO (xop00) == REGNO (xop10)
1095 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1096 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1103 /* We can't assume x-x is 0 even with non-IEEE floating point,
1104 but since it is zero except in very strange circumstances, we
1105 will treat it as zero with -funsafe-math-optimizations. */
1106 if (rtx_equal_p (trueop0, trueop1)
1107 && ! side_effects_p (op0)
1108 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1109 return CONST0_RTX (mode);
1111 /* Change subtraction from zero into negation. (0 - x) is the
1112 same as -x when x is NaN, infinite, or finite and nonzero.
1113 But if the mode has signed zeros, and does not round towards
1114 -infinity, then 0 - 0 is 0, not -0. */
1115 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1116 return gen_rtx_NEG (mode, op1);
1118 /* (-1 - a) is ~a. */
1119 if (trueop0 == constm1_rtx)
1120 return gen_rtx_NOT (mode, op1);
1122 /* Subtracting 0 has no effect unless the mode has signed zeros
1123 and supports rounding towards -infinity. In such a case,
1125 if (!(HONOR_SIGNED_ZEROS (mode)
1126 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1127 && trueop1 == CONST0_RTX (mode))
1130 /* See if this is something like X * C - X or vice versa or
1131 if the multiplication is written as a shift. If so, we can
1132 distribute and make a new multiply, shift, or maybe just
1133 have X (if C is 2 in the example above). But don't make
1134 real multiply if we didn't have one before. */
1136 if (! FLOAT_MODE_P (mode))
1138 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1139 rtx lhs = op0, rhs = op1;
1142 if (GET_CODE (lhs) == NEG)
1143 coeff0 = -1, lhs = XEXP (lhs, 0);
1144 else if (GET_CODE (lhs) == MULT
1145 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1147 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1150 else if (GET_CODE (lhs) == ASHIFT
1151 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1152 && INTVAL (XEXP (lhs, 1)) >= 0
1153 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1155 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1156 lhs = XEXP (lhs, 0);
1159 if (GET_CODE (rhs) == NEG)
1160 coeff1 = - 1, rhs = XEXP (rhs, 0);
1161 else if (GET_CODE (rhs) == MULT
1162 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1164 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1167 else if (GET_CODE (rhs) == ASHIFT
1168 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1169 && INTVAL (XEXP (rhs, 1)) >= 0
1170 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1172 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1173 rhs = XEXP (rhs, 0);
1176 if (rtx_equal_p (lhs, rhs))
1178 tem = simplify_gen_binary (MULT, mode, lhs,
1179 GEN_INT (coeff0 - coeff1));
1180 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1184 /* (a - (-b)) -> (a + b). True even for IEEE. */
1185 if (GET_CODE (op1) == NEG)
1186 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1188 /* If one of the operands is a PLUS or a MINUS, see if we can
1189 simplify this by the associative law.
1190 Don't use the associative law for floating point.
1191 The inaccuracy makes it nonassociative,
1192 and subtle programs can break if operations are associated. */
1194 if (INTEGRAL_MODE_P (mode)
1195 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1196 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1197 || (GET_CODE (op0) == CONST
1198 && GET_CODE (XEXP (op0, 0)) == PLUS)
1199 || (GET_CODE (op1) == CONST
1200 && GET_CODE (XEXP (op1, 0)) == PLUS))
1201 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1204 /* Don't let a relocatable value get a negative coeff. */
1205 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1206 return simplify_gen_binary (PLUS, mode,
1208 neg_const_int (mode, op1));
1210 /* (x - (x & y)) -> (x & ~y) */
1211 if (GET_CODE (op1) == AND)
1213 if (rtx_equal_p (op0, XEXP (op1, 0)))
1214 return simplify_gen_binary (AND, mode, op0,
1215 gen_rtx_NOT (mode, XEXP (op1, 1)));
1216 if (rtx_equal_p (op0, XEXP (op1, 1)))
1217 return simplify_gen_binary (AND, mode, op0,
1218 gen_rtx_NOT (mode, XEXP (op1, 0)));
1223 if (trueop1 == constm1_rtx)
1225 tem = simplify_unary_operation (NEG, mode, op0, mode);
1227 return tem ? tem : gen_rtx_NEG (mode, op0);
1230 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1231 x is NaN, since x * 0 is then also NaN. Nor is it valid
1232 when the mode has signed zeros, since multiplying a negative
1233 number by 0 will give -0, not 0. */
1234 if (!HONOR_NANS (mode)
1235 && !HONOR_SIGNED_ZEROS (mode)
1236 && trueop1 == CONST0_RTX (mode)
1237 && ! side_effects_p (op0))
1240 /* In IEEE floating point, x*1 is not equivalent to x for
1242 if (!HONOR_SNANS (mode)
1243 && trueop1 == CONST1_RTX (mode))
1246 /* Convert multiply by constant power of two into shift unless
1247 we are still generating RTL. This test is a kludge. */
1248 if (GET_CODE (trueop1) == CONST_INT
1249 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1250 /* If the mode is larger than the host word size, and the
1251 uppermost bit is set, then this isn't a power of two due
1252 to implicit sign extension. */
1253 && (width <= HOST_BITS_PER_WIDE_INT
1254 || val != HOST_BITS_PER_WIDE_INT - 1)
1255 && ! rtx_equal_function_value_matters)
1256 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1258 /* x*2 is x+x and x*(-1) is -x */
1259 if (GET_CODE (trueop1) == CONST_DOUBLE
1260 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1261 && GET_MODE (op0) == mode)
1264 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1266 if (REAL_VALUES_EQUAL (d, dconst2))
1267 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1269 if (REAL_VALUES_EQUAL (d, dconstm1))
1270 return gen_rtx_NEG (mode, op0);
1275 if (trueop1 == const0_rtx)
1277 if (GET_CODE (trueop1) == CONST_INT
1278 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1279 == GET_MODE_MASK (mode)))
1281 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1283 /* A | (~A) -> -1 */
1284 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1285 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1286 && ! side_effects_p (op0)
1287 && GET_MODE_CLASS (mode) != MODE_CC)
1292 if (trueop1 == const0_rtx)
1294 if (GET_CODE (trueop1) == CONST_INT
1295 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1296 == GET_MODE_MASK (mode)))
1297 return gen_rtx_NOT (mode, op0);
1298 if (trueop0 == trueop1 && ! side_effects_p (op0)
1299 && GET_MODE_CLASS (mode) != MODE_CC)
1304 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1306 if (GET_CODE (trueop1) == CONST_INT
1307 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1308 == GET_MODE_MASK (mode)))
1310 if (trueop0 == trueop1 && ! side_effects_p (op0)
1311 && GET_MODE_CLASS (mode) != MODE_CC)
1314 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1315 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1316 && ! side_effects_p (op0)
1317 && GET_MODE_CLASS (mode) != MODE_CC)
1322 /* Convert divide by power of two into shift (divide by 1 handled
1324 if (GET_CODE (trueop1) == CONST_INT
1325 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1326 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1328 /* ... fall through ... */
1331 if (trueop1 == CONST1_RTX (mode))
1333 /* On some platforms DIV uses narrower mode than its
1335 rtx x = gen_lowpart_common (mode, op0);
1338 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1339 return gen_lowpart_SUBREG (mode, op0);
1344 /* Maybe change 0 / x to 0. This transformation isn't safe for
1345 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1346 Nor is it safe for modes with signed zeros, since dividing
1347 0 by a negative number gives -0, not 0. */
1348 if (!HONOR_NANS (mode)
1349 && !HONOR_SIGNED_ZEROS (mode)
1350 && trueop0 == CONST0_RTX (mode)
1351 && ! side_effects_p (op1))
1354 /* Change division by a constant into multiplication. Only do
1355 this with -funsafe-math-optimizations. */
1356 else if (GET_CODE (trueop1) == CONST_DOUBLE
1357 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1358 && trueop1 != CONST0_RTX (mode)
1359 && flag_unsafe_math_optimizations)
1362 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1364 if (! REAL_VALUES_EQUAL (d, dconst0))
1366 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1367 return gen_rtx_MULT (mode, op0,
1368 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1374 /* Handle modulus by power of two (mod with 1 handled below). */
1375 if (GET_CODE (trueop1) == CONST_INT
1376 && exact_log2 (INTVAL (trueop1)) > 0)
1377 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1379 /* ... fall through ... */
1382 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1383 && ! side_effects_p (op0) && ! side_effects_p (op1))
1390 /* Rotating ~0 always results in ~0. */
1391 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1392 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1393 && ! side_effects_p (op1))
1396 /* ... fall through ... */
1400 if (trueop1 == const0_rtx)
1402 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1407 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1408 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1409 && ! side_effects_p (op0))
1411 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1416 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1417 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1418 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1419 && ! side_effects_p (op0))
1421 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1426 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1428 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1433 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1435 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1443 /* ??? There are simplifications that can be done. */
1453 /* Get the integer argument values in two forms:
1454 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1456 arg0 = INTVAL (trueop0);
1457 arg1 = INTVAL (trueop1);
1459 if (width < HOST_BITS_PER_WIDE_INT)
1461 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1462 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1465 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1466 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1469 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1470 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1478 /* Compute the value of the arithmetic. */
1483 val = arg0s + arg1s;
1487 val = arg0s - arg1s;
1491 val = arg0s * arg1s;
1496 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1499 val = arg0s / arg1s;
1504 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1507 val = arg0s % arg1s;
1512 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1515 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1520 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1523 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1539 /* If shift count is undefined, don't fold it; let the machine do
1540 what it wants. But truncate it if the machine will do that. */
1544 #ifdef SHIFT_COUNT_TRUNCATED
1545 if (SHIFT_COUNT_TRUNCATED)
1549 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1556 #ifdef SHIFT_COUNT_TRUNCATED
1557 if (SHIFT_COUNT_TRUNCATED)
1561 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1568 #ifdef SHIFT_COUNT_TRUNCATED
1569 if (SHIFT_COUNT_TRUNCATED)
1573 val = arg0s >> arg1;
1575 /* Bootstrap compiler may not have sign extended the right shift.
1576 Manually extend the sign to insure bootstrap cc matches gcc. */
1577 if (arg0s < 0 && arg1 > 0)
1578 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1587 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1588 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1596 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1597 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1601 /* Do nothing here. */
1605 val = arg0s <= arg1s ? arg0s : arg1s;
1609 val = ((unsigned HOST_WIDE_INT) arg0
1610 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1614 val = arg0s > arg1s ? arg0s : arg1s;
1618 val = ((unsigned HOST_WIDE_INT) arg0
1619 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1626 val = trunc_int_for_mode (val, mode);
1628 return GEN_INT (val);
1631 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1634 Rather than test for specific case, we do this by a brute-force method
1635 and do all possible simplifications until no more changes occur. Then
1636 we rebuild the operation.
1638 If FORCE is true, then always generate the rtx. This is used to
1639 canonicalize stuff emitted from simplify_gen_binary. Note that this
1640 can still fail if the rtx is too complex. It won't fail just because
1641 the result is not 'simpler' than the input, however. */
1643 struct simplify_plus_minus_op_data
1650 simplify_plus_minus_op_data_cmp (p1, p2)
1654 const struct simplify_plus_minus_op_data *d1 = p1;
1655 const struct simplify_plus_minus_op_data *d2 = p2;
1657 return (commutative_operand_precedence (d2->op)
1658 - commutative_operand_precedence (d1->op));
1662 simplify_plus_minus (code, mode, op0, op1, force)
1664 enum machine_mode mode;
1668 struct simplify_plus_minus_op_data ops[8];
1670 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1671 int first, negate, changed;
1674 memset ((char *) ops, 0, sizeof ops);
1676 /* Set up the two operands and then expand them until nothing has been
1677 changed. If we run out of room in our array, give up; this should
1678 almost never happen. */
1683 ops[1].neg = (code == MINUS);
1689 for (i = 0; i < n_ops; i++)
1691 rtx this_op = ops[i].op;
1692 int this_neg = ops[i].neg;
1693 enum rtx_code this_code = GET_CODE (this_op);
1702 ops[n_ops].op = XEXP (this_op, 1);
1703 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1706 ops[i].op = XEXP (this_op, 0);
1712 ops[i].op = XEXP (this_op, 0);
1713 ops[i].neg = ! this_neg;
1719 && GET_CODE (XEXP (this_op, 0)) == PLUS
1720 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1721 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1723 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1724 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1725 ops[n_ops].neg = this_neg;
1733 /* ~a -> (-a - 1) */
1736 ops[n_ops].op = constm1_rtx;
1737 ops[n_ops++].neg = this_neg;
1738 ops[i].op = XEXP (this_op, 0);
1739 ops[i].neg = !this_neg;
1747 ops[i].op = neg_const_int (mode, this_op);
1760 /* If we only have two operands, we can't do anything. */
1761 if (n_ops <= 2 && !force)
1764 /* Count the number of CONSTs we didn't split above. */
1765 for (i = 0; i < n_ops; i++)
1766 if (GET_CODE (ops[i].op) == CONST)
1769 /* Now simplify each pair of operands until nothing changes. The first
1770 time through just simplify constants against each other. */
1777 for (i = 0; i < n_ops - 1; i++)
1778 for (j = i + 1; j < n_ops; j++)
1780 rtx lhs = ops[i].op, rhs = ops[j].op;
1781 int lneg = ops[i].neg, rneg = ops[j].neg;
1783 if (lhs != 0 && rhs != 0
1784 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
1786 enum rtx_code ncode = PLUS;
1792 tem = lhs, lhs = rhs, rhs = tem;
1794 else if (swap_commutative_operands_p (lhs, rhs))
1795 tem = lhs, lhs = rhs, rhs = tem;
1797 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1799 /* Reject "simplifications" that just wrap the two
1800 arguments in a CONST. Failure to do so can result
1801 in infinite recursion with simplify_binary_operation
1802 when it calls us to simplify CONST operations. */
1804 && ! (GET_CODE (tem) == CONST
1805 && GET_CODE (XEXP (tem, 0)) == ncode
1806 && XEXP (XEXP (tem, 0), 0) == lhs
1807 && XEXP (XEXP (tem, 0), 1) == rhs)
1808 /* Don't allow -x + -1 -> ~x simplifications in the
1809 first pass. This allows us the chance to combine
1810 the -1 with other constants. */
1812 && GET_CODE (tem) == NOT
1813 && XEXP (tem, 0) == rhs))
1816 if (GET_CODE (tem) == NEG)
1817 tem = XEXP (tem, 0), lneg = !lneg;
1818 if (GET_CODE (tem) == CONST_INT && lneg)
1819 tem = neg_const_int (mode, tem), lneg = 0;
1823 ops[j].op = NULL_RTX;
1833 /* Pack all the operands to the lower-numbered entries. */
1834 for (i = 0, j = 0; j < n_ops; j++)
1839 /* Sort the operations based on swap_commutative_operands_p. */
1840 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
1842 /* We suppressed creation of trivial CONST expressions in the
1843 combination loop to avoid recursion. Create one manually now.
1844 The combination loop should have ensured that there is exactly
1845 one CONST_INT, and the sort will have ensured that it is last
1846 in the array and that any other constant will be next-to-last. */
1849 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
1850 && CONSTANT_P (ops[n_ops - 2].op))
1852 rtx value = ops[n_ops - 1].op;
1853 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
1854 value = neg_const_int (mode, value);
1855 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
1859 /* Count the number of CONSTs that we generated. */
1861 for (i = 0; i < n_ops; i++)
1862 if (GET_CODE (ops[i].op) == CONST)
1865 /* Give up if we didn't reduce the number of operands we had. Make
1866 sure we count a CONST as two operands. If we have the same
1867 number of operands, but have made more CONSTs than before, this
1868 is also an improvement, so accept it. */
1870 && (n_ops + n_consts > input_ops
1871 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
1874 /* Put a non-negated operand first. If there aren't any, make all
1875 operands positive and negate the whole thing later. */
1878 for (i = 0; i < n_ops && ops[i].neg; i++)
1882 for (i = 0; i < n_ops; i++)
1894 /* Now make the result by performing the requested operations. */
1896 for (i = 1; i < n_ops; i++)
1897 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
1898 mode, result, ops[i].op);
1900 return negate ? gen_rtx_NEG (mode, result) : result;
1903 /* Like simplify_binary_operation except used for relational operators.
1904 MODE is the mode of the operands, not that of the result. If MODE
1905 is VOIDmode, both operands must also be VOIDmode and we compare the
1906 operands in "infinite precision".
1908 If no simplification is possible, this function returns zero. Otherwise,
1909 it returns either const_true_rtx or const0_rtx. */
1912 simplify_relational_operation (code, mode, op0, op1)
1914 enum machine_mode mode;
1917 int equal, op0lt, op0ltu, op1lt, op1ltu;
1922 if (mode == VOIDmode
1923 && (GET_MODE (op0) != VOIDmode
1924 || GET_MODE (op1) != VOIDmode))
1927 /* If op0 is a compare, extract the comparison arguments from it. */
1928 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
1929 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
1931 trueop0 = avoid_constant_pool_reference (op0);
1932 trueop1 = avoid_constant_pool_reference (op1);
1934 /* We can't simplify MODE_CC values since we don't know what the
1935 actual comparison is. */
1936 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
1943 /* Make sure the constant is second. */
1944 if (swap_commutative_operands_p (trueop0, trueop1))
1946 tem = op0, op0 = op1, op1 = tem;
1947 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1948 code = swap_condition (code);
1951 /* For integer comparisons of A and B maybe we can simplify A - B and can
1952 then simplify a comparison of that with zero. If A and B are both either
1953 a register or a CONST_INT, this can't help; testing for these cases will
1954 prevent infinite recursion here and speed things up.
1956 If CODE is an unsigned comparison, then we can never do this optimization,
1957 because it gives an incorrect result if the subtraction wraps around zero.
1958 ANSI C defines unsigned operations such that they never overflow, and
1959 thus such cases can not be ignored. */
1961 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
1962 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
1963 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
1964 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
1965 && code != GTU && code != GEU && code != LTU && code != LEU)
1966 return simplify_relational_operation (signed_condition (code),
1967 mode, tem, const0_rtx);
1969 if (flag_unsafe_math_optimizations && code == ORDERED)
1970 return const_true_rtx;
1972 if (flag_unsafe_math_optimizations && code == UNORDERED)
1975 /* For modes without NaNs, if the two operands are equal, we know the
1977 if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
1978 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
1980 /* If the operands are floating-point constants, see if we can fold
1982 else if (GET_CODE (trueop0) == CONST_DOUBLE
1983 && GET_CODE (trueop1) == CONST_DOUBLE
1984 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
1986 REAL_VALUE_TYPE d0, d1;
1988 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
1989 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
1991 /* Comparisons are unordered iff at least one of the values is NaN. */
1992 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2002 return const_true_rtx;
2015 equal = REAL_VALUES_EQUAL (d0, d1);
2016 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2017 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2020 /* Otherwise, see if the operands are both integers. */
2021 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2022 && (GET_CODE (trueop0) == CONST_DOUBLE
2023 || GET_CODE (trueop0) == CONST_INT)
2024 && (GET_CODE (trueop1) == CONST_DOUBLE
2025 || GET_CODE (trueop1) == CONST_INT))
2027 int width = GET_MODE_BITSIZE (mode);
2028 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2029 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2031 /* Get the two words comprising each integer constant. */
2032 if (GET_CODE (trueop0) == CONST_DOUBLE)
2034 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2035 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2039 l0u = l0s = INTVAL (trueop0);
2040 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2043 if (GET_CODE (trueop1) == CONST_DOUBLE)
2045 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2046 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2050 l1u = l1s = INTVAL (trueop1);
2051 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2054 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2055 we have to sign or zero-extend the values. */
2056 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2058 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2059 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2061 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2062 l0s |= ((HOST_WIDE_INT) (-1) << width);
2064 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2065 l1s |= ((HOST_WIDE_INT) (-1) << width);
2067 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2068 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2070 equal = (h0u == h1u && l0u == l1u);
2071 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2072 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2073 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2074 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2077 /* Otherwise, there are some code-specific tests we can make. */
2083 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2088 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2089 return const_true_rtx;
2093 /* Unsigned values are never negative. */
2094 if (trueop1 == const0_rtx)
2095 return const_true_rtx;
2099 if (trueop1 == const0_rtx)
2104 /* Unsigned values are never greater than the largest
2106 if (GET_CODE (trueop1) == CONST_INT
2107 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2108 && INTEGRAL_MODE_P (mode))
2109 return const_true_rtx;
2113 if (GET_CODE (trueop1) == CONST_INT
2114 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2115 && INTEGRAL_MODE_P (mode))
2120 /* Optimize abs(x) < 0.0. */
2121 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2123 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2125 if (GET_CODE (tem) == ABS)
2131 /* Optimize abs(x) >= 0.0. */
2132 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2134 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2136 if (GET_CODE (tem) == ABS)
2148 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2154 return equal ? const_true_rtx : const0_rtx;
2157 return ! equal ? const_true_rtx : const0_rtx;
2160 return op0lt ? const_true_rtx : const0_rtx;
2163 return op1lt ? const_true_rtx : const0_rtx;
2165 return op0ltu ? const_true_rtx : const0_rtx;
2167 return op1ltu ? const_true_rtx : const0_rtx;
2170 return equal || op0lt ? const_true_rtx : const0_rtx;
2173 return equal || op1lt ? const_true_rtx : const0_rtx;
2175 return equal || op0ltu ? const_true_rtx : const0_rtx;
2177 return equal || op1ltu ? const_true_rtx : const0_rtx;
2179 return const_true_rtx;
2187 /* Simplify CODE, an operation with result mode MODE and three operands,
2188 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2189 a constant. Return 0 if no simplifications is possible. */
2192 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2194 enum machine_mode mode, op0_mode;
2197 unsigned int width = GET_MODE_BITSIZE (mode);
2199 /* VOIDmode means "infinite" precision. */
2201 width = HOST_BITS_PER_WIDE_INT;
2207 if (GET_CODE (op0) == CONST_INT
2208 && GET_CODE (op1) == CONST_INT
2209 && GET_CODE (op2) == CONST_INT
2210 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2211 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2213 /* Extracting a bit-field from a constant */
2214 HOST_WIDE_INT val = INTVAL (op0);
2216 if (BITS_BIG_ENDIAN)
2217 val >>= (GET_MODE_BITSIZE (op0_mode)
2218 - INTVAL (op2) - INTVAL (op1));
2220 val >>= INTVAL (op2);
2222 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2224 /* First zero-extend. */
2225 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2226 /* If desired, propagate sign bit. */
2227 if (code == SIGN_EXTRACT
2228 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2229 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2232 /* Clear the bits that don't belong in our mode,
2233 unless they and our sign bit are all one.
2234 So we get either a reasonable negative value or a reasonable
2235 unsigned value for this mode. */
2236 if (width < HOST_BITS_PER_WIDE_INT
2237 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2238 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2239 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2241 return GEN_INT (val);
2246 if (GET_CODE (op0) == CONST_INT)
2247 return op0 != const0_rtx ? op1 : op2;
2249 /* Convert a == b ? b : a to "a". */
2250 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2251 && !HONOR_NANS (mode)
2252 && rtx_equal_p (XEXP (op0, 0), op1)
2253 && rtx_equal_p (XEXP (op0, 1), op2))
2255 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2256 && !HONOR_NANS (mode)
2257 && rtx_equal_p (XEXP (op0, 1), op1)
2258 && rtx_equal_p (XEXP (op0, 0), op2))
2260 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2262 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2263 ? GET_MODE (XEXP (op0, 1))
2264 : GET_MODE (XEXP (op0, 0)));
2266 if (cmp_mode == VOIDmode)
2267 cmp_mode = op0_mode;
2268 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2269 XEXP (op0, 0), XEXP (op0, 1));
2271 /* See if any simplifications were possible. */
2272 if (temp == const0_rtx)
2274 else if (temp == const1_rtx)
2279 /* Look for happy constants in op1 and op2. */
2280 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2282 HOST_WIDE_INT t = INTVAL (op1);
2283 HOST_WIDE_INT f = INTVAL (op2);
2285 if (t == STORE_FLAG_VALUE && f == 0)
2286 code = GET_CODE (op0);
2287 else if (t == 0 && f == STORE_FLAG_VALUE)
2290 tmp = reversed_comparison_code (op0, NULL_RTX);
2298 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2310 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2311 Return 0 if no simplifications is possible. */
2313 simplify_subreg (outermode, op, innermode, byte)
2316 enum machine_mode outermode, innermode;
2318 /* Little bit of sanity checking. */
2319 if (innermode == VOIDmode || outermode == VOIDmode
2320 || innermode == BLKmode || outermode == BLKmode)
2323 if (GET_MODE (op) != innermode
2324 && GET_MODE (op) != VOIDmode)
2327 if (byte % GET_MODE_SIZE (outermode)
2328 || byte >= GET_MODE_SIZE (innermode))
2331 if (outermode == innermode && !byte)
2334 /* Simplify subregs of vector constants. */
2335 if (GET_CODE (op) == CONST_VECTOR)
2337 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2338 const unsigned int offset = byte / elt_size;
2341 if (GET_MODE_INNER (innermode) == outermode)
2343 elt = CONST_VECTOR_ELT (op, offset);
2345 /* ?? We probably don't need this copy_rtx because constants
2346 can be shared. ?? */
2348 return copy_rtx (elt);
2350 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2351 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2353 return (gen_rtx_CONST_VECTOR
2355 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2356 &CONST_VECTOR_ELT (op, offset))));
2358 else if (GET_MODE_CLASS (outermode) == MODE_INT
2359 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2361 /* This happens when the target register size is smaller then
2362 the vector mode, and we synthesize operations with vectors
2363 of elements that are smaller than the register size. */
2364 HOST_WIDE_INT sum = 0, high = 0;
2365 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2366 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2367 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2368 int shift = BITS_PER_UNIT * elt_size;
2370 for (; n_elts--; i += step)
2372 elt = CONST_VECTOR_ELT (op, i);
2373 if (GET_CODE (elt) == CONST_DOUBLE
2374 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2376 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2381 if (GET_CODE (elt) != CONST_INT)
2383 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2384 sum = (sum << shift) + INTVAL (elt);
2386 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2387 return GEN_INT (trunc_int_for_mode (sum, outermode));
2388 else if (GET_MODE_BITSIZE (outermode) == 2* HOST_BITS_PER_WIDE_INT)
2389 return immed_double_const (high, sum, outermode);
2393 else if (GET_MODE_CLASS (outermode) == MODE_INT
2394 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2396 enum machine_mode new_mode
2397 = int_mode_for_mode (GET_MODE_INNER (innermode));
2398 int subbyte = byte % elt_size;
2400 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2403 return simplify_subreg (outermode, op, new_mode, subbyte);
2405 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2406 /* This shouldn't happen, but let's not do anything stupid. */
2410 /* Attempt to simplify constant to non-SUBREG expression. */
2411 if (CONSTANT_P (op))
2414 unsigned HOST_WIDE_INT val = 0;
2416 if (GET_MODE_CLASS (outermode) == MODE_VECTOR_INT
2417 || GET_MODE_CLASS (outermode) == MODE_VECTOR_FLOAT)
2419 /* Construct a CONST_VECTOR from individual subregs. */
2420 enum machine_mode submode = GET_MODE_INNER (outermode);
2421 int subsize = GET_MODE_UNIT_SIZE (outermode);
2422 int i, elts = GET_MODE_NUNITS (outermode);
2423 rtvec v = rtvec_alloc (elts);
2426 for (i = 0; i < elts; i++, byte += subsize)
2428 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2429 /* ??? It would be nice if we could actually make such subregs
2430 on targets that allow such relocations. */
2431 if (byte >= GET_MODE_UNIT_SIZE (innermode))
2432 elt = CONST0_RTX (submode);
2434 elt = simplify_subreg (submode, op, innermode, byte);
2437 RTVEC_ELT (v, i) = elt;
2439 return gen_rtx_CONST_VECTOR (outermode, v);
2442 /* ??? This code is partly redundant with code below, but can handle
2443 the subregs of floats and similar corner cases.
2444 Later it we should move all simplification code here and rewrite
2445 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2446 using SIMPLIFY_SUBREG. */
2447 if (subreg_lowpart_offset (outermode, innermode) == byte
2448 && GET_CODE (op) != CONST_VECTOR)
2450 rtx new = gen_lowpart_if_possible (outermode, op);
2455 /* Similar comment as above apply here. */
2456 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2457 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2458 && GET_MODE_CLASS (outermode) == MODE_INT)
2460 rtx new = constant_subword (op,
2461 (byte / UNITS_PER_WORD),
2467 if (GET_MODE_CLASS (outermode) != MODE_INT
2468 && GET_MODE_CLASS (outermode) != MODE_CC)
2470 enum machine_mode new_mode = int_mode_for_mode (outermode);
2472 if (new_mode != innermode || byte != 0)
2474 op = simplify_subreg (new_mode, op, innermode, byte);
2477 return simplify_subreg (outermode, op, new_mode, 0);
2481 offset = byte * BITS_PER_UNIT;
2482 switch (GET_CODE (op))
2485 if (GET_MODE (op) != VOIDmode)
2488 /* We can't handle this case yet. */
2489 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2492 part = offset >= HOST_BITS_PER_WIDE_INT;
2493 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2494 && BYTES_BIG_ENDIAN)
2495 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2496 && WORDS_BIG_ENDIAN))
2498 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2499 offset %= HOST_BITS_PER_WIDE_INT;
2501 /* We've already picked the word we want from a double, so
2502 pretend this is actually an integer. */
2503 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2507 if (GET_CODE (op) == CONST_INT)
2510 /* We don't handle synthesizing of non-integral constants yet. */
2511 if (GET_MODE_CLASS (outermode) != MODE_INT)
2514 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2516 if (WORDS_BIG_ENDIAN)
2517 offset = (GET_MODE_BITSIZE (innermode)
2518 - GET_MODE_BITSIZE (outermode) - offset);
2519 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2520 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2521 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2522 - 2 * (offset % BITS_PER_WORD));
2525 if (offset >= HOST_BITS_PER_WIDE_INT)
2526 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2530 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2531 val = trunc_int_for_mode (val, outermode);
2532 return GEN_INT (val);
2539 /* Changing mode twice with SUBREG => just change it once,
2540 or not at all if changing back op starting mode. */
2541 if (GET_CODE (op) == SUBREG)
2543 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2544 int final_offset = byte + SUBREG_BYTE (op);
2547 if (outermode == innermostmode
2548 && byte == 0 && SUBREG_BYTE (op) == 0)
2549 return SUBREG_REG (op);
2551 /* The SUBREG_BYTE represents offset, as if the value were stored
2552 in memory. Irritating exception is paradoxical subreg, where
2553 we define SUBREG_BYTE to be 0. On big endian machines, this
2554 value should be negative. For a moment, undo this exception. */
2555 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2557 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2558 if (WORDS_BIG_ENDIAN)
2559 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2560 if (BYTES_BIG_ENDIAN)
2561 final_offset += difference % UNITS_PER_WORD;
2563 if (SUBREG_BYTE (op) == 0
2564 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2566 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2567 if (WORDS_BIG_ENDIAN)
2568 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2569 if (BYTES_BIG_ENDIAN)
2570 final_offset += difference % UNITS_PER_WORD;
2573 /* See whether resulting subreg will be paradoxical. */
2574 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2576 /* In nonparadoxical subregs we can't handle negative offsets. */
2577 if (final_offset < 0)
2579 /* Bail out in case resulting subreg would be incorrect. */
2580 if (final_offset % GET_MODE_SIZE (outermode)
2581 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2587 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2589 /* In paradoxical subreg, see if we are still looking on lower part.
2590 If so, our SUBREG_BYTE will be 0. */
2591 if (WORDS_BIG_ENDIAN)
2592 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2593 if (BYTES_BIG_ENDIAN)
2594 offset += difference % UNITS_PER_WORD;
2595 if (offset == final_offset)
2601 /* Recurse for futher possible simplifications. */
2602 new = simplify_subreg (outermode, SUBREG_REG (op),
2603 GET_MODE (SUBREG_REG (op)),
2607 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2610 /* SUBREG of a hard register => just change the register number
2611 and/or mode. If the hard register is not valid in that mode,
2612 suppress this simplification. If the hard register is the stack,
2613 frame, or argument pointer, leave this as a SUBREG. */
2616 && (! REG_FUNCTION_VALUE_P (op)
2617 || ! rtx_equal_function_value_matters)
2618 && REGNO (op) < FIRST_PSEUDO_REGISTER
2619 #ifdef CANNOT_CHANGE_MODE_CLASS
2620 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
2621 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2622 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
2624 && ((reload_completed && !frame_pointer_needed)
2625 || (REGNO (op) != FRAME_POINTER_REGNUM
2626 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2627 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2630 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2631 && REGNO (op) != ARG_POINTER_REGNUM
2633 && REGNO (op) != STACK_POINTER_REGNUM)
2635 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2638 /* ??? We do allow it if the current REG is not valid for
2639 its mode. This is a kludge to work around how float/complex
2640 arguments are passed on 32-bit SPARC and should be fixed. */
2641 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2642 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2644 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
2646 /* Propagate original regno. We don't have any way to specify
2647 the offset inside original regno, so do so only for lowpart.
2648 The information is used only by alias analysis that can not
2649 grog partial register anyway. */
2651 if (subreg_lowpart_offset (outermode, innermode) == byte)
2652 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2657 /* If we have a SUBREG of a register that we are replacing and we are
2658 replacing it with a MEM, make a new MEM and try replacing the
2659 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2660 or if we would be widening it. */
2662 if (GET_CODE (op) == MEM
2663 && ! mode_dependent_address_p (XEXP (op, 0))
2664 /* Allow splitting of volatile memory references in case we don't
2665 have instruction to move the whole thing. */
2666 && (! MEM_VOLATILE_P (op)
2667 || ! have_insn_for (SET, innermode))
2668 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2669 return adjust_address_nv (op, outermode, byte);
2671 /* Handle complex values represented as CONCAT
2672 of real and imaginary part. */
2673 if (GET_CODE (op) == CONCAT)
2675 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2676 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2677 unsigned int final_offset;
2680 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2681 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2684 /* We can at least simplify it by referring directly to the relevant part. */
2685 return gen_rtx_SUBREG (outermode, part, final_offset);
2690 /* Make a SUBREG operation or equivalent if it folds. */
2693 simplify_gen_subreg (outermode, op, innermode, byte)
2696 enum machine_mode outermode, innermode;
2699 /* Little bit of sanity checking. */
2700 if (innermode == VOIDmode || outermode == VOIDmode
2701 || innermode == BLKmode || outermode == BLKmode)
2704 if (GET_MODE (op) != innermode
2705 && GET_MODE (op) != VOIDmode)
2708 if (byte % GET_MODE_SIZE (outermode)
2709 || byte >= GET_MODE_SIZE (innermode))
2712 if (GET_CODE (op) == QUEUED)
2715 new = simplify_subreg (outermode, op, innermode, byte);
2719 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2722 return gen_rtx_SUBREG (outermode, op, byte);
2724 /* Simplify X, an rtx expression.
2726 Return the simplified expression or NULL if no simplifications
2729 This is the preferred entry point into the simplification routines;
2730 however, we still allow passes to call the more specific routines.
2732 Right now GCC has three (yes, three) major bodies of RTL simplification
2733 code that need to be unified.
2735 1. fold_rtx in cse.c. This code uses various CSE specific
2736 information to aid in RTL simplification.
2738 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2739 it uses combine specific information to aid in RTL
2742 3. The routines in this file.
2745 Long term we want to only have one body of simplification code; to
2746 get to that state I recommend the following steps:
2748 1. Pour over fold_rtx & simplify_rtx and move any simplifications
2749 which are not pass dependent state into these routines.
2751 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2752 use this routine whenever possible.
2754 3. Allow for pass dependent state to be provided to these
2755 routines and add simplifications based on the pass dependent
2756 state. Remove code from cse.c & combine.c that becomes
2759 It will take time, but ultimately the compiler will be easier to
2760 maintain and improve. It's totally silly that when we add a
2761 simplification that it needs to be added to 4 places (3 for RTL
2762 simplification and 1 for tree simplification. */
2768 enum rtx_code code = GET_CODE (x);
2769 enum machine_mode mode = GET_MODE (x);
2771 switch (GET_RTX_CLASS (code))
2774 return simplify_unary_operation (code, mode,
2775 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2777 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2782 XEXP (x, 0) = XEXP (x, 1);
2784 return simplify_binary_operation (code, mode,
2785 XEXP (x, 0), XEXP (x, 1));
2789 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2793 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2794 XEXP (x, 0), XEXP (x, 1),
2798 return simplify_relational_operation (code,
2799 ((GET_MODE (XEXP (x, 0))
2801 ? GET_MODE (XEXP (x, 0))
2802 : GET_MODE (XEXP (x, 1))),
2803 XEXP (x, 0), XEXP (x, 1));
2806 return simplify_gen_subreg (mode, SUBREG_REG (x),
2807 GET_MODE (SUBREG_REG (x)),
2809 if (code == CONSTANT_P_RTX)
2811 if (CONSTANT_P (XEXP (x,0)))