/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
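/* For example (illustrative note, not part of the original sources):
   on a host where HOST_WIDE_INT is 64 bits wide, the value -1 is
   carried as the pair (low, high) = (-1, HWI_SIGN_EXTEND (-1))
   = (-1, -1), while the value 1 is carried as
   (1, HWI_SIGN_EXTEND (1)) = (1, 0).  */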
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
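/* Why truncation matters (illustrative note): in SImode the most
   negative value -2147483648 has no positive counterpart in 32 bits,
   so its negation wraps back to itself; gen_int_mode performs that
   truncation, matching two's complement behavior.  */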
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
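/* Usage sketch (illustrative, not from the original sources):

     simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3))

   folds directly to the CONST_INT 5 via simplify_binary_operation,
   so no PLUS expression is ever allocated.  */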
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
      if (new != UNKNOWN)
        return simplify_gen_relational (new, mode, cmp_mode,
                                        XEXP (op0, 0), XEXP (op0, 1));
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
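/* Illustrative use (not from the original sources): given a condition
   wrapped as (eq (compare (reg a) (reg b)) (const_int 0)), the COMPARE
   is first unwrapped so that a is compared with b directly, and an EQ
   of a comparison against zero is re-expressed through the reversed
   condition code when one exists.  */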
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op = (XEXP (x, 0) == old
                  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

        return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
        simplify_gen_binary (code, mode,
                             simplify_replace_rtx (XEXP (x, 0), old, new),
                             simplify_replace_rtx (XEXP (x, 1), old, new));

    case '<':
      {
        enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
                                     ? GET_MODE (XEXP (x, 0))
                                     : GET_MODE (XEXP (x, 1)));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
        rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
        rtx temp = simplify_gen_relational (code, mode,
                                            (op_mode != VOIDmode
                                             ? op_mode
                                             : GET_MODE (op0) != VOIDmode
                                               ? GET_MODE (op0)
                                               : GET_MODE (op1)),
                                            op0, op1);
#ifdef FLOAT_STORE_FLAG_VALUE
        if (GET_MODE_CLASS (mode) == MODE_FLOAT)
          {
            if (temp == const0_rtx)
              temp = CONST0_RTX (mode);
            else if (temp == const_true_rtx)
              temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
                                                   mode);
          }
#endif
        return temp;
      }

    case '3':
    case 'b':
      {
        enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
        rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

        return
          simplify_gen_ternary (code, mode,
                                (op_mode != VOIDmode
                                 ? op_mode
                                 : GET_MODE (op0)),
                                op0,
                                simplify_replace_rtx (XEXP (x, 1), old, new),
                                simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          rtx exp;
          exp = simplify_gen_subreg (GET_MODE (x),
                                     simplify_replace_rtx (SUBREG_REG (x),
                                                           old, new),
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          if (exp)
            x = exp;
        }
      return x;

    case 'o':
      if (code == MEM)
        return replace_equiv_address_nv (x,
                                         simplify_replace_rtx (XEXP (x, 0),
                                                               old, new));
      else if (code == LO_SUM)
        {
          rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }

      return x;

    default:
      return x;
    }
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
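  /* E.g. (vec_duplicate:V4SI (const_int 5)) folds to the CONST_VECTOR
     [5, 5, 5, 5], and duplicating a 2-element CONST_VECTOR into a
     4-element mode repeats its elements pairwise.  (Illustrative note,
     not part of the original sources.)  */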
  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
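          /* E.g. arg0 = 0b101000: arg0 & -arg0 isolates the lowest set
             bit (0b1000), exact_log2 gives 3, and FFS is 3 + 1 = 4.
             (Illustrative arithmetic, not part of the original
             sources.)  */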
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
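          /* Both counts use the classic arg0 &= arg0 - 1 step, which
             clears exactly one set bit per iteration: e.g. 0b1100 ->
             0b1000 -> 0, so VAL counts the set bits; PARITY then keeps
             only the low bit of that count.  (Illustrative note, not
             part of the original sources.)  */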
          val &= 1;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;

        case ABS:
          d = REAL_VALUE_ABS (d);
          break;

        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;

        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;

        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;

        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;

        default:
          return 0;
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
        {
        case FIX:           i = REAL_VALUE_FIX (d);           break;
        case UNSIGNED_FIX:  i = REAL_VALUE_UNSIGNED_FIX (d);  break;
        default:
          return 0;
        }
      return gen_int_mode (i, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */

      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, op_mode, op_mode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, op_mode, op_mode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, mode, XEXP (op, 0),
                                            const0_rtx);
          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);
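          /* Both rewrites are two's complement identities: ~X == -X - 1,
             so (neg (not X)) == -(-X - 1) == X + 1; e.g. X = 5 gives
             ~5 = -6 and -(-6) = 6 = 5 + 1.  (Illustrative note, not
             part of the original sources.)  */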
          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }
          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;
/* Subroutine of simplify_associative_operation.  Return true if rtx OP
   is a suitable integer or floating point immediate constant.  */
static bool
associative_constant_p (rtx op)
{
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE)
    return true;
  op = avoid_constant_pool_reference (op);
  return GET_CODE (op) == CONST_INT
         || GET_CODE (op) == CONST_DOUBLE;
}

/* Subroutine of simplify_binary_operation to simplify an associative
   binary operation CODE with result mode MODE, operating on OP0 and OP1.
   Return 0 if no simplification is possible.  */
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Simplify (x op c1) op c2 as x op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && associative_constant_p (op1)
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (! tem)
        return 0;
      return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
    }

  /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && GET_CODE (op1) == code
      && associative_constant_p (XEXP (op0, 1))
      && associative_constant_p (XEXP (op1, 1)))
    {
      rtx c = simplify_binary_operation (code, mode,
                                         XEXP (op0, 1), XEXP (op1, 1));
      if (! c)
        return 0;
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, c);
    }

  /* Canonicalize (x op c) op y as (x op y) op c.  */
  if (GET_CODE (op0) == code
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
      return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  /* Canonicalize x op (y op c) as (x op y) op c.  */
  if (GET_CODE (op1) == code
      && associative_constant_p (XEXP (op1, 1)))
    {
      tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
    }

  return 0;
}
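/* For example (illustrative): with code == PLUS the rules above rewrite

     ((x + 4) + (y + 8))  ->  (x + y) + 12
     ((x + 4) + y)        ->  (x + y) + 4

   so that constants bubble toward the top of the expression, where
   later folds can combine them.  */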
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
          && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
        return 0;

      if (code == DIV
          && REAL_VALUES_EQUAL (f1, dconst0)
          && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
        return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:  case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C + X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }
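          /* Worked example (illustrative): for (plus (mult x 3) x) the
             walk above finds coeff0 = 3, coeff1 = 1 and lhs == rhs == x,
             so the sum becomes (mult x 4).  For (plus (ashift x 2)
             (neg x)) the coefficients are 4 and -1, but since the rebuilt
             (mult x 3) is a real multiply that was not there before, it
             is rejected and 0 is returned.  */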
          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case COMPARE:
          /* Convert (compare FOO (const_int 0)) to FOO unless we are
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);
          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
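          /* The identity holds because (x & y) contains only bits of x,
             e.g. x = 0b1100, y = 0b1010: x - (x & y) = 12 - 8 = 4, and
             x & ~y = 0b1100 & 0b0101 = 4.  (Illustrative arithmetic,
             not part of the original sources.)  */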
          break;

        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signaling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
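          /* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3))
             since exact_log2 (8) == 3, while a multiplier such as 6
             fails the exact_log2 test and is left alone.  (Illustrative
             note, not part of the original sources.)  */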
          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

          /* Fall through....  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
          break;

        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));
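          /* E.g. (umod x (const_int 8)) becomes (and x (const_int 7));
             for a power-of-two modulus the remainder is just the
             low-order bits.  (Illustrative note, not part of the
             original sources.)  */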
          /* Fall through....  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (mode
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL
                  || XVECLEN (trueop1, 0) != 1
                  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
                return 0;

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
            }
          else
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (GET_MODE_INNER (mode)
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL)
                return 0;

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  if (XVECLEN (trueop1, 0) != (int) n_elts)
                    abort ();
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      if (GET_CODE (x) != CONST_INT)
                        return 0;
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          return 0;

        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            if (!VECTOR_MODE_P (mode)
                || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    != GET_MODE_SIZE (mode)))
              return 0;

            if ((VECTOR_MODE_P (op0_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op0_mode)))
                || (!VECTOR_MODE_P (op0_mode)
                    && GET_MODE_INNER (mode) != op0_mode))
              return 0;

            if ((VECTOR_MODE_P (op1_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op1_mode)))
                || (!VECTOR_MODE_P (op1_mode)
                    && GET_MODE_INNER (mode) != op1_mode))
              return 0;

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
          }
          return 0;

        default:
          break;
        }

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      /* ??? There are simplifications that can be done.  */
      return 0;
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}

static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  input_consts++;
                  changed = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = constm1_rtx;
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                }
              break;

            case CONST_INT:
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          {
            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs)
                    /* Don't allow -x + -1 -> ~x simplifications in the
                       first pass.  This allows us the chance to combine
                       the -1 with other constants.  */
                    && ! (first
                          && GET_CODE (tem) == NOT
                          && XEXP (tem, 0) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                  }
              }
          }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (! force
      && (n_ops + n_consts > input_ops
          || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      negate = 1;
      for (i = 0; i < n_ops; i++)
        ops[i].neg = 0;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
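/* End-to-end example (illustrative): simplifying
   (minus (plus x (const_int 4)) (plus x (const_int 1))) expands to the
   operand list {x, +4, -x, -1}; the pairwise pass folds 4 - 1 to 3 and
   cancels x against -x, leaving just (const_int 3).  */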
2351 /* Like simplify_binary_operation except used for relational operators.
2352 MODE is the mode of the operands, not that of the result. If MODE
2353 is VOIDmode, both operands must also be VOIDmode and we compare the
2354 operands in "infinite precision".
2356 If no simplification is possible, this function returns zero. Otherwise,
2357 it returns either const_true_rtx or const0_rtx. */
2360 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2363 int equal, op0lt, op0ltu, op1lt, op1ltu;
2368 if (mode == VOIDmode
2369 && (GET_MODE (op0) != VOIDmode
2370 || GET_MODE (op1) != VOIDmode))
2373 /* If op0 is a compare, extract the comparison arguments from it. */
2374 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2375 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2377 trueop0 = avoid_constant_pool_reference (op0);
2378 trueop1 = avoid_constant_pool_reference (op1);
2380 /* We can't simplify MODE_CC values since we don't know what the
2381 actual comparison is. */
2382 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2385 /* Make sure the constant is second. */
2386 if (swap_commutative_operands_p (trueop0, trueop1))
2388 tem = op0, op0 = op1, op1 = tem;
2389 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2390 code = swap_condition (code);
2393 /* For integer comparisons of A and B maybe we can simplify A - B and can
2394 then simplify a comparison of that with zero. If A and B are both either
2395 a register or a CONST_INT, this can't help; testing for these cases will
2396 prevent infinite recursion here and speed things up.
2398 If CODE is an unsigned comparison, then we can never do this optimization,
2399 because it gives an incorrect result if the subtraction wraps around zero.
2400 ANSI C defines unsigned operations such that they never overflow, and
2401 thus such cases can not be ignored. */
2403 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2404 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2405 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2406 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2407 && code != GTU && code != GEU && code != LTU && code != LEU)
2408 return simplify_relational_operation (signed_condition (code),
2409 mode, tem, const0_rtx);
2411 if (flag_unsafe_math_optimizations && code == ORDERED)
2412 return const_true_rtx;
2414 if (flag_unsafe_math_optimizations && code == UNORDERED)
  /* For modes without NaNs, if the two operands are equal, we know
     the result, unless they have side effects.  */
2419 if (! HONOR_NANS (GET_MODE (trueop0))
2420 && rtx_equal_p (trueop0, trueop1)
2421 && ! side_effects_p (trueop0))
2422 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
2426 else if (GET_CODE (trueop0) == CONST_DOUBLE
2427 && GET_CODE (trueop1) == CONST_DOUBLE
2428 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2430 REAL_VALUE_TYPE d0, d1;
2432 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2433 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2435 /* Comparisons are unordered iff at least one of the values is NaN. */
2436 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2446 return const_true_rtx;
2459 equal = REAL_VALUES_EQUAL (d0, d1);
2460 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2461 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2464 /* Otherwise, see if the operands are both integers. */
2465 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2466 && (GET_CODE (trueop0) == CONST_DOUBLE
2467 || GET_CODE (trueop0) == CONST_INT)
2468 && (GET_CODE (trueop1) == CONST_DOUBLE
2469 || GET_CODE (trueop1) == CONST_INT))
2471 int width = GET_MODE_BITSIZE (mode);
2472 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2473 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2475 /* Get the two words comprising each integer constant. */
2476 if (GET_CODE (trueop0) == CONST_DOUBLE)
2478 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2479 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2483 l0u = l0s = INTVAL (trueop0);
2484 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2487 if (GET_CODE (trueop1) == CONST_DOUBLE)
2489 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2490 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2494 l1u = l1s = INTVAL (trueop1);
2495 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2498 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2499 we have to sign or zero-extend the values. */
2500 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2502 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2503 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2505 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2506 l0s |= ((HOST_WIDE_INT) (-1) << width);
2508 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2509 l1s |= ((HOST_WIDE_INT) (-1) << width);
2511 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2512 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
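      /* For example, with QImode operands (width 8) the constant 0xff
         yields l0u == 0xff for the unsigned tests but l0s == -1 for the
         signed ones; the signed high word becomes the sign extension
         (-1) while the unsigned high word is zero.  */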
2514 equal = (h0u == h1u && l0u == l1u);
2515 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2516 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2517 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2518 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
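      /* Worked example: comparing (const_int -1) with (const_int 1) in
         SImode gives h0s == -1, l0u == the mode mask, h1s == 0 and
         l1u == 1, so EQUAL == 0 and OP0LT == 1 (signed -1 < 1), but
         OP0LTU == 0, since as an unsigned value -1 is the largest
         representable.  */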
2521 /* Otherwise, there are some code-specific tests we can make. */
2527 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2532 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2533 return const_true_rtx;
2537 /* Unsigned values are never negative. */
2538 if (trueop1 == const0_rtx)
2539 return const_true_rtx;
2543 if (trueop1 == const0_rtx)
          /* Unsigned values are never greater than the largest
             representable number.  */
2550 if (GET_CODE (trueop1) == CONST_INT
2551 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2552 && INTEGRAL_MODE_P (mode))
2553 return const_true_rtx;
2557 if (GET_CODE (trueop1) == CONST_INT
2558 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2559 && INTEGRAL_MODE_P (mode))
2564 /* Optimize abs(x) < 0.0. */
2565 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2567 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2569 if (GET_CODE (tem) == ABS)
2575 /* Optimize abs(x) >= 0.0. */
2576 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2578 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2580 if (GET_CODE (tem) == ABS)
2581 return const_true_rtx;
2586 /* Optimize ! (abs(x) < 0.0). */
2587 if (trueop1 == CONST0_RTX (mode))
2589 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2591 if (GET_CODE (tem) == ABS)
2592 return const_true_rtx;
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU have been
     set as appropriate; compute the result for CODE from them.  */
2609 return equal ? const_true_rtx : const0_rtx;
2612 return ! equal ? const_true_rtx : const0_rtx;
2615 return op0lt ? const_true_rtx : const0_rtx;
2618 return op1lt ? const_true_rtx : const0_rtx;
2620 return op0ltu ? const_true_rtx : const0_rtx;
2622 return op1ltu ? const_true_rtx : const0_rtx;
2625 return equal || op0lt ? const_true_rtx : const0_rtx;
2628 return equal || op1lt ? const_true_rtx : const0_rtx;
2630 return equal || op0ltu ? const_true_rtx : const0_rtx;
2632 return equal || op1ltu ? const_true_rtx : const0_rtx;
2634 return const_true_rtx;
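  /* Continuing the worked example above: (le (const_int -1)
     (const_int 1)) folds to const_true_rtx via EQUAL || OP0LT, while
     (leu ...) on the same operands folds to const0_rtx because neither
     EQUAL nor OP0LTU holds.  */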
2642 /* Simplify CODE, an operation with result mode MODE and three operands,
2643 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
2647 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2648 enum machine_mode op0_mode, rtx op0, rtx op1,
2651 unsigned int width = GET_MODE_BITSIZE (mode);
2653 /* VOIDmode means "infinite" precision. */
2655 width = HOST_BITS_PER_WIDE_INT;
2661 if (GET_CODE (op0) == CONST_INT
2662 && GET_CODE (op1) == CONST_INT
2663 && GET_CODE (op2) == CONST_INT
2664 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2665 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
          /* Extracting a bit-field from a constant.  */
2668 HOST_WIDE_INT val = INTVAL (op0);
2670 if (BITS_BIG_ENDIAN)
2671 val >>= (GET_MODE_BITSIZE (op0_mode)
2672 - INTVAL (op2) - INTVAL (op1));
2674 val >>= INTVAL (op2);
2676 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2678 /* First zero-extend. */
2679 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2680 /* If desired, propagate sign bit. */
2681 if (code == SIGN_EXTRACT
2682 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2683 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2686 /* Clear the bits that don't belong in our mode,
2687 unless they and our sign bit are all one.
2688 So we get either a reasonable negative value or a reasonable
2689 unsigned value for this mode. */
2690 if (width < HOST_BITS_PER_WIDE_INT
2691 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2692 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2693 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2695 return GEN_INT (val);
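          /* Worked example: (sign_extract:SI (const_int 0xb4)
             (const_int 4) (const_int 2)) with !BITS_BIG_ENDIAN shifts
             0xb4 right by 2 to get 0x2d, masks it to the 4-bit field
             0xd, and, since the field's sign bit is set, sign-extends
             it to return (const_int -3).  */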
2700 if (GET_CODE (op0) == CONST_INT)
2701 return op0 != const0_rtx ? op1 : op2;
      /* Convert a != b ? a : b and a == b ? b : a to "a".  */
2704 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2705 && !HONOR_NANS (mode)
2706 && rtx_equal_p (XEXP (op0, 0), op1)
2707 && rtx_equal_p (XEXP (op0, 1), op2))
2709 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2710 && !HONOR_NANS (mode)
2711 && rtx_equal_p (XEXP (op0, 1), op1)
2712 && rtx_equal_p (XEXP (op0, 0), op2))
2714 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2716 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2717 ? GET_MODE (XEXP (op0, 1))
2718 : GET_MODE (XEXP (op0, 0)));
2720 if (cmp_mode == VOIDmode)
2721 cmp_mode = op0_mode;
2722 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2723 XEXP (op0, 0), XEXP (op0, 1));
2725 /* See if any simplifications were possible. */
2726 if (temp == const0_rtx)
2728 else if (temp == const1_rtx)
          /* Look for constants in OP1 and OP2 that let the IF_THEN_ELSE
             collapse into the comparison itself or its reverse.  */
2734 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2736 HOST_WIDE_INT t = INTVAL (op1);
2737 HOST_WIDE_INT f = INTVAL (op2);
2739 if (t == STORE_FLAG_VALUE && f == 0)
2740 code = GET_CODE (op0);
2741 else if (t == 0 && f == STORE_FLAG_VALUE)
2744 tmp = reversed_comparison_code (op0, NULL_RTX);
2752 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2757 if (GET_MODE (op0) != mode
2758 || GET_MODE (op1) != mode
2759 || !VECTOR_MODE_P (mode))
2761 op2 = avoid_constant_pool_reference (op2);
2762 if (GET_CODE (op2) == CONST_INT)
2764 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2765 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2766 int mask = (1 << n_elts) - 1;
2768 if (!(INTVAL (op2) & mask))
2770 if ((INTVAL (op2) & mask) == mask)
2773 op0 = avoid_constant_pool_reference (op0);
2774 op1 = avoid_constant_pool_reference (op1);
2775 if (GET_CODE (op0) == CONST_VECTOR
2776 && GET_CODE (op1) == CONST_VECTOR)
2778 rtvec v = rtvec_alloc (n_elts);
2781 for (i = 0; i < n_elts; i++)
2782 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2783 ? CONST_VECTOR_ELT (op0, i)
2784 : CONST_VECTOR_ELT (op1, i));
2785 return gen_rtx_CONST_VECTOR (mode, v);
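              /* For example, (vec_merge:V4SI (const_vector [a b c d])
                 (const_vector [e f g h]) (const_int 5)) takes elements
                 0 and 2 from the first operand (mask bits 0 and 2 set),
                 giving [a f c h].  */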
2797 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */
2800 simplify_subreg (enum machine_mode outermode, rtx op,
2801 enum machine_mode innermode, unsigned int byte)
  /* A little sanity checking first.  */
2804 if (innermode == VOIDmode || outermode == VOIDmode
2805 || innermode == BLKmode || outermode == BLKmode)
2808 if (GET_MODE (op) != innermode
2809 && GET_MODE (op) != VOIDmode)
2812 if (byte % GET_MODE_SIZE (outermode)
2813 || byte >= GET_MODE_SIZE (innermode))
2816 if (outermode == innermode && !byte)
2819 /* Simplify subregs of vector constants. */
2820 if (GET_CODE (op) == CONST_VECTOR)
2822 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2823 const unsigned int offset = byte / elt_size;
2826 if (GET_MODE_INNER (innermode) == outermode)
2828 elt = CONST_VECTOR_ELT (op, offset);
          /* ??? We probably don't need this copy_rtx because constants
             can be shared.  */
2833 return copy_rtx (elt);
2835 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2836 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2838 return (gen_rtx_CONST_VECTOR
2840 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2841 &CONST_VECTOR_ELT (op, offset))));
2843 else if (GET_MODE_CLASS (outermode) == MODE_INT
2844 && (GET_MODE_SIZE (outermode) % elt_size == 0))
          /* This happens when the target register size is smaller than
2847 the vector mode, and we synthesize operations with vectors
2848 of elements that are smaller than the register size. */
2849 HOST_WIDE_INT sum = 0, high = 0;
2850 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2851 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2852 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2853 int shift = BITS_PER_UNIT * elt_size;
2854 unsigned HOST_WIDE_INT unit_mask;
2856 unit_mask = (unsigned HOST_WIDE_INT) -1
2857 >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
2859 for (; n_elts--; i += step)
2861 elt = CONST_VECTOR_ELT (op, i);
2862 if (GET_CODE (elt) == CONST_DOUBLE
2863 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2865 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2870 if (GET_CODE (elt) != CONST_INT)
2872 /* Avoid overflow. */
2873 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2875 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2876 sum = (sum << shift) + (INTVAL (elt) & unit_mask);
2878 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2879 return GEN_INT (trunc_int_for_mode (sum, outermode));
          else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
2881 return immed_double_const (sum, high, outermode);
2885 else if (GET_MODE_CLASS (outermode) == MODE_INT
2886 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2888 enum machine_mode new_mode
2889 = int_mode_for_mode (GET_MODE_INNER (innermode));
2890 int subbyte = byte % elt_size;
2892 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2895 return simplify_subreg (outermode, op, new_mode, subbyte);
2897 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2898 /* This shouldn't happen, but let's not do anything stupid. */
2902 /* Attempt to simplify constant to non-SUBREG expression. */
2903 if (CONSTANT_P (op))
2906 unsigned HOST_WIDE_INT val = 0;
2908 if (VECTOR_MODE_P (outermode))
2910 /* Construct a CONST_VECTOR from individual subregs. */
2911 enum machine_mode submode = GET_MODE_INNER (outermode);
2912 int subsize = GET_MODE_UNIT_SIZE (outermode);
2913 int i, elts = GET_MODE_NUNITS (outermode);
2914 rtvec v = rtvec_alloc (elts);
2917 for (i = 0; i < elts; i++, byte += subsize)
2919 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2920 /* ??? It would be nice if we could actually make such subregs
2921 on targets that allow such relocations. */
2922 if (byte >= GET_MODE_SIZE (innermode))
2923 elt = CONST0_RTX (submode);
2925 elt = simplify_subreg (submode, op, innermode, byte);
2928 RTVEC_ELT (v, i) = elt;
2930 return gen_rtx_CONST_VECTOR (outermode, v);
2933 /* ??? This code is partly redundant with code below, but can handle
2934 the subregs of floats and similar corner cases.
         Later we should move all simplification code here and rewrite
2936 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2937 using SIMPLIFY_SUBREG. */
2938 if (subreg_lowpart_offset (outermode, innermode) == byte
2939 && GET_CODE (op) != CONST_VECTOR)
2941 rtx new = gen_lowpart_if_possible (outermode, op);
      /* A similar comment to the one above applies here.  */
2947 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2948 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2949 && GET_MODE_CLASS (outermode) == MODE_INT)
2951 rtx new = constant_subword (op,
2952 (byte / UNITS_PER_WORD),
2958 if (GET_MODE_CLASS (outermode) != MODE_INT
2959 && GET_MODE_CLASS (outermode) != MODE_CC)
2961 enum machine_mode new_mode = int_mode_for_mode (outermode);
2963 if (new_mode != innermode || byte != 0)
2965 op = simplify_subreg (new_mode, op, innermode, byte);
2968 return simplify_subreg (outermode, op, new_mode, 0);
2972 offset = byte * BITS_PER_UNIT;
2973 switch (GET_CODE (op))
2976 if (GET_MODE (op) != VOIDmode)
2979 /* We can't handle this case yet. */
2980 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2983 part = offset >= HOST_BITS_PER_WIDE_INT;
2984 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2985 && BYTES_BIG_ENDIAN)
2986 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2987 && WORDS_BIG_ENDIAN))
2989 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2990 offset %= HOST_BITS_PER_WIDE_INT;
2992 /* We've already picked the word we want from a double, so
2993 pretend this is actually an integer. */
2994 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2998 if (GET_CODE (op) == CONST_INT)
3001 /* We don't handle synthesizing of non-integral constants yet. */
3002 if (GET_MODE_CLASS (outermode) != MODE_INT)
3005 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
3007 if (WORDS_BIG_ENDIAN)
3008 offset = (GET_MODE_BITSIZE (innermode)
3009 - GET_MODE_BITSIZE (outermode) - offset);
3010 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
3011 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
3012 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
3013 - 2 * (offset % BITS_PER_WORD));
3016 if (offset >= HOST_BITS_PER_WIDE_INT)
3017 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
3021 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
3022 val = trunc_int_for_mode (val, outermode);
3023 return GEN_INT (val);
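      /* Example of the arithmetic above: on a little-endian target,
         (subreg:QI (const_int 0x1234) 0) has bit offset 0, so VAL is
         shifted by nothing and truncated to give (const_int 0x34);
         byte 1 selects bit offset 8 and gives (const_int 0x12).  */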
3030 /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
3032 if (GET_CODE (op) == SUBREG)
3034 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3035 int final_offset = byte + SUBREG_BYTE (op);
3038 if (outermode == innermostmode
3039 && byte == 0 && SUBREG_BYTE (op) == 0)
3040 return SUBREG_REG (op);
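      /* In the common case the byte offsets simply compose:
         (subreg:SI (subreg:DI (reg:TI R) 8) 4) becomes
         (subreg:SI (reg:TI R) 12).  */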
      /* The SUBREG_BYTE represents the offset, as if the value were
         stored in memory.  The irritating exception is the paradoxical
         subreg, whose SUBREG_BYTE we define to be 0; on big-endian
         machines the value would otherwise be negative.  For a moment,
         undo this exception.  */
3046 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3048 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3049 if (WORDS_BIG_ENDIAN)
3050 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3051 if (BYTES_BIG_ENDIAN)
3052 final_offset += difference % UNITS_PER_WORD;
3054 if (SUBREG_BYTE (op) == 0
3055 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3057 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3058 if (WORDS_BIG_ENDIAN)
3059 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3060 if (BYTES_BIG_ENDIAN)
3061 final_offset += difference % UNITS_PER_WORD;
      /* See whether the resulting subreg will be paradoxical.  */
3065 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
          /* In a non-paradoxical subreg we can't handle negative offsets.  */
3068 if (final_offset < 0)
3070 /* Bail out in case resulting subreg would be incorrect. */
3071 if (final_offset % GET_MODE_SIZE (outermode)
3072 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3078 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
3082 if (WORDS_BIG_ENDIAN)
3083 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3084 if (BYTES_BIG_ENDIAN)
3085 offset += difference % UNITS_PER_WORD;
3086 if (offset == final_offset)
3092 /* Recurse for further possible simplifications. */
3093 new = simplify_subreg (outermode, SUBREG_REG (op),
3094 GET_MODE (SUBREG_REG (op)),
3098 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3101 /* SUBREG of a hard register => just change the register number
3102 and/or mode. If the hard register is not valid in that mode,
3103 suppress this simplification. If the hard register is the stack,
3104 frame, or argument pointer, leave this as a SUBREG. */
3107 && (! REG_FUNCTION_VALUE_P (op)
3108 || ! rtx_equal_function_value_matters)
3109 && REGNO (op) < FIRST_PSEUDO_REGISTER
3110 #ifdef CANNOT_CHANGE_MODE_CLASS
3111 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3112 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3113 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3115 && ((reload_completed && !frame_pointer_needed)
3116 || (REGNO (op) != FRAME_POINTER_REGNUM
3117 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3118 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3121 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3122 && REGNO (op) != ARG_POINTER_REGNUM
3124 && REGNO (op) != STACK_POINTER_REGNUM
3125 && subreg_offset_representable_p (REGNO (op), innermode,
3128 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3129 int final_regno = subreg_hard_regno (tem, 0);
3131 /* ??? We do allow it if the current REG is not valid for
3132 its mode. This is a kludge to work around how float/complex
3133 arguments are passed on 32-bit SPARC and should be fixed. */
3134 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3135 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3137 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
          /* Propagate the original regno.  We don't have any way to
             specify the offset inside the original regno, so do so only
             for the lowpart.  The information is used only by alias
             analysis, which cannot grok partial registers anyway.  */
3144 if (subreg_lowpart_offset (outermode, innermode) == byte)
3145 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3150 /* If we have a SUBREG of a register that we are replacing and we are
3151 replacing it with a MEM, make a new MEM and try replacing the
3152 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3153 or if we would be widening it. */
3155 if (GET_CODE (op) == MEM
3156 && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
3159 && (! MEM_VOLATILE_P (op)
3160 || ! have_insn_for (SET, innermode))
3161 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3162 return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary parts.  */
3166 if (GET_CODE (op) == CONCAT)
3168 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
3169 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3170 unsigned int final_offset;
3173 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3174 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3177 /* We can at least simplify it by referring directly to the relevant part. */
3178 return gen_rtx_SUBREG (outermode, part, final_offset);
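      /* For example, (subreg:SF (concat:SC X Y) 4) lies entirely within
         the imaginary part, so it simplifies to Y; an offset of 0 would
         select the real part X instead.  */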
3183 /* Make a SUBREG operation or equivalent if it folds. */
3186 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3187 enum machine_mode innermode, unsigned int byte)
  /* A little sanity checking first.  */
3191 if (innermode == VOIDmode || outermode == VOIDmode
3192 || innermode == BLKmode || outermode == BLKmode)
3195 if (GET_MODE (op) != innermode
3196 && GET_MODE (op) != VOIDmode)
3199 if (byte % GET_MODE_SIZE (outermode)
3200 || byte >= GET_MODE_SIZE (innermode))
3203 if (GET_CODE (op) == QUEUED)
3206 new = simplify_subreg (outermode, op, innermode, byte);
3210 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3213 return gen_rtx_SUBREG (outermode, op, byte);
3215 /* Simplify X, an rtx expression.
   Return the simplified expression, or NULL if no simplifications can be made.
3220 This is the preferred entry point into the simplification routines;
3221 however, we still allow passes to call the more specific routines.
3223 Right now GCC has three (yes, three) major bodies of RTL simplification
3224 code that need to be unified.
3226 1. fold_rtx in cse.c. This code uses various CSE specific
3227 information to aid in RTL simplification.
3229 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3230 it uses combine specific information to aid in RTL
3233 3. The routines in this file.
   Long term we want to have only one body of simplification code; to
3237 get to that state I recommend the following steps:
   1. Pore over fold_rtx & simplify_rtx and move any simplifications
      which do not depend on pass-specific state into these routines.
3242 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3243 use this routine whenever possible.
3245 3. Allow for pass dependent state to be provided to these
3246 routines and add simplifications based on the pass dependent
      state.  Remove code from cse.c & combine.c that becomes
      redundant.
3250 It will take time, but ultimately the compiler will be easier to
3251 maintain and improve. It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
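/* A sketch of typical use (hypothetical caller; real ones live in the
   optimization passes): fold a constant addition, falling back to the
   original rtx when no simplification is found.  */
#if 0
rtx x = gen_rtx_PLUS (SImode, GEN_INT (2), GEN_INT (3));
rtx tem = simplify_rtx (x);     /* Yields (const_int 5).  */
if (tem == 0)
  tem = x;                      /* NULL means "no change".  */
#endif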
3256 simplify_rtx (rtx x)
3258 enum rtx_code code = GET_CODE (x);
3259 enum machine_mode mode = GET_MODE (x);
3262 switch (GET_RTX_CLASS (code))
3265 return simplify_unary_operation (code, mode,
3266 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3268 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3269 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3271 /* Fall through.... */
3274 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3278 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3279 XEXP (x, 0), XEXP (x, 1),
3283 temp = simplify_relational_operation (code,
3284 ((GET_MODE (XEXP (x, 0))
3286 ? GET_MODE (XEXP (x, 0))
3287 : GET_MODE (XEXP (x, 1))),
3288 XEXP (x, 0), XEXP (x, 1));
3289 #ifdef FLOAT_STORE_FLAG_VALUE
3290 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3292 if (temp == const0_rtx)
3293 temp = CONST0_RTX (mode);
3295 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3303 return simplify_gen_subreg (mode, SUBREG_REG (x),
3304 GET_MODE (SUBREG_REG (x)),
3306 if (code == CONSTANT_P_RTX)
3308 if (CONSTANT_P (XEXP (x, 0)))
3316 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3317 if (GET_CODE (XEXP (x, 0)) == HIGH
3318 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))