1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
/* NOTE(review): the comment's final line is elided in this listing;
   from the expansion below: evaluates to an all-ones HOST_WIDE_INT
   when LOW's sign bit is set, else zero — i.e. the high word that
   sign-extends LOW.  */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
57 /* Negate a CONST_INT rtx, truncating (because a conversion from a
58 maximally negative number can overflow). */
/* NOTE(review): surrounding lines (return type, braces) are elided in
   this listing.  The visible body negates INTVAL and re-canonicalizes
   through gen_int_mode, which truncates the result to MODE.  */
60 neg_const_int (enum machine_mode mode, rtx i)
62 return gen_int_mode (- INTVAL (i), mode);
66 /* Make a binary operation by properly ordering the operands and
67 seeing if the expression folds. */
/* NOTE(review): intervening lines are elided in this listing; braces and
   some statements between the visible lines are missing.  */
70 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
75 /* Put complex operands first and constants second if commutative. */
76 if (GET_RTX_CLASS (code) == 'c'
77 && swap_commutative_operands_p (op0, op1))
78 tem = op0, op0 = op1, op1 = tem;
80 /* If this simplifies, do it. */
81 tem = simplify_binary_operation (code, mode, op0, op1);
85 /* Handle addition and subtraction specially. Otherwise, just form
/* PLUS/MINUS get the dedicated associative-reassociation helper.  */
88 if (code == PLUS || code == MINUS)
90 tem = simplify_plus_minus (code, mode, op0, op1, 1);
/* Fallback: no simplification found, build the raw rtx.  */
95 return gen_rtx_fmt_ee (code, mode, op0, op1);
98 /* If X is a MEM referencing the constant pool, return the real value.
99 Otherwise return X. */
/* NOTE(review): switch arms, braces, and several statements are elided
   in this listing.  */
101 avoid_constant_pool_reference (rtx x)
104 enum machine_mode cmode;
106 switch (GET_CODE (x))
112 /* Handle float extensions of constant pool references. */
114 c = avoid_constant_pool_reference (tmp);
115 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
119 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
120 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
130 /* Call target hook to avoid the effects of -fpic etc... */
131 addr = (*targetm.delegitimize_address) (addr);
/* A LO_SUM's second operand carries the full symbolic address.  */
133 if (GET_CODE (addr) == LO_SUM)
134 addr = XEXP (addr, 1);
/* Only genuine constant-pool symbols can be resolved here.  */
136 if (GET_CODE (addr) != SYMBOL_REF
137 || ! CONSTANT_POOL_ADDRESS_P (addr))
140 c = get_pool_constant (addr);
141 cmode = get_pool_mode (addr);
143 /* If we're accessing the constant in a different mode than it was
144 originally stored, attempt to fix that up via subreg simplifications.
145 If that fails we have no choice but to return the original memory. */
146 if (cmode != GET_MODE (x))
148 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
155 /* Make a unary operation by first seeing if it folds and otherwise making
156 the specified operation. */
/* NOTE(review): braces and some lines are elided in this listing.  */
159 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
160 enum machine_mode op_mode)
164 /* If this simplifies, use it. */
165 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
/* Otherwise build the raw unary rtx.  */
168 return gen_rtx_fmt_e (code, mode, op);
171 /* Likewise for ternary operations. */
/* NOTE(review): braces and some lines are elided in this listing.  */
174 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
175 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
179 /* If this simplifies, use it. */
180 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
/* Otherwise build the raw ternary rtx.  */
184 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
187 /* Likewise, for relational operations.
188 CMP_MODE specifies mode comparison is done in.
/* NOTE(review): braces and several lines are elided in this listing.  */
192 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
193 enum machine_mode cmp_mode, rtx op0, rtx op1)
197 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
200 /* For the following tests, ensure const0_rtx is op1. */
201 if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
202 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
204 /* If op0 is a compare, extract the comparison arguments from it. */
205 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
206 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
208 /* If op0 is a comparison, extract the comparison arguments from it. */
209 if (code == NE && op1 == const0_rtx
210 && GET_RTX_CLASS (GET_CODE (op0)) == '<'
212 else if (code == EQ && op1 == const0_rtx)
214 /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'. */
215 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
225 /* Put complex operands first and constants second. */
226 if (swap_commutative_operands_p (op0, op1))
227 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
229 return gen_rtx_fmt_ee (code, mode, op0, op1);
232 /* Replace all occurrences of OLD in X with NEW and try to simplify the
233 resulting RTX. Return a new RTX which is as simplified as possible. */
/* NOTE(review): case labels, braces, and many statements are elided in
   this listing; the dispatch below is on GET_RTX_CLASS of X's code.  */
236 simplify_replace_rtx (rtx x, rtx old, rtx new)
238 enum rtx_code code = GET_CODE (x);
239 enum machine_mode mode = GET_MODE (x);
241 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
242 to build a new expression substituting recursively. If we can't do
243 anything, return our input. */
248 switch (GET_RTX_CLASS (code))
/* Unary class: substitute into the single operand.  */
252 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
253 rtx op = (XEXP (x, 0) == old
254 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
256 return simplify_gen_unary (code, mode, op, op_mode);
/* Binary/commutative class: substitute into both operands.  */
262 simplify_gen_binary (code, mode,
263 simplify_replace_rtx (XEXP (x, 0), old, new),
264 simplify_replace_rtx (XEXP (x, 1), old, new));
/* Comparison class: pick a non-VOID operand mode for the compare.  */
267 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
268 ? GET_MODE (XEXP (x, 0))
269 : GET_MODE (XEXP (x, 1)));
270 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
271 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
272 rtx temp = simplify_gen_relational (code, mode,
275 : GET_MODE (op0) != VOIDmode
279 #ifdef FLOAT_STORE_FLAG_VALUE
/* Float store-flag targets represent true/false differently.  */
280 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
282 if (temp == const0_rtx)
283 temp = CONST0_RTX (mode);
284 else if (temp == const_true_rtx)
285 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
/* Ternary class: substitute into all three operands.  */
295 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
296 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
299 simplify_gen_ternary (code, mode,
304 simplify_replace_rtx (XEXP (x, 1), old, new),
305 simplify_replace_rtx (XEXP (x, 2), old, new));
309 /* The only case we try to handle is a SUBREG. */
313 exp = simplify_gen_subreg (GET_MODE (x),
314 simplify_replace_rtx (SUBREG_REG (x),
316 GET_MODE (SUBREG_REG (x)),
/* MEM: substitute into the address, keeping MEM attributes.  */
325 return replace_equiv_address_nv (x,
326 simplify_replace_rtx (XEXP (x, 0),
328 else if (code == LO_SUM)
330 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
331 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
333 /* (lo_sum (high x) x) -> x */
334 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
337 return gen_rtx_LO_SUM (mode, op0, op1);
/* REG: replacement is by register number, not pointer identity.  */
339 else if (code == REG)
341 if (REG_P (old) && REGNO (x) == REGNO (old))
353 /* Try to simplify a unary operation CODE whose output mode is to be
354 MODE with input operand OP whose mode was originally OP_MODE.
355 Return zero if no simplification can be made. */
/* NOTE(review): this listing is heavily elided — case labels, braces,
   and many statements between the visible lines are missing.  Comments
   below describe only what the visible lines establish.  */
357 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
358 rtx op, enum machine_mode op_mode)
360 unsigned int width = GET_MODE_BITSIZE (mode);
361 rtx trueop = avoid_constant_pool_reference (op);
/* VEC_DUPLICATE of a constant folds to a CONST_VECTOR whose elements
   repeat the scalar (or cycle the input vector's elements).  */
363 if (code == VEC_DUPLICATE)
365 if (!VECTOR_MODE_P (mode))
367 if (GET_MODE (trueop) != VOIDmode
368 && !VECTOR_MODE_P (GET_MODE (trueop))
369 && GET_MODE_INNER (mode) != GET_MODE (trueop))
371 if (GET_MODE (trueop) != VOIDmode
372 && VECTOR_MODE_P (GET_MODE (trueop))
373 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
375 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
376 || GET_CODE (trueop) == CONST_VECTOR)
378 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
379 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
380 rtvec v = rtvec_alloc (n_elts);
383 if (GET_CODE (trueop) != CONST_VECTOR)
384 for (i = 0; i < n_elts; i++)
385 RTVEC_ELT (v, i) = trueop;
388 enum machine_mode inmode = GET_MODE (trueop);
389 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
390 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
392 if (in_n_elts >= n_elts || n_elts % in_n_elts)
394 for (i = 0; i < n_elts; i++)
395 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
397 return gen_rtx_CONST_VECTOR (mode, v);
/* Elementwise fold of a unary op over a CONST_VECTOR.  */
401 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
403 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
404 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
405 enum machine_mode opmode = GET_MODE (trueop);
406 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
407 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
408 rtvec v = rtvec_alloc (n_elts);
411 if (op_n_elts != n_elts)
414 for (i = 0; i < n_elts; i++)
416 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
417 CONST_VECTOR_ELT (trueop, i),
418 GET_MODE_INNER (opmode));
421 RTVEC_ELT (v, i) = x;
423 return gen_rtx_CONST_VECTOR (mode, v);
426 /* The order of these tests is critical so that, for example, we don't
427 check the wrong mode (input vs. output) for a conversion operation,
428 such as FIX. At some point, this should be simplified. */
/* FLOAT of an integer constant: build the REAL_VALUE from the
   (low, high) pair and truncate to MODE.  */
430 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
431 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
433 HOST_WIDE_INT hv, lv;
436 if (GET_CODE (trueop) == CONST_INT)
437 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
439 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
441 REAL_VALUE_FROM_INT (d, lv, hv, mode);
442 d = real_value_truncate (mode, d);
443 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* UNSIGNED_FLOAT: like FLOAT but interprets the bits as unsigned;
   needs OP_MODE to know how many of the bits are significant.  */
445 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
446 && (GET_CODE (trueop) == CONST_DOUBLE
447 || GET_CODE (trueop) == CONST_INT))
449 HOST_WIDE_INT hv, lv;
452 if (GET_CODE (trueop) == CONST_INT)
453 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
455 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
457 if (op_mode == VOIDmode)
459 /* We don't know how to interpret negative-looking numbers in
460 this case, so don't try to fold those. */
464 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
467 hv = 0, lv &= GET_MODE_MASK (op_mode);
469 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
470 d = real_value_truncate (mode, d);
471 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Single-word CONST_INT folding: one switch per opcode computes VAL
   (elided case labels between the visible lines).  */
474 if (GET_CODE (trueop) == CONST_INT
475 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
477 HOST_WIDE_INT arg0 = INTVAL (trueop);
/* ABS.  */
491 val = (arg0 >= 0 ? arg0 : - arg0);
495 /* Don't use ffs here. Instead, get low order bit and then its
496 number. If arg0 is zero, this will return 0, as desired. */
497 arg0 &= GET_MODE_MASK (mode);
498 val = exact_log2 (arg0 & (- arg0)) + 1;
/* CLZ: count leading zeros, with a target hook for arg0 == 0.  */
502 arg0 &= GET_MODE_MASK (mode);
503 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
506 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
/* CTZ: count trailing zeros.  */
510 arg0 &= GET_MODE_MASK (mode);
513 /* Even if the value at zero is undefined, we have to come
514 up with some replacement. Seems good enough. */
515 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
516 val = GET_MODE_BITSIZE (mode);
519 val = exact_log2 (arg0 & -arg0);
/* POPCOUNT: clear lowest set bit until zero, counting steps.  */
523 arg0 &= GET_MODE_MASK (mode);
526 val++, arg0 &= arg0 - 1;
/* PARITY: same loop shape; presumably VAL's low bit is used —
   the reduction step is elided here.  */
530 arg0 &= GET_MODE_MASK (mode);
533 val++, arg0 &= arg0 - 1;
542 /* When zero-extending a CONST_INT, we need to know its
/* ZERO_EXTEND.  */
544 if (op_mode == VOIDmode)
546 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
548 /* If we were really extending the mode,
549 we would have to distinguish between zero-extension
550 and sign-extension. */
551 if (width != GET_MODE_BITSIZE (op_mode))
555 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
556 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
/* SIGN_EXTEND: mask to OP_MODE, then subtract 2^bits if the
   OP_MODE sign bit is set.  */
562 if (op_mode == VOIDmode)
564 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
566 /* If we were really extending the mode,
567 we would have to distinguish between zero-extension
568 and sign-extension. */
569 if (width != GET_MODE_BITSIZE (op_mode))
573 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
576 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
578 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
579 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
/* Canonicalize VAL to MODE before wrapping it in a CONST_INT.  */
596 val = trunc_int_for_mode (val, mode);
598 return GEN_INT (val);
601 /* We can do some operations on integer CONST_DOUBLEs. Also allow
602 for a DImode operation on a CONST_INT. */
603 else if (GET_MODE (trueop) == VOIDmode
604 && width <= HOST_BITS_PER_WIDE_INT * 2
605 && (GET_CODE (trueop) == CONST_DOUBLE
606 || GET_CODE (trueop) == CONST_INT))
608 unsigned HOST_WIDE_INT l1, lv;
609 HOST_WIDE_INT h1, hv;
611 if (GET_CODE (trueop) == CONST_DOUBLE)
612 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
614 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
/* Two-word NOT/NEG/ABS etc. — case labels elided.  */
624 neg_double (l1, h1, &lv, &hv);
629 neg_double (l1, h1, &lv, &hv);
/* FFS over two words: look at the high word only if low is zero.  */
641 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
644 lv = exact_log2 (l1 & -l1) + 1;
/* CLZ over two words.  */
650 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
652 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
653 - HOST_BITS_PER_WIDE_INT;
/* CTZ over two words.  */
661 lv = GET_MODE_BITSIZE (mode);
663 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
666 lv = exact_log2 (l1 & -l1);
689 /* This is just a change-of-mode, so do nothing. */
/* ZERO_EXTEND from a narrower op_mode.  */
694 if (op_mode == VOIDmode)
697 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
701 lv = l1 & GET_MODE_MASK (op_mode);
/* SIGN_EXTEND from a narrower op_mode.  */
705 if (op_mode == VOIDmode
706 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
710 lv = l1 & GET_MODE_MASK (op_mode);
711 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
712 && (lv & ((HOST_WIDE_INT) 1
713 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
714 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
716 hv = HWI_SIGN_EXTEND (lv);
727 return immed_double_const (lv, hv, mode);
/* Floating-point constant folding (SQRT/ABS/NEG/TRUNCATE etc.;
   case labels elided).  */
730 else if (GET_CODE (trueop) == CONST_DOUBLE
731 && GET_MODE_CLASS (mode) == MODE_FLOAT)
733 REAL_VALUE_TYPE d, t;
734 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
/* Don't fold sqrt of a signaling NaN when SNaNs must be honored.  */
739 if (HONOR_SNANS (mode) && real_isnan (&d))
741 real_sqrt (&t, mode, &d);
745 d = REAL_VALUE_ABS (d);
748 d = REAL_VALUE_NEGATE (d);
751 d = real_value_truncate (mode, d);
754 /* All this does is change the mode. */
757 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
763 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* FIX/UNSIGNED_FIX of a float constant to a narrow integer mode.  */
766 else if (GET_CODE (trueop) == CONST_DOUBLE
767 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
768 && GET_MODE_CLASS (mode) == MODE_INT
769 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
773 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
776 case FIX: i = REAL_VALUE_FIX (d); break;
777 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
781 return gen_int_mode (i, mode);
784 /* This was formerly used only for non-IEEE float.
785 eggert@twinsun.com says it is safe for IEEE also. */
788 enum rtx_code reversed;
789 /* There are some simplifications we can do even if the operands
794 /* (not (not X)) == X. */
795 if (GET_CODE (op) == NOT)
798 /* (not (eq X Y)) == (ne X Y), etc. */
799 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
800 && ((reversed = reversed_comparison_code (op, NULL_RTX))
802 return gen_rtx_fmt_ee (reversed,
803 op_mode, XEXP (op, 0), XEXP (op, 1));
807 /* (neg (neg X)) == X. */
808 if (GET_CODE (op) == NEG)
813 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
814 becomes just the MINUS if its mode is MODE. This allows
815 folding switch statements on machines using casesi (such as
817 if (GET_CODE (op) == TRUNCATE
818 && GET_MODE (XEXP (op, 0)) == mode
819 && GET_CODE (XEXP (op, 0)) == MINUS
820 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
821 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
/* SIGN_EXTEND of a pointer when pointers sign-extend on this target.  */
824 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
825 if (! POINTERS_EXTEND_UNSIGNED
826 && mode == Pmode && GET_MODE (op) == ptr_mode
828 || (GET_CODE (op) == SUBREG
829 && GET_CODE (SUBREG_REG (op)) == REG
830 && REG_POINTER (SUBREG_REG (op))
831 && GET_MODE (SUBREG_REG (op)) == Pmode)))
832 return convert_memory_address (Pmode, op);
/* ZERO_EXTEND of a pointer when pointers zero-extend on this target.  */
836 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
838 if (POINTERS_EXTEND_UNSIGNED > 0
839 && mode == Pmode && GET_MODE (op) == ptr_mode
841 || (GET_CODE (op) == SUBREG
842 && GET_CODE (SUBREG_REG (op)) == REG
843 && REG_POINTER (SUBREG_REG (op))
844 && GET_MODE (SUBREG_REG (op)) == Pmode)))
845 return convert_memory_address (Pmode, op);
857 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
858 and OP1. Return 0 if no simplification is possible.
860 Don't use this for relational operations such as EQ or LT.
861 Use simplify_relational_operation instead. */
863 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
866 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
868 unsigned int width = GET_MODE_BITSIZE (mode);
870 rtx trueop0 = avoid_constant_pool_reference (op0);
871 rtx trueop1 = avoid_constant_pool_reference (op1);
873 /* Relational operations don't work here. We must know the mode
874 of the operands in order to do the comparison correctly.
875 Assuming a full word can give incorrect results.
876 Consider comparing 128 with -128 in QImode. */
878 if (GET_RTX_CLASS (code) == '<')
881 /* Make sure the constant is second. */
882 if (GET_RTX_CLASS (code) == 'c'
883 && swap_commutative_operands_p (trueop0, trueop1))
885 tem = op0, op0 = op1, op1 = tem;
886 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
889 if (VECTOR_MODE_P (mode)
890 && GET_CODE (trueop0) == CONST_VECTOR
891 && GET_CODE (trueop1) == CONST_VECTOR)
893 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
894 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
895 enum machine_mode op0mode = GET_MODE (trueop0);
896 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
897 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
898 enum machine_mode op1mode = GET_MODE (trueop1);
899 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
900 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
901 rtvec v = rtvec_alloc (n_elts);
904 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
907 for (i = 0; i < n_elts; i++)
909 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
910 CONST_VECTOR_ELT (trueop0, i),
911 CONST_VECTOR_ELT (trueop1, i));
914 RTVEC_ELT (v, i) = x;
917 return gen_rtx_CONST_VECTOR (mode, v);
920 if (GET_MODE_CLASS (mode) == MODE_FLOAT
921 && GET_CODE (trueop0) == CONST_DOUBLE
922 && GET_CODE (trueop1) == CONST_DOUBLE
923 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
925 REAL_VALUE_TYPE f0, f1, value;
927 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
928 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
929 f0 = real_value_truncate (mode, f0);
930 f1 = real_value_truncate (mode, f1);
932 if (HONOR_SNANS (mode)
933 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
937 && REAL_VALUES_EQUAL (f1, dconst0)
938 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
941 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
943 value = real_value_truncate (mode, value);
944 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
947 /* We can fold some multi-word operations. */
948 if (GET_MODE_CLASS (mode) == MODE_INT
949 && width == HOST_BITS_PER_WIDE_INT * 2
950 && (GET_CODE (trueop0) == CONST_DOUBLE
951 || GET_CODE (trueop0) == CONST_INT)
952 && (GET_CODE (trueop1) == CONST_DOUBLE
953 || GET_CODE (trueop1) == CONST_INT))
955 unsigned HOST_WIDE_INT l1, l2, lv;
956 HOST_WIDE_INT h1, h2, hv;
958 if (GET_CODE (trueop0) == CONST_DOUBLE)
959 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
961 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
963 if (GET_CODE (trueop1) == CONST_DOUBLE)
964 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
966 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
971 /* A - B == A + (-B). */
972 neg_double (l2, h2, &lv, &hv);
975 /* .. fall through ... */
978 add_double (l1, h1, l2, h2, &lv, &hv);
982 mul_double (l1, h1, l2, h2, &lv, &hv);
985 case DIV: case MOD: case UDIV: case UMOD:
986 /* We'd need to include tree.h to do this and it doesn't seem worth
991 lv = l1 & l2, hv = h1 & h2;
995 lv = l1 | l2, hv = h1 | h2;
999 lv = l1 ^ l2, hv = h1 ^ h2;
1005 && ((unsigned HOST_WIDE_INT) l1
1006 < (unsigned HOST_WIDE_INT) l2)))
1015 && ((unsigned HOST_WIDE_INT) l1
1016 > (unsigned HOST_WIDE_INT) l2)))
1023 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1025 && ((unsigned HOST_WIDE_INT) l1
1026 < (unsigned HOST_WIDE_INT) l2)))
1033 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1035 && ((unsigned HOST_WIDE_INT) l1
1036 > (unsigned HOST_WIDE_INT) l2)))
1042 case LSHIFTRT: case ASHIFTRT:
1044 case ROTATE: case ROTATERT:
1045 #ifdef SHIFT_COUNT_TRUNCATED
1046 if (SHIFT_COUNT_TRUNCATED)
1047 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1050 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1053 if (code == LSHIFTRT || code == ASHIFTRT)
1054 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1056 else if (code == ASHIFT)
1057 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1058 else if (code == ROTATE)
1059 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1060 else /* code == ROTATERT */
1061 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1068 return immed_double_const (lv, hv, mode);
1071 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1072 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1074 /* Even if we can't compute a constant result,
1075 there are some cases worth simplifying. */
1080 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1081 when x is NaN, infinite, or finite and nonzero. They aren't
1082 when x is -0 and the rounding mode is not towards -infinity,
1083 since (-0) + 0 is then 0. */
1084 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1087 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1088 transformations are safe even for IEEE. */
1089 if (GET_CODE (op0) == NEG)
1090 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1091 else if (GET_CODE (op1) == NEG)
1092 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1094 /* (~a) + 1 -> -a */
1095 if (INTEGRAL_MODE_P (mode)
1096 && GET_CODE (op0) == NOT
1097 && trueop1 == const1_rtx)
1098 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1100 /* Handle both-operands-constant cases. We can only add
1101 CONST_INTs to constants since the sum of relocatable symbols
1102 can't be handled by most assemblers. Don't add CONST_INT
1103 to CONST_INT since overflow won't be computed properly if wider
1104 than HOST_BITS_PER_WIDE_INT. */
1106 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1107 && GET_CODE (op1) == CONST_INT)
1108 return plus_constant (op0, INTVAL (op1));
1109 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1110 && GET_CODE (op0) == CONST_INT)
1111 return plus_constant (op1, INTVAL (op0));
1113 /* See if this is something like X * C - X or vice versa or
1114 if the multiplication is written as a shift. If so, we can
1115 distribute and make a new multiply, shift, or maybe just
1116 have X (if C is 2 in the example above). But don't make
1117 real multiply if we didn't have one before. */
1119 if (! FLOAT_MODE_P (mode))
1121 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1122 rtx lhs = op0, rhs = op1;
1125 if (GET_CODE (lhs) == NEG)
1126 coeff0 = -1, lhs = XEXP (lhs, 0);
1127 else if (GET_CODE (lhs) == MULT
1128 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1130 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1133 else if (GET_CODE (lhs) == ASHIFT
1134 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1135 && INTVAL (XEXP (lhs, 1)) >= 0
1136 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1138 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1139 lhs = XEXP (lhs, 0);
1142 if (GET_CODE (rhs) == NEG)
1143 coeff1 = -1, rhs = XEXP (rhs, 0);
1144 else if (GET_CODE (rhs) == MULT
1145 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1147 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1150 else if (GET_CODE (rhs) == ASHIFT
1151 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1152 && INTVAL (XEXP (rhs, 1)) >= 0
1153 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1155 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1156 rhs = XEXP (rhs, 0);
1159 if (rtx_equal_p (lhs, rhs))
1161 tem = simplify_gen_binary (MULT, mode, lhs,
1162 GEN_INT (coeff0 + coeff1));
1163 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1167 /* If one of the operands is a PLUS or a MINUS, see if we can
1168 simplify this by the associative law.
1169 Don't use the associative law for floating point.
1170 The inaccuracy makes it nonassociative,
1171 and subtle programs can break if operations are associated. */
1173 if (INTEGRAL_MODE_P (mode)
1174 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1175 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1176 || (GET_CODE (op0) == CONST
1177 && GET_CODE (XEXP (op0, 0)) == PLUS)
1178 || (GET_CODE (op1) == CONST
1179 && GET_CODE (XEXP (op1, 0)) == PLUS))
1180 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1186 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1187 using cc0, in which case we want to leave it as a COMPARE
1188 so we can distinguish it from a register-register-copy.
1190 In IEEE floating point, x-0 is not the same as x. */
1192 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1193 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1194 && trueop1 == CONST0_RTX (mode))
1198 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1199 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1200 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1201 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1203 rtx xop00 = XEXP (op0, 0);
1204 rtx xop10 = XEXP (op1, 0);
1207 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1209 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1210 && GET_MODE (xop00) == GET_MODE (xop10)
1211 && REGNO (xop00) == REGNO (xop10)
1212 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1213 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1220 /* We can't assume x-x is 0 even with non-IEEE floating point,
1221 but since it is zero except in very strange circumstances, we
1222 will treat it as zero with -funsafe-math-optimizations. */
1223 if (rtx_equal_p (trueop0, trueop1)
1224 && ! side_effects_p (op0)
1225 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1226 return CONST0_RTX (mode);
1228 /* Change subtraction from zero into negation. (0 - x) is the
1229 same as -x when x is NaN, infinite, or finite and nonzero.
1230 But if the mode has signed zeros, and does not round towards
1231 -infinity, then 0 - 0 is 0, not -0. */
1232 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1233 return simplify_gen_unary (NEG, mode, op1, mode);
1235 /* (-1 - a) is ~a. */
1236 if (trueop0 == constm1_rtx)
1237 return simplify_gen_unary (NOT, mode, op1, mode);
1239 /* Subtracting 0 has no effect unless the mode has signed zeros
1240 and supports rounding towards -infinity. In such a case,
1242 if (!(HONOR_SIGNED_ZEROS (mode)
1243 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1244 && trueop1 == CONST0_RTX (mode))
1247 /* See if this is something like X * C - X or vice versa or
1248 if the multiplication is written as a shift. If so, we can
1249 distribute and make a new multiply, shift, or maybe just
1250 have X (if C is 2 in the example above). But don't make
1251 real multiply if we didn't have one before. */
1253 if (! FLOAT_MODE_P (mode))
1255 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1256 rtx lhs = op0, rhs = op1;
1259 if (GET_CODE (lhs) == NEG)
1260 coeff0 = -1, lhs = XEXP (lhs, 0);
1261 else if (GET_CODE (lhs) == MULT
1262 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1264 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1267 else if (GET_CODE (lhs) == ASHIFT
1268 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1269 && INTVAL (XEXP (lhs, 1)) >= 0
1270 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1272 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1273 lhs = XEXP (lhs, 0);
1276 if (GET_CODE (rhs) == NEG)
1277 coeff1 = - 1, rhs = XEXP (rhs, 0);
1278 else if (GET_CODE (rhs) == MULT
1279 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1281 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1284 else if (GET_CODE (rhs) == ASHIFT
1285 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1286 && INTVAL (XEXP (rhs, 1)) >= 0
1287 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1289 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1290 rhs = XEXP (rhs, 0);
1293 if (rtx_equal_p (lhs, rhs))
1295 tem = simplify_gen_binary (MULT, mode, lhs,
1296 GEN_INT (coeff0 - coeff1));
1297 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1301 /* (a - (-b)) -> (a + b). True even for IEEE. */
1302 if (GET_CODE (op1) == NEG)
1303 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1305 /* If one of the operands is a PLUS or a MINUS, see if we can
1306 simplify this by the associative law.
1307 Don't use the associative law for floating point.
1308 The inaccuracy makes it nonassociative,
1309 and subtle programs can break if operations are associated. */
1311 if (INTEGRAL_MODE_P (mode)
1312 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1313 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1314 || (GET_CODE (op0) == CONST
1315 && GET_CODE (XEXP (op0, 0)) == PLUS)
1316 || (GET_CODE (op1) == CONST
1317 && GET_CODE (XEXP (op1, 0)) == PLUS))
1318 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1321 /* Don't let a relocatable value get a negative coeff. */
1322 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1323 return simplify_gen_binary (PLUS, mode,
1325 neg_const_int (mode, op1));
1327 /* (x - (x & y)) -> (x & ~y) */
1328 if (GET_CODE (op1) == AND)
1330 if (rtx_equal_p (op0, XEXP (op1, 0)))
1332 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1333 GET_MODE (XEXP (op1, 1)));
1334 return simplify_gen_binary (AND, mode, op0, tem);
1336 if (rtx_equal_p (op0, XEXP (op1, 1)))
1338 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1339 GET_MODE (XEXP (op1, 0)));
1340 return simplify_gen_binary (AND, mode, op0, tem);
1346 if (trueop1 == constm1_rtx)
1347 return simplify_gen_unary (NEG, mode, op0, mode);
1349 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1350 x is NaN, since x * 0 is then also NaN. Nor is it valid
1351 when the mode has signed zeros, since multiplying a negative
1352 number by 0 will give -0, not 0. */
1353 if (!HONOR_NANS (mode)
1354 && !HONOR_SIGNED_ZEROS (mode)
1355 && trueop1 == CONST0_RTX (mode)
1356 && ! side_effects_p (op0))
1359 /* In IEEE floating point, x*1 is not equivalent to x for
1361 if (!HONOR_SNANS (mode)
1362 && trueop1 == CONST1_RTX (mode))
1365 /* Convert multiply by constant power of two into shift unless
1366 we are still generating RTL. This test is a kludge. */
1367 if (GET_CODE (trueop1) == CONST_INT
1368 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1369 /* If the mode is larger than the host word size, and the
1370 uppermost bit is set, then this isn't a power of two due
1371 to implicit sign extension. */
1372 && (width <= HOST_BITS_PER_WIDE_INT
1373 || val != HOST_BITS_PER_WIDE_INT - 1)
1374 && ! rtx_equal_function_value_matters)
1375 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1377 /* x*2 is x+x and x*(-1) is -x */
1378 if (GET_CODE (trueop1) == CONST_DOUBLE
1379 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1380 && GET_MODE (op0) == mode)
1383 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1385 if (REAL_VALUES_EQUAL (d, dconst2))
1386 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1388 if (REAL_VALUES_EQUAL (d, dconstm1))
1389 return simplify_gen_unary (NEG, mode, op0, mode);
1394 if (trueop1 == const0_rtx)
1396 if (GET_CODE (trueop1) == CONST_INT
1397 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1398 == GET_MODE_MASK (mode)))
1400 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1402 /* A | (~A) -> -1 */
1403 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1404 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1405 && ! side_effects_p (op0)
1406 && GET_MODE_CLASS (mode) != MODE_CC)
1411 if (trueop1 == const0_rtx)
1413 if (GET_CODE (trueop1) == CONST_INT
1414 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1415 == GET_MODE_MASK (mode)))
1416 return simplify_gen_unary (NOT, mode, op0, mode);
1417 if (trueop0 == trueop1 && ! side_effects_p (op0)
1418 && GET_MODE_CLASS (mode) != MODE_CC)
1423 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1425 if (GET_CODE (trueop1) == CONST_INT
1426 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1427 == GET_MODE_MASK (mode)))
1429 if (trueop0 == trueop1 && ! side_effects_p (op0)
1430 && GET_MODE_CLASS (mode) != MODE_CC)
1433 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1434 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1435 && ! side_effects_p (op0)
1436 && GET_MODE_CLASS (mode) != MODE_CC)
1441 /* Convert divide by power of two into shift (divide by 1 handled
1443 if (GET_CODE (trueop1) == CONST_INT
1444 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1445 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1447 /* ... fall through ... */
1450 if (trueop1 == CONST1_RTX (mode))
1452 /* On some platforms DIV uses narrower mode than its
1454 rtx x = gen_lowpart_common (mode, op0);
1457 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1458 return gen_lowpart_SUBREG (mode, op0);
1463 /* Maybe change 0 / x to 0. This transformation isn't safe for
1464 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1465 Nor is it safe for modes with signed zeros, since dividing
1466 0 by a negative number gives -0, not 0. */
1467 if (!HONOR_NANS (mode)
1468 && !HONOR_SIGNED_ZEROS (mode)
1469 && trueop0 == CONST0_RTX (mode)
1470 && ! side_effects_p (op1))
1473 /* Change division by a constant into multiplication. Only do
1474 this with -funsafe-math-optimizations. */
1475 else if (GET_CODE (trueop1) == CONST_DOUBLE
1476 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1477 && trueop1 != CONST0_RTX (mode)
1478 && flag_unsafe_math_optimizations)
1481 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1483 if (! REAL_VALUES_EQUAL (d, dconst0))
1485 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1486 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1487 return simplify_gen_binary (MULT, mode, op0, tem);
1493 /* Handle modulus by power of two (mod with 1 handled below). */
1494 if (GET_CODE (trueop1) == CONST_INT
1495 && exact_log2 (INTVAL (trueop1)) > 0)
1496 return simplify_gen_binary (AND, mode, op0,
1497 GEN_INT (INTVAL (op1) - 1));
1499 /* ... fall through ... */
1502 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1503 && ! side_effects_p (op0) && ! side_effects_p (op1))
1510 /* Rotating ~0 always results in ~0. */
1511 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1512 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1513 && ! side_effects_p (op1))
1516 /* ... fall through ... */
1520 if (trueop1 == const0_rtx)
1522 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1527 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1528 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1529 && ! side_effects_p (op0))
1531 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1536 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1537 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1538 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1539 && ! side_effects_p (op0))
1541 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1546 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1548 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1553 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1555 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1563 /* ??? There are simplifications that can be done. */
1567 if (!VECTOR_MODE_P (mode))
1569 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1571 != GET_MODE_INNER (GET_MODE (trueop0)))
1572 || GET_CODE (trueop1) != PARALLEL
1573 || XVECLEN (trueop1, 0) != 1
1574 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1577 if (GET_CODE (trueop0) == CONST_VECTOR)
1578 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1582 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1583 || (GET_MODE_INNER (mode)
1584 != GET_MODE_INNER (GET_MODE (trueop0)))
1585 || GET_CODE (trueop1) != PARALLEL)
1588 if (GET_CODE (trueop0) == CONST_VECTOR)
1590 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1591 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1592 rtvec v = rtvec_alloc (n_elts);
1595 if (XVECLEN (trueop1, 0) != (int) n_elts)
1597 for (i = 0; i < n_elts; i++)
1599 rtx x = XVECEXP (trueop1, 0, i);
1601 if (GET_CODE (x) != CONST_INT)
1603 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1606 return gen_rtx_CONST_VECTOR (mode, v);
1612 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1613 ? GET_MODE (trueop0)
1614 : GET_MODE_INNER (mode));
1615 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1616 ? GET_MODE (trueop1)
1617 : GET_MODE_INNER (mode));
1619 if (!VECTOR_MODE_P (mode)
1620 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
1621 != GET_MODE_SIZE (mode)))
1624 if ((VECTOR_MODE_P (op0_mode)
1625 && (GET_MODE_INNER (mode)
1626 != GET_MODE_INNER (op0_mode)))
1627 || (!VECTOR_MODE_P (op0_mode)
1628 && GET_MODE_INNER (mode) != op0_mode))
1631 if ((VECTOR_MODE_P (op1_mode)
1632 && (GET_MODE_INNER (mode)
1633 != GET_MODE_INNER (op1_mode)))
1634 || (!VECTOR_MODE_P (op1_mode)
1635 && GET_MODE_INNER (mode) != op1_mode))
1638 if ((GET_CODE (trueop0) == CONST_VECTOR
1639 || GET_CODE (trueop0) == CONST_INT
1640 || GET_CODE (trueop0) == CONST_DOUBLE)
1641 && (GET_CODE (trueop1) == CONST_VECTOR
1642 || GET_CODE (trueop1) == CONST_INT
1643 || GET_CODE (trueop1) == CONST_DOUBLE))
1645 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1646 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1647 rtvec v = rtvec_alloc (n_elts);
1649 unsigned in_n_elts = 1;
1651 if (VECTOR_MODE_P (op0_mode))
1652 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
1653 for (i = 0; i < n_elts; i++)
1657 if (!VECTOR_MODE_P (op0_mode))
1658 RTVEC_ELT (v, i) = trueop0;
1660 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
1664 if (!VECTOR_MODE_P (op1_mode))
1665 RTVEC_ELT (v, i) = trueop1;
1667 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
1672 return gen_rtx_CONST_VECTOR (mode, v);
1684 /* Get the integer argument values in two forms:
1685 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1687 arg0 = INTVAL (trueop0);
1688 arg1 = INTVAL (trueop1);
1690 if (width < HOST_BITS_PER_WIDE_INT)
1692 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1693 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1696 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1697 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1700 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1701 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1709 /* Compute the value of the arithmetic. */
1714 val = arg0s + arg1s;
1718 val = arg0s - arg1s;
1722 val = arg0s * arg1s;
1727 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1730 val = arg0s / arg1s;
1735 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1738 val = arg0s % arg1s;
1743 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1746 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1751 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1754 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1770 /* If shift count is undefined, don't fold it; let the machine do
1771 what it wants. But truncate it if the machine will do that. */
1775 #ifdef SHIFT_COUNT_TRUNCATED
1776 if (SHIFT_COUNT_TRUNCATED)
1780 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1787 #ifdef SHIFT_COUNT_TRUNCATED
1788 if (SHIFT_COUNT_TRUNCATED)
1792 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1799 #ifdef SHIFT_COUNT_TRUNCATED
1800 if (SHIFT_COUNT_TRUNCATED)
1804 val = arg0s >> arg1;
1806 /* Bootstrap compiler may not have sign extended the right shift.
1807 Manually extend the sign to ensure bootstrap cc matches gcc. */
1808 if (arg0s < 0 && arg1 > 0)
1809 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1818 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1819 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1827 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1828 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1832 /* Do nothing here. */
1836 val = arg0s <= arg1s ? arg0s : arg1s;
1840 val = ((unsigned HOST_WIDE_INT) arg0
1841 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1845 val = arg0s > arg1s ? arg0s : arg1s;
1849 val = ((unsigned HOST_WIDE_INT) arg0
1850 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1857 /* ??? There are simplifications that can be done. */
1864 val = trunc_int_for_mode (val, mode);
1866 return GEN_INT (val);
1869 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1872 Rather than test for specific cases, we do this by a brute-force method
1873 and do all possible simplifications until no more changes occur. Then
1874 we rebuild the operation.
1876 If FORCE is true, then always generate the rtx. This is used to
1877 canonicalize stuff emitted from simplify_gen_binary. Note that this
1878 can still fail if the rtx is too complex. It won't fail just because
1879 the result is not 'simpler' than the input, however. */
1881 struct simplify_plus_minus_op_data
/* qsort comparison callback used by simplify_plus_minus to order its
   ops[] array: entries whose operand has higher
   commutative_operand_precedence sort toward the front (descending
   precedence).  P1 and P2 point at struct simplify_plus_minus_op_data
   elements.  */
1888 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
1890 const struct simplify_plus_minus_op_data *d1 = p1;
1891 const struct simplify_plus_minus_op_data *d2 = p2;
   /* Positive when d2's operand outranks d1's, i.e. higher-precedence
      operands compare as "smaller" and end up first.  */
1893 return (commutative_operand_precedence (d2->op)
1894 - commutative_operand_precedence (d1->op));
/* Flatten and resimplify a PLUS or MINUS whose operands may themselves
   be PLUS/MINUS/NEG/NOT/CONST expressions.  Operands are collected into
   the fixed-size ops[] array (each entry carrying a negation flag),
   combined pairwise until nothing changes, sorted canonically, and the
   expression tree is rebuilt.  Returns the rebuilt rtx, or gives up
   (elided paths) when no improvement was made and FORCE is not set.
   NOTE(review): this view of the file is elided; statements between the
   numbered lines are not visible here.  */
1898 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
1901 struct simplify_plus_minus_op_data ops[8];
1903 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1904 int first, negate, changed;
1907 memset (ops, 0, sizeof ops);
1909 /* Set up the two operands and then expand them until nothing has been
1910 changed. If we run out of room in our array, give up; this should
1911 almost never happen. */
1916 ops[1].neg = (code == MINUS);
1922 for (i = 0; i < n_ops; i++)
1924 rtx this_op = ops[i].op;
1925 int this_neg = ops[i].neg;
1926 enum rtx_code this_code = GET_CODE (this_op);
   /* A nested PLUS/MINUS contributes its second operand as a new array
      entry (negated for MINUS) and replaces the current entry with its
      first operand.  */
1935 ops[n_ops].op = XEXP (this_op, 1);
1936 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1939 ops[i].op = XEXP (this_op, 0);
   /* NEG just strips the wrapper and flips the entry's negation flag.  */
1945 ops[i].op = XEXP (this_op, 0);
1946 ops[i].neg = ! this_neg;
   /* Split (const (plus X Y)), X and Y constant, into two entries.  */
1952 && GET_CODE (XEXP (this_op, 0)) == PLUS
1953 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1954 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1956 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1957 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1958 ops[n_ops].neg = this_neg;
1966 /* ~a -> (-a - 1) */
1969 ops[n_ops].op = constm1_rtx;
1970 ops[n_ops++].neg = this_neg;
1971 ops[i].op = XEXP (this_op, 0);
1972 ops[i].neg = !this_neg;
   /* A negated CONST_INT is folded on the spot via neg_const_int.  */
1980 ops[i].op = neg_const_int (mode, this_op);
1993 /* If we only have two operands, we can't do anything. */
1994 if (n_ops <= 2 && !force)
1997 /* Count the number of CONSTs we didn't split above. */
1998 for (i = 0; i < n_ops; i++)
1999 if (GET_CODE (ops[i].op) == CONST)
2002 /* Now simplify each pair of operands until nothing changes. The first
2003 time through just simplify constants against each other. */
2010 for (i = 0; i < n_ops - 1; i++)
2011 for (j = i + 1; j < n_ops; j++)
2013 rtx lhs = ops[i].op, rhs = ops[j].op;
2014 int lneg = ops[i].neg, rneg = ops[j].neg;
2016 if (lhs != 0 && rhs != 0
2017 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2019 enum rtx_code ncode = PLUS;
2025 tem = lhs, lhs = rhs, rhs = tem;
2027 else if (swap_commutative_operands_p (lhs, rhs))
2028 tem = lhs, lhs = rhs, rhs = tem;
2030 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2032 /* Reject "simplifications" that just wrap the two
2033 arguments in a CONST. Failure to do so can result
2034 in infinite recursion with simplify_binary_operation
2035 when it calls us to simplify CONST operations. */
2037 && ! (GET_CODE (tem) == CONST
2038 && GET_CODE (XEXP (tem, 0)) == ncode
2039 && XEXP (XEXP (tem, 0), 0) == lhs
2040 && XEXP (XEXP (tem, 0), 1) == rhs)
2041 /* Don't allow -x + -1 -> ~x simplifications in the
2042 first pass. This allows us the chance to combine
2043 the -1 with other constants. */
2045 && GET_CODE (tem) == NOT
2046 && XEXP (tem, 0) == rhs))
   /* Normalize the combined result: strip a NEG into the entry's
      negation flag, and fold a negated CONST_INT immediately.  */
2049 if (GET_CODE (tem) == NEG)
2050 tem = XEXP (tem, 0), lneg = !lneg;
2051 if (GET_CODE (tem) == CONST_INT && lneg)
2052 tem = neg_const_int (mode, tem), lneg = 0;
2056 ops[j].op = NULL_RTX;
2066 /* Pack all the operands to the lower-numbered entries. */
2067 for (i = 0, j = 0; j < n_ops; j++)
2072 /* Sort the operations based on swap_commutative_operands_p. */
2073 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp)
2075 /* We suppressed creation of trivial CONST expressions in the
2076 combination loop to avoid recursion. Create one manually now.
2077 The combination loop should have ensured that there is exactly
2078 one CONST_INT, and the sort will have ensured that it is last
2079 in the array and that any other constant will be next-to-last. */
2082 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2083 && CONSTANT_P (ops[n_ops - 2].op))
2085 rtx value = ops[n_ops - 1].op;
   /* Negate the trailing CONST_INT when its sign disagrees with the
      constant it is being folded into.  */
2086 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2087 value = neg_const_int (mode, value);
2088 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2092 /* Count the number of CONSTs that we generated. */
2094 for (i = 0; i < n_ops; i++)
2095 if (GET_CODE (ops[i].op) == CONST)
2098 /* Give up if we didn't reduce the number of operands we had. Make
2099 sure we count a CONST as two operands. If we have the same
2100 number of operands, but have made more CONSTs than before, this
2101 is also an improvement, so accept it. */
2103 && (n_ops + n_consts > input_ops
2104 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2107 /* Put a non-negated operand first. If there aren't any, make all
2108 operands positive and negate the whole thing later. */
2111 for (i = 0; i < n_ops && ops[i].neg; i++)
2115 for (i = 0; i < n_ops; i++)
2127 /* Now make the result by performing the requested operations. */
2129 for (i = 1; i < n_ops; i++)
2130 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2131 mode, result, ops[i].op);
   /* If every operand carried a negation flag, wrap the sum in a NEG.  */
2133 return negate ? gen_rtx_NEG (mode, result) : result;
2136 /* Like simplify_binary_operation except used for relational operators.
2137 MODE is the mode of the operands, not that of the result. If MODE
2138 is VOIDmode, both operands must also be VOIDmode and we compare the
2139 operands in "infinite precision".
2141 If no simplification is possible, this function returns zero. Otherwise,
2142 it returns either const_true_rtx or const0_rtx. */
/* Simplify a relational operator CODE applied to OP0 and OP1, both of
   mode MODE (VOIDmode means both operands are VOIDmode constants
   compared in "infinite precision").  Returns const_true_rtx,
   const0_rtx, or 0 when nothing can be concluded.
   NOTE(review): this view of the file is elided; statements between
   the numbered lines are not visible here.  */
2145 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2148 int equal, op0lt, op0ltu, op1lt, op1ltu;
   /* A VOIDmode comparison is only meaningful when both operands are
      themselves VOIDmode.  */
2153 if (mode == VOIDmode
2154 && (GET_MODE (op0) != VOIDmode
2155 || GET_MODE (op1) != VOIDmode))
2158 /* If op0 is a compare, extract the comparison arguments from it. */
2159 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2160 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2162 trueop0 = avoid_constant_pool_reference (op0);
2163 trueop1 = avoid_constant_pool_reference (op1);
2165 /* We can't simplify MODE_CC values since we don't know what the
2166 actual comparison is. */
2167 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2170 /* Make sure the constant is second. */
2171 if (swap_commutative_operands_p (trueop0, trueop1))
2173 tem = op0, op0 = op1, op1 = tem;
2174 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2175 code = swap_condition (code);
2178 /* For integer comparisons of A and B maybe we can simplify A - B and can
2179 then simplify a comparison of that with zero. If A and B are both either
2180 a register or a CONST_INT, this can't help; testing for these cases will
2181 prevent infinite recursion here and speed things up.
2183 If CODE is an unsigned comparison, then we can never do this optimization,
2184 because it gives an incorrect result if the subtraction wraps around zero.
2185 ANSI C defines unsigned operations such that they never overflow, and
2186 thus such cases can not be ignored. */
2188 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2189 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2190 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2191 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2192 && code != GTU && code != GEU && code != LTU && code != LEU)
2193 return simplify_relational_operation (signed_condition (code),
2194 mode, tem, const0_rtx);
   /* Under -funsafe-math-optimizations NaNs are assumed away, so
      ORDERED is always true (and UNORDERED false; elided branch).  */
2196 if (flag_unsafe_math_optimizations && code == ORDERED)
2197 return const_true_rtx;
2199 if (flag_unsafe_math_optimizations && code == UNORDERED)
2202 /* For modes without NaNs, if the two operands are equal, we know the
2203 result except if they have side-effects. */
2204 if (! HONOR_NANS (GET_MODE (trueop0))
2205 && rtx_equal_p (trueop0, trueop1)
2206 && ! side_effects_p (trueop0))
2207 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2209 /* If the operands are floating-point constants, see if we can fold
2211 else if (GET_CODE (trueop0) == CONST_DOUBLE
2212 && GET_CODE (trueop1) == CONST_DOUBLE
2213 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2215 REAL_VALUE_TYPE d0, d1;
2217 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2218 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2220 /* Comparisons are unordered iff at least one of the values is NaN. */
2221 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2231 return const_true_rtx;
2244 equal = REAL_VALUES_EQUAL (d0, d1);
2245 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2246 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2249 /* Otherwise, see if the operands are both integers. */
2250 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2251 && (GET_CODE (trueop0) == CONST_DOUBLE
2252 || GET_CODE (trueop0) == CONST_INT)
2253 && (GET_CODE (trueop1) == CONST_DOUBLE
2254 || GET_CODE (trueop1) == CONST_INT))
2256 int width = GET_MODE_BITSIZE (mode);
2257 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2258 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2260 /* Get the two words comprising each integer constant. */
2261 if (GET_CODE (trueop0) == CONST_DOUBLE)
2263 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2264 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2268 l0u = l0s = INTVAL (trueop0);
2269 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2272 if (GET_CODE (trueop1) == CONST_DOUBLE)
2274 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2275 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2279 l1u = l1s = INTVAL (trueop1);
2280 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2283 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2284 we have to sign or zero-extend the values. */
2285 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2287 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2288 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2290 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2291 l0s |= ((HOST_WIDE_INT) (-1) << width);
2293 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2294 l1s |= ((HOST_WIDE_INT) (-1) << width);
2296 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2297 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
   /* Double-word comparison: high words decide, low words break ties
      (low words are compared unsigned in all cases).  */
2299 equal = (h0u == h1u && l0u == l1u);
2300 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2301 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2302 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2303 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2306 /* Otherwise, there are some code-specific tests we can make. */
   /* Comparisons of a known-nonzero address against zero (the enclosing
      case labels are elided in this view).  */
2312 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2317 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2318 return const_true_rtx;
2322 /* Unsigned values are never negative. */
2323 if (trueop1 == const0_rtx)
2324 return const_true_rtx;
2328 if (trueop1 == const0_rtx)
2333 /* Unsigned values are never greater than the largest
2335 if (GET_CODE (trueop1) == CONST_INT
2336 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2337 && INTEGRAL_MODE_P (mode))
2338 return const_true_rtx;
2342 if (GET_CODE (trueop1) == CONST_INT
2343 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2344 && INTEGRAL_MODE_P (mode))
2349 /* Optimize abs(x) < 0.0. */
2350 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2352 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2354 if (GET_CODE (tem) == ABS)
2360 /* Optimize abs(x) >= 0.0. */
2361 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2363 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2365 if (GET_CODE (tem) == ABS)
2366 return const_true_rtx;
2371 /* Optimize ! (abs(x) < 0.0). */
2372 if (trueop1 == CONST0_RTX (mode))
2374 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2376 if (GET_CODE (tem) == ABS)
2377 return const_true_rtx;
2388 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
   /* Map CODE onto the precomputed ordering flags (case labels elided
      in this view).  */
2394 return equal ? const_true_rtx : const0_rtx;
2397 return ! equal ? const_true_rtx : const0_rtx;
2400 return op0lt ? const_true_rtx : const0_rtx;
2403 return op1lt ? const_true_rtx : const0_rtx;
2405 return op0ltu ? const_true_rtx : const0_rtx;
2407 return op1ltu ? const_true_rtx : const0_rtx;
2410 return equal || op0lt ? const_true_rtx : const0_rtx;
2413 return equal || op1lt ? const_true_rtx : const0_rtx;
2415 return equal || op0ltu ? const_true_rtx : const0_rtx;
2417 return equal || op1ltu ? const_true_rtx : const0_rtx;
2419 return const_true_rtx;
2427 /* Simplify CODE, an operation with result mode MODE and three operands,
2428 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2429 a constant. Return 0 if no simplification is possible. */
/* Simplify CODE, an operation with result mode MODE and three operands
   OP0, OP1, OP2 (bit-field extraction, IF_THEN_ELSE, and a
   constant-mask vector merge are handled below).  OP0_MODE was OP0's
   mode before it became a constant.  Returns the simplified rtx or 0.
   NOTE(review): this view of the file is elided; statements between
   the numbered lines (including the switch's case labels) are not
   visible here.  */
2432 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2433 enum machine_mode op0_mode, rtx op0, rtx op1,
2436 unsigned int width = GET_MODE_BITSIZE (mode);
2438 /* VOIDmode means "infinite" precision. */
2440 width = HOST_BITS_PER_WIDE_INT;
2446 if (GET_CODE (op0) == CONST_INT
2447 && GET_CODE (op1) == CONST_INT
2448 && GET_CODE (op2) == CONST_INT
2449 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2450 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2452 /* Extracting a bit-field from a constant */
2453 HOST_WIDE_INT val = INTVAL (op0);
   /* OP1 is the field width, OP2 the bit position; on big-endian
      targets the position counts from the other end.  */
2455 if (BITS_BIG_ENDIAN)
2456 val >>= (GET_MODE_BITSIZE (op0_mode)
2457 - INTVAL (op2) - INTVAL (op1));
2459 val >>= INTVAL (op2);
2461 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2463 /* First zero-extend. */
2464 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2465 /* If desired, propagate sign bit. */
2466 if (code == SIGN_EXTRACT
2467 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2468 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2471 /* Clear the bits that don't belong in our mode,
2472 unless they and our sign bit are all one.
2473 So we get either a reasonable negative value or a reasonable
2474 unsigned value for this mode. */
2475 if (width < HOST_BITS_PER_WIDE_INT
2476 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2477 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2478 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2480 return GEN_INT (val);
   /* IF_THEN_ELSE with a constant condition selects one arm outright.  */
2485 if (GET_CODE (op0) == CONST_INT)
2486 return op0 != const0_rtx ? op1 : op2;
2488 /* Convert a == b ? b : a to "a". */
2489 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2490 && !HONOR_NANS (mode)
2491 && rtx_equal_p (XEXP (op0, 0), op1)
2492 && rtx_equal_p (XEXP (op0, 1), op2))
2494 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2495 && !HONOR_NANS (mode)
2496 && rtx_equal_p (XEXP (op0, 1), op1)
2497 && rtx_equal_p (XEXP (op0, 0), op2))
   /* A comparison condition: try to fold the comparison itself.  */
2499 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2501 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2502 ? GET_MODE (XEXP (op0, 1))
2503 : GET_MODE (XEXP (op0, 0)));
2505 if (cmp_mode == VOIDmode)
2506 cmp_mode = op0_mode;
2507 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2508 XEXP (op0, 0), XEXP (op0, 1));
2510 /* See if any simplifications were possible. */
2511 if (temp == const0_rtx)
2513 else if (temp == const1_rtx)
2518 /* Look for happy constants in op1 and op2. */
2519 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2521 HOST_WIDE_INT t = INTVAL (op1);
2522 HOST_WIDE_INT f = INTVAL (op2);
   /* cond ? STORE_FLAG_VALUE : 0 is just the comparison itself;
      cond ? 0 : STORE_FLAG_VALUE is the reversed comparison.  */
2524 if (t == STORE_FLAG_VALUE && f == 0)
2525 code = GET_CODE (op0);
2526 else if (t == 0 && f == STORE_FLAG_VALUE)
2529 tmp = reversed_comparison_code (op0, NULL_RTX);
2537 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
   /* Vector merge: OP2 is an integer bitmask; bit i set selects element
      i of OP0, clear selects element i of OP1.  */
2542 if (GET_MODE (op0) != mode
2543 || GET_MODE (op1) != mode
2544 || !VECTOR_MODE_P (mode))
2546 op2 = avoid_constant_pool_reference (op2);
2547 if (GET_CODE (op2) == CONST_INT)
2549 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2550 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2551 int mask = (1 << n_elts) - 1;
   /* All-zero mask takes everything from OP1; all-one mask from OP0
      (returns elided in this view).  */
2553 if (!(INTVAL (op2) & mask))
2555 if ((INTVAL (op2) & mask) == mask)
2558 op0 = avoid_constant_pool_reference (op0);
2559 op1 = avoid_constant_pool_reference (op1);
2560 if (GET_CODE (op0) == CONST_VECTOR
2561 && GET_CODE (op1) == CONST_VECTOR)
2563 rtvec v = rtvec_alloc (n_elts);
2566 for (i = 0; i < n_elts; i++)
2567 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2568 ? CONST_VECTOR_ELT (op0, i)
2569 : CONST_VECTOR_ELT (op1, i));
2570 return gen_rtx_CONST_VECTOR (mode, v);
2582 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2583 Return 0 if no simplification is possible. */
2585 simplify_subreg (enum machine_mode outermode, rtx op,
2586 enum machine_mode innermode, unsigned int byte)
2588 /* Little bit of sanity checking. */
2589 if (innermode == VOIDmode || outermode == VOIDmode
2590 || innermode == BLKmode || outermode == BLKmode)
2593 if (GET_MODE (op) != innermode
2594 && GET_MODE (op) != VOIDmode)
2597 if (byte % GET_MODE_SIZE (outermode)
2598 || byte >= GET_MODE_SIZE (innermode))
2601 if (outermode == innermode && !byte)
2604 /* Simplify subregs of vector constants. */
2605 if (GET_CODE (op) == CONST_VECTOR)
2607 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2608 const unsigned int offset = byte / elt_size;
2611 if (GET_MODE_INNER (innermode) == outermode)
2613 elt = CONST_VECTOR_ELT (op, offset);
2615 /* ?? We probably don't need this copy_rtx because constants
2616 can be shared. ?? */
2618 return copy_rtx (elt);
2620 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2621 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2623 return (gen_rtx_CONST_VECTOR
2625 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2626 &CONST_VECTOR_ELT (op, offset))));
2628 else if (GET_MODE_CLASS (outermode) == MODE_INT
2629 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2631 /* This happens when the target register size is smaller than
2632 the vector mode, and we synthesize operations with vectors
2633 of elements that are smaller than the register size. */
2634 HOST_WIDE_INT sum = 0, high = 0;
2635 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2636 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2637 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2638 int shift = BITS_PER_UNIT * elt_size;
2639 unsigned HOST_WIDE_INT unit_mask;
2641 unit_mask = (unsigned HOST_WIDE_INT) -1
2642 >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);
2644 for (; n_elts--; i += step)
2646 elt = CONST_VECTOR_ELT (op, i);
2647 if (GET_CODE (elt) == CONST_DOUBLE
2648 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2650 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2655 if (GET_CODE (elt) != CONST_INT)
2657 /* Avoid overflow. */
2658 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2660 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2661 sum = (sum << shift) + (INTVAL (elt) & unit_mask);
2663 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2664 return GEN_INT (trunc_int_for_mode (sum, outermode));
2665 else if (GET_MODE_BITSIZE (outermode) == 2* HOST_BITS_PER_WIDE_INT)
2666 return immed_double_const (sum, high, outermode);
2670 else if (GET_MODE_CLASS (outermode) == MODE_INT
2671 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2673 enum machine_mode new_mode
2674 = int_mode_for_mode (GET_MODE_INNER (innermode));
2675 int subbyte = byte % elt_size;
2677 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2680 return simplify_subreg (outermode, op, new_mode, subbyte);
2682 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2683 /* This shouldn't happen, but let's not do anything stupid. */
2687 /* Attempt to simplify constant to non-SUBREG expression. */
2688 if (CONSTANT_P (op))
2691 unsigned HOST_WIDE_INT val = 0;
2693 if (VECTOR_MODE_P (outermode))
2695 /* Construct a CONST_VECTOR from individual subregs. */
2696 enum machine_mode submode = GET_MODE_INNER (outermode);
2697 int subsize = GET_MODE_UNIT_SIZE (outermode);
2698 int i, elts = GET_MODE_NUNITS (outermode);
2699 rtvec v = rtvec_alloc (elts);
2702 for (i = 0; i < elts; i++, byte += subsize)
2704 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2705 /* ??? It would be nice if we could actually make such subregs
2706 on targets that allow such relocations. */
2707 if (byte >= GET_MODE_SIZE (innermode))
2708 elt = CONST0_RTX (submode);
2710 elt = simplify_subreg (submode, op, innermode, byte);
2713 RTVEC_ELT (v, i) = elt;
2715 return gen_rtx_CONST_VECTOR (outermode, v);
2718 /* ??? This code is partly redundant with code below, but can handle
2719 the subregs of floats and similar corner cases.
2720 Later it we should move all simplification code here and rewrite
2721 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2722 using SIMPLIFY_SUBREG. */
2723 if (subreg_lowpart_offset (outermode, innermode) == byte
2724 && GET_CODE (op) != CONST_VECTOR)
2726 rtx new = gen_lowpart_if_possible (outermode, op);
2731 /* Similar comment as above apply here. */
2732 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2733 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2734 && GET_MODE_CLASS (outermode) == MODE_INT)
2736 rtx new = constant_subword (op,
2737 (byte / UNITS_PER_WORD),
2743 if (GET_MODE_CLASS (outermode) != MODE_INT
2744 && GET_MODE_CLASS (outermode) != MODE_CC)
2746 enum machine_mode new_mode = int_mode_for_mode (outermode);
2748 if (new_mode != innermode || byte != 0)
2750 op = simplify_subreg (new_mode, op, innermode, byte);
2753 return simplify_subreg (outermode, op, new_mode, 0);
2757 offset = byte * BITS_PER_UNIT;
2758 switch (GET_CODE (op))
2761 if (GET_MODE (op) != VOIDmode)
2764 /* We can't handle this case yet. */
2765 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
2768 part = offset >= HOST_BITS_PER_WIDE_INT;
2769 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2770 && BYTES_BIG_ENDIAN)
2771 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2772 && WORDS_BIG_ENDIAN))
2774 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2775 offset %= HOST_BITS_PER_WIDE_INT;
2777 /* We've already picked the word we want from a double, so
2778 pretend this is actually an integer. */
2779 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2783 if (GET_CODE (op) == CONST_INT)
2786 /* We don't handle synthesizing of non-integral constants yet. */
2787 if (GET_MODE_CLASS (outermode) != MODE_INT)
2790 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2792 if (WORDS_BIG_ENDIAN)
2793 offset = (GET_MODE_BITSIZE (innermode)
2794 - GET_MODE_BITSIZE (outermode) - offset);
2795 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2796 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2797 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2798 - 2 * (offset % BITS_PER_WORD));
2801 if (offset >= HOST_BITS_PER_WIDE_INT)
2802 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2806 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2807 val = trunc_int_for_mode (val, outermode);
2808 return GEN_INT (val);
2815 /* Changing mode twice with SUBREG => just change it once,
2816      or not at all if changing back to the starting mode.  */
2817 if (GET_CODE (op) == SUBREG)
2819 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2820 int final_offset = byte + SUBREG_BYTE (op);
2823 if (outermode == innermostmode
2824 && byte == 0 && SUBREG_BYTE (op) == 0)
2825 return SUBREG_REG (op);
2827 /* The SUBREG_BYTE represents offset, as if the value were stored
2828 in memory. Irritating exception is paradoxical subreg, where
2829 we define SUBREG_BYTE to be 0. On big endian machines, this
2830 value should be negative. For a moment, undo this exception. */
2831 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2833 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2834 if (WORDS_BIG_ENDIAN)
2835 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2836 if (BYTES_BIG_ENDIAN)
2837 final_offset += difference % UNITS_PER_WORD;
2839 if (SUBREG_BYTE (op) == 0
2840 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2842 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2843 if (WORDS_BIG_ENDIAN)
2844 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2845 if (BYTES_BIG_ENDIAN)
2846 final_offset += difference % UNITS_PER_WORD;
2849 /* See whether resulting subreg will be paradoxical. */
2850 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2852 /* In nonparadoxical subregs we can't handle negative offsets. */
2853 if (final_offset < 0)
2855 /* Bail out in case resulting subreg would be incorrect. */
2856 if (final_offset % GET_MODE_SIZE (outermode)
2857 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2863 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2865 /* In paradoxical subreg, see if we are still looking on lower part.
2866 If so, our SUBREG_BYTE will be 0. */
2867 if (WORDS_BIG_ENDIAN)
2868 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2869 if (BYTES_BIG_ENDIAN)
2870 offset += difference % UNITS_PER_WORD;
2871 if (offset == final_offset)
2877 /* Recurse for further possible simplifications. */
2878 new = simplify_subreg (outermode, SUBREG_REG (op),
2879 GET_MODE (SUBREG_REG (op)),
2883 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2886 /* SUBREG of a hard register => just change the register number
2887 and/or mode. If the hard register is not valid in that mode,
2888 suppress this simplification. If the hard register is the stack,
2889 frame, or argument pointer, leave this as a SUBREG. */
2892 && (! REG_FUNCTION_VALUE_P (op)
2893 || ! rtx_equal_function_value_matters)
2894 && REGNO (op) < FIRST_PSEUDO_REGISTER
2895 #ifdef CANNOT_CHANGE_MODE_CLASS
2896 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
2897 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2898 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
2900 && ((reload_completed && !frame_pointer_needed)
2901 || (REGNO (op) != FRAME_POINTER_REGNUM
2902 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2903 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2906 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2907 && REGNO (op) != ARG_POINTER_REGNUM
2909 && REGNO (op) != STACK_POINTER_REGNUM)
2911 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2914 /* ??? We do allow it if the current REG is not valid for
2915 its mode. This is a kludge to work around how float/complex
2916 arguments are passed on 32-bit SPARC and should be fixed. */
2917 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2918 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2920 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
2922 /* Propagate original regno. We don't have any way to specify
2923 the offset inside original regno, so do so only for lowpart.
2924 The information is used only by alias analysis that can not
2925 	 grok partial registers anyway.  */
2927 if (subreg_lowpart_offset (outermode, innermode) == byte)
2928 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2933 /* If we have a SUBREG of a register that we are replacing and we are
2934 replacing it with a MEM, make a new MEM and try replacing the
2935 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2936 or if we would be widening it. */
2938 if (GET_CODE (op) == MEM
2939 && ! mode_dependent_address_p (XEXP (op, 0))
2940 /* Allow splitting of volatile memory references in case we don't
2941 have instruction to move the whole thing. */
2942 && (! MEM_VOLATILE_P (op)
2943 || ! have_insn_for (SET, innermode))
2944 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2945 return adjust_address_nv (op, outermode, byte);
2947 /* Handle complex values represented as CONCAT
2948 of real and imaginary part. */
2949 if (GET_CODE (op) == CONCAT)
2951 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2952 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2953 unsigned int final_offset;
2956 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2957 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2960 /* We can at least simplify it by referring directly to the relevant part. */
2961 return gen_rtx_SUBREG (outermode, part, final_offset);
2966 /* Make a SUBREG operation or equivalent if it folds. */
2969 simplify_gen_subreg (enum machine_mode outermode, rtx op,
2970 enum machine_mode innermode, unsigned int byte)
2973 /* Little bit of sanity checking. */
2974 if (innermode == VOIDmode || outermode == VOIDmode
2975 || innermode == BLKmode || outermode == BLKmode)
2978 if (GET_MODE (op) != innermode
2979 && GET_MODE (op) != VOIDmode)
2982 if (byte % GET_MODE_SIZE (outermode)
2983 || byte >= GET_MODE_SIZE (innermode))
2986 if (GET_CODE (op) == QUEUED)
2989 new = simplify_subreg (outermode, op, innermode, byte);
2993 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
2996 return gen_rtx_SUBREG (outermode, op, byte);
2998 /* Simplify X, an rtx expression.
3000 Return the simplified expression or NULL if no simplifications
3003 This is the preferred entry point into the simplification routines;
3004 however, we still allow passes to call the more specific routines.
3006 Right now GCC has three (yes, three) major bodies of RTL simplification
3007 code that need to be unified.
3009 1. fold_rtx in cse.c. This code uses various CSE specific
3010 information to aid in RTL simplification.
3012 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3013 it uses combine specific information to aid in RTL
3016 3. The routines in this file.
3019 Long term we want to only have one body of simplification code; to
3020 get to that state I recommend the following steps:
3022     1. Pore over fold_rtx & simplify_rtx and move any simplifications
3023 which are not pass dependent state into these routines.
3025 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3026 use this routine whenever possible.
3028 3. Allow for pass dependent state to be provided to these
3029 routines and add simplifications based on the pass dependent
3030 state. Remove code from cse.c & combine.c that becomes
3033 It will take time, but ultimately the compiler will be easier to
3034 maintain and improve. It's totally silly that when we add a
3035 simplification that it needs to be added to 4 places (3 for RTL
3036    simplification and 1 for tree simplification).  */
3039 simplify_rtx (rtx x)
3041 enum rtx_code code = GET_CODE (x);
3042 enum machine_mode mode = GET_MODE (x);
3045 switch (GET_RTX_CLASS (code))
3048 return simplify_unary_operation (code, mode,
3049 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3051 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3052 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3054 /* ... fall through ... */
3057 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3061 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3062 XEXP (x, 0), XEXP (x, 1),
3066 temp = simplify_relational_operation (code,
3067 ((GET_MODE (XEXP (x, 0))
3069 ? GET_MODE (XEXP (x, 0))
3070 : GET_MODE (XEXP (x, 1))),
3071 XEXP (x, 0), XEXP (x, 1));
3072 #ifdef FLOAT_STORE_FLAG_VALUE
3073 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3075 if (temp == const0_rtx)
3076 temp = CONST0_RTX (mode);
3078 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3086 return simplify_gen_subreg (mode, SUBREG_REG (x),
3087 GET_MODE (SUBREG_REG (x)),
3089 if (code == CONSTANT_P_RTX)
3091 if (CONSTANT_P (XEXP (x, 0)))
3099 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3100 if (GET_CODE (XEXP (x, 0)) == HIGH
3101 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))