1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
/* NOTE(review): LOW is evaluated twice by the macro above, so callers
   must not pass an expression with side effects.  */
/* Forward declarations of the static helpers defined later in this
   file (old-style PARAMS macro kept for pre-ISO-C compatibility).
   NOTE(review): this listing is elided; some prototype lines are
   truncated mid-declaration.  */
52 static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
53 static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
55 static rtx simplify_plus_minus PARAMS ((enum rtx_code,
56 enum machine_mode, rtx,
59 /* Negate a CONST_INT rtx, truncating (because a conversion from a
60 maximally negative number can overflow). */
/* Negate CONST_INT I in MODE.  Uses gen_int_mode so the result is
   truncated to MODE's width -- negating the most negative value would
   otherwise overflow, per the comment above.  NOTE(review): the
   listing is elided here; the `rtx i' parameter declaration and the
   function braces are not visible.  */
62 neg_const_int (mode, i)
63 enum machine_mode mode;
66 return gen_int_mode (- INTVAL (i), mode);
70 /* Make a binary operation by properly ordering the operands and
71 seeing if the expression folds. */
/* Build a binary rtx with code CODE in MODE from OP0 and OP1:
   canonicalize commutative operand order, attempt constant folding via
   simplify_binary_operation, route PLUS/MINUS through the
   simplify_plus_minus reassociation helper, and otherwise emit a fresh
   rtx.  NOTE(review): the listing is elided; the return-type line,
   operand declarations, and several intermediate lines are missing.  */
74 simplify_gen_binary (code, mode, op0, op1)
76 enum machine_mode mode;
81 /* Put complex operands first and constants second if commutative. */
82 if (GET_RTX_CLASS (code) == 'c'
83 && swap_commutative_operands_p (op0, op1))
84 tem = op0, op0 = op1, op1 = tem;
86 /* If this simplifies, do it. */
87 tem = simplify_binary_operation (code, mode, op0, op1);
91 /* Handle addition and subtraction specially. Otherwise, just form
94 if (code == PLUS || code == MINUS)
96 tem = simplify_plus_minus (code, mode, op0, op1, 1);
/* Fallback: no simplification applied, construct the expression.  */
101 return gen_rtx_fmt_ee (code, mode, op0, op1);
104 /* If X is a MEM referencing the constant pool, return the real value.
105 Otherwise return X. */
/* If X is a MEM that addresses the constant pool, return the constant
   it refers to (possibly converted to X's mode via simplify_subreg);
   otherwise return X unchanged.  Also looks through float extensions
   of pool references.  NOTE(review): the listing is elided; switch
   case labels, early returns, and the closing brace are missing.  */
107 avoid_constant_pool_reference (x)
111 enum machine_mode cmode;
113 switch (GET_CODE (x))
119 /* Handle float extensions of constant pool references. */
121 c = avoid_constant_pool_reference (tmp);
122 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
126 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
127 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
/* Undo -fpic-style address mangling so the SYMBOL_REF is visible.  */
137 /* Call target hook to avoid the effects of -fpic etc... */
138 addr = (*targetm.delegitimize_address) (addr);
140 if (GET_CODE (addr) == LO_SUM)
141 addr = XEXP (addr, 1);
/* Bail out unless this really is a constant-pool symbol.  */
143 if (GET_CODE (addr) != SYMBOL_REF
144 || ! CONSTANT_POOL_ADDRESS_P (addr))
147 c = get_pool_constant (addr);
148 cmode = get_pool_mode (addr);
150 /* If we're accessing the constant in a different mode than it was
151 originally stored, attempt to fix that up via subreg simplifications.
152 If that fails we have no choice but to return the original memory. */
153 if (cmode != GET_MODE (x))
155 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
162 /* Make a unary operation by first seeing if it folds and otherwise making
163 the specified operation. */
/* Build a unary rtx with code CODE in MODE from OP (whose original
   mode was OP_MODE), first trying simplify_unary_operation and only
   constructing a new rtx when no folding applies.  NOTE(review): the
   listing is elided; some declarations and braces are missing.  */
166 simplify_gen_unary (code, mode, op, op_mode)
168 enum machine_mode mode;
170 enum machine_mode op_mode;
174 /* If this simplifies, use it. */
175 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
178 return gen_rtx_fmt_e (code, mode, op);
181 /* Likewise for ternary operations. */
/* Likewise for ternary codes: fold via simplify_ternary_operation if
   possible, else build the (code op0 op1 op2) rtx.  OP0_MODE is the
   mode OP0 originally had.  NOTE(review): the listing is elided; the
   remaining parameter declarations are missing.  */
184 simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
186 enum machine_mode mode, op0_mode;
191 /* If this simplifies, use it. */
192 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
196 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
199 /* Likewise, for relational operations.
200 CMP_MODE specifies mode comparison is done in.
/* Build a relational rtx (comparison done in CMP_MODE, result in
   MODE): try simplify_relational_operation, canonicalize so the
   constant is op1, look through COMPARE and NE/EQ-against-zero of a
   comparison, then construct the rtx.  NOTE(review): the listing is
   elided; several branch bodies are missing.  */
204 simplify_gen_relational (code, mode, cmp_mode, op0, op1)
206 enum machine_mode mode;
207 enum machine_mode cmp_mode;
212 if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
215 /* For the following tests, ensure const0_rtx is op1. */
216 if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
217 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
219 /* If op0 is a compare, extract the comparison arguments from it. */
220 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
221 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
223 /* If op0 is a comparison, extract the comparison arguments from it. */
224 if (code == NE && op1 == const0_rtx
225 && GET_RTX_CLASS (GET_CODE (op0)) == '<')
227 else if (code == EQ && op1 == const0_rtx)
229 /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'. */
230 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
240 /* Put complex operands first and constants second. */
241 if (swap_commutative_operands_p (op0, op1))
242 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
244 return gen_rtx_fmt_ee (code, mode, op0, op1);
247 /* Replace all occurrences of OLD in X with NEW and try to simplify the
248 resulting RTX. Return a new RTX which is as simplified as possible. */
/* Recursively replace every occurrence of OLD in X with NEW,
   re-simplifying each rebuilt subexpression through the
   simplify_gen_* entry points.  Dispatches on the rtx class of X
   (unary, binary, relational, ternary, extra) with special handling
   for SUBREG, MEM, LO_SUM and REG.  NOTE(review): the listing is
   elided; several case labels, returns, and braces are missing.  */
251 simplify_replace_rtx (x, old, new)
256 enum rtx_code code = GET_CODE (x);
257 enum machine_mode mode = GET_MODE (x);
259 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
260 to build a new expression substituting recursively. If we can't do
261 anything, return our input. */
266 switch (GET_RTX_CLASS (code))
/* Unary: substitute in the single operand, preserving its mode.  */
270 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
271 rtx op = (XEXP (x, 0) == old
272 ? new : simplify_replace_rtx (XEXP (x, 0), old, new));
274 return simplify_gen_unary (code, mode, op, op_mode);
/* Binary: substitute in both operands.  */
280 simplify_gen_binary (code, mode,
281 simplify_replace_rtx (XEXP (x, 0), old, new),
282 simplify_replace_rtx (XEXP (x, 1), old, new));
/* Relational: pick the comparison mode from whichever operand has a
   non-VOID mode.  */
285 enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
286 ? GET_MODE (XEXP (x, 0))
287 : GET_MODE (XEXP (x, 1)));
288 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
289 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
292 simplify_gen_relational (code, mode,
295 : GET_MODE (op0) != VOIDmode
/* Ternary: substitute in all three operands.  */
304 enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
305 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
308 simplify_gen_ternary (code, mode,
313 simplify_replace_rtx (XEXP (x, 1), old, new),
314 simplify_replace_rtx (XEXP (x, 2), old, new));
318 /* The only case we try to handle is a SUBREG. */
322 exp = simplify_gen_subreg (GET_MODE (x),
323 simplify_replace_rtx (SUBREG_REG (x),
325 GET_MODE (SUBREG_REG (x)),
/* MEM: substitute inside the address without validating it.  */
334 return replace_equiv_address_nv (x,
335 simplify_replace_rtx (XEXP (x, 0),
337 else if (code == LO_SUM)
339 rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
340 rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
342 /* (lo_sum (high x) x) -> x */
343 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
346 return gen_rtx_LO_SUM (mode, op0, op1);
/* REG: match by register number, not pointer identity.  */
348 else if (code == REG)
350 if (REG_P (old) && REGNO (x) == REGNO (old))
362 /* Try to simplify a unary operation CODE whose output mode is to be
363 MODE with input operand OP whose mode was originally OP_MODE.
364 Return zero if no simplification can be made. */
/* Try to fold the unary operation CODE with result mode MODE applied
   to OP (original mode OP_MODE).  Returns the folded rtx or zero if
   no simplification is possible.  Handles, in order: VEC_DUPLICATE of
   constants, FLOAT/UNSIGNED_FLOAT of integer constants, CONST_INT
   folding for host-word-sized modes, double-word (CONST_DOUBLE)
   folding, floating-point constant folding, FIX/UNSIGNED_FIX to
   integer, and finally symbolic identities such as (not (not X)) and
   (neg (neg X)).  NOTE(review): this listing is elided -- the big
   switch statements' case labels, many branch bodies, and the closing
   brace are missing, so the control flow below is only partially
   visible.  */
366 simplify_unary_operation (code, mode, op, op_mode)
368 enum machine_mode mode;
370 enum machine_mode op_mode;
372 unsigned int width = GET_MODE_BITSIZE (mode);
373 rtx trueop = avoid_constant_pool_reference (op);
/* VEC_DUPLICATE: validate the element mode, then replicate a constant
   operand across all N elements of the result vector.  */
375 if (code == VEC_DUPLICATE)
377 if (!VECTOR_MODE_P (mode))
379 if (GET_MODE (trueop) != VOIDmode
380 && !VECTOR_MODE_P (GET_MODE (trueop))
381 && GET_MODE_INNER (mode) != GET_MODE (trueop))
383 if (GET_MODE (trueop) != VOIDmode
384 && VECTOR_MODE_P (GET_MODE (trueop))
385 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
387 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
388 || GET_CODE (trueop) == CONST_VECTOR)
390 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
391 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
392 rtvec v = rtvec_alloc (n_elts);
395 if (GET_CODE (trueop) != CONST_VECTOR)
396 for (i = 0; i < n_elts; i++)
397 RTVEC_ELT (v, i) = trueop;
/* Duplicating a CONST_VECTOR: tile its elements cyclically.  */
400 enum machine_mode inmode = GET_MODE (trueop);
401 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
402 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
404 if (in_n_elts >= n_elts || n_elts % in_n_elts)
406 for (i = 0; i < n_elts; i++)
407 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
409 return gen_rtx_CONST_VECTOR (mode, v);
413 /* The order of these tests is critical so that, for example, we don't
414 check the wrong mode (input vs. output) for a conversion operation,
415 such as FIX. At some point, this should be simplified. */
/* FLOAT of an integer constant: build the REAL_VALUE from the
   (low, high) pair and truncate to MODE.  */
417 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
418 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
420 HOST_WIDE_INT hv, lv;
423 if (GET_CODE (trueop) == CONST_INT)
424 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
426 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
428 REAL_VALUE_FROM_INT (d, lv, hv, mode);
429 d = real_value_truncate (mode, d);
430 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* UNSIGNED_FLOAT: like FLOAT but the operand bits are interpreted as
   unsigned; needs OP_MODE to mask narrow operands correctly.  */
432 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
433 && (GET_CODE (trueop) == CONST_DOUBLE
434 || GET_CODE (trueop) == CONST_INT))
436 HOST_WIDE_INT hv, lv;
439 if (GET_CODE (trueop) == CONST_INT)
440 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
442 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
444 if (op_mode == VOIDmode)
446 /* We don't know how to interpret negative-looking numbers in
447 this case, so don't try to fold those. */
451 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
454 hv = 0, lv &= GET_MODE_MASK (op_mode);
456 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
457 d = real_value_truncate (mode, d);
458 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* CONST_INT operand, result fits in one host word: compute VAL per
   CODE (case labels elided below), truncate, and return.  */
461 if (GET_CODE (trueop) == CONST_INT
462 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
464 HOST_WIDE_INT arg0 = INTVAL (trueop);
478 val = (arg0 >= 0 ? arg0 : - arg0);
482 /* Don't use ffs here. Instead, get low order bit and then its
483 number. If arg0 is zero, this will return 0, as desired. */
484 arg0 &= GET_MODE_MASK (mode);
485 val = exact_log2 (arg0 & (- arg0)) + 1;
489 arg0 &= GET_MODE_MASK (mode);
490 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
493 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
497 arg0 &= GET_MODE_MASK (mode);
500 /* Even if the value at zero is undefined, we have to come
501 up with some replacement. Seems good enough. */
502 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
503 val = GET_MODE_BITSIZE (mode);
506 val = exact_log2 (arg0 & -arg0);
/* Popcount/parity style loops: clear the lowest set bit each pass.  */
510 arg0 &= GET_MODE_MASK (mode);
513 val++, arg0 &= arg0 - 1;
517 arg0 &= GET_MODE_MASK (mode);
520 val++, arg0 &= arg0 - 1;
529 /* When zero-extending a CONST_INT, we need to know its
531 if (op_mode == VOIDmode)
533 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
535 /* If we were really extending the mode,
536 we would have to distinguish between zero-extension
537 and sign-extension. */
538 if (width != GET_MODE_BITSIZE (op_mode))
542 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
543 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
/* Sign extension: mask to OP_MODE, then subtract 2^width if the sign
   bit of the narrow value is set.  */
549 if (op_mode == VOIDmode)
551 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
553 /* If we were really extending the mode,
554 we would have to distinguish between zero-extension
555 and sign-extension. */
556 if (width != GET_MODE_BITSIZE (op_mode))
560 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
563 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
565 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
566 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
583 val = trunc_int_for_mode (val, mode);
585 return GEN_INT (val);
588 /* We can do some operations on integer CONST_DOUBLEs. Also allow
589 for a DImode operation on a CONST_INT. */
590 else if (GET_MODE (trueop) == VOIDmode
591 && width <= HOST_BITS_PER_WIDE_INT * 2
592 && (GET_CODE (trueop) == CONST_DOUBLE
593 || GET_CODE (trueop) == CONST_INT))
595 unsigned HOST_WIDE_INT l1, lv;
596 HOST_WIDE_INT h1, hv;
598 if (GET_CODE (trueop) == CONST_DOUBLE)
599 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
601 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
/* Double-word bit operations (FFS/CLZ/CTZ etc.) split between the
   low and high halves of the (l1, h1) pair.  */
611 neg_double (l1, h1, &lv, &hv);
616 neg_double (l1, h1, &lv, &hv);
628 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
631 lv = exact_log2 (l1 & -l1) + 1;
637 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
639 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
640 - HOST_BITS_PER_WIDE_INT;
648 lv = GET_MODE_BITSIZE (mode);
650 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
653 lv = exact_log2 (l1 & -l1);
676 /* This is just a change-of-mode, so do nothing. */
681 if (op_mode == VOIDmode)
684 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
688 lv = l1 & GET_MODE_MASK (op_mode);
692 if (op_mode == VOIDmode
693 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
697 lv = l1 & GET_MODE_MASK (op_mode);
698 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
699 && (lv & ((HOST_WIDE_INT) 1
700 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
701 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
703 hv = HWI_SIGN_EXTEND (lv);
714 return immed_double_const (lv, hv, mode);
/* Floating-point constant operand, floating result: fold via the
   REAL_VALUE machinery (SQRT, ABS, NEG, truncation, FIX_TRUNC).  */
717 else if (GET_CODE (trueop) == CONST_DOUBLE
718 && GET_MODE_CLASS (mode) == MODE_FLOAT)
720 REAL_VALUE_TYPE d, t;
721 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
726 if (HONOR_SNANS (mode) && real_isnan (&d))
728 real_sqrt (&t, mode, &d);
732 d = REAL_VALUE_ABS (d);
735 d = REAL_VALUE_NEGATE (d);
738 d = real_value_truncate (mode, d);
741 /* All this does is change the mode. */
744 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
750 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Floating-point operand, integer result: FIX / UNSIGNED_FIX.  */
753 else if (GET_CODE (trueop) == CONST_DOUBLE
754 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
755 && GET_MODE_CLASS (mode) == MODE_INT
756 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
760 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
763 case FIX: i = REAL_VALUE_FIX (d); break;
764 case UNSIGNED_FIX: i = REAL_VALUE_UNSIGNED_FIX (d); break;
768 return gen_int_mode (i, mode);
771 /* This was formerly used only for non-IEEE float.
772 eggert@twinsun.com says it is safe for IEEE also. */
/* Symbolic (non-constant-operand) simplifications follow.  */
775 enum rtx_code reversed;
776 /* There are some simplifications we can do even if the operands
781 /* (not (not X)) == X. */
782 if (GET_CODE (op) == NOT)
785 /* (not (eq X Y)) == (ne X Y), etc. */
786 if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
787 && ((reversed = reversed_comparison_code (op, NULL_RTX))
789 return gen_rtx_fmt_ee (reversed,
790 op_mode, XEXP (op, 0), XEXP (op, 1));
794 /* (neg (neg X)) == X. */
795 if (GET_CODE (op) == NEG)
800 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
801 becomes just the MINUS if its mode is MODE. This allows
802 folding switch statements on machines using casesi (such as
804 if (GET_CODE (op) == TRUNCATE
805 && GET_MODE (XEXP (op, 0)) == mode
806 && GET_CODE (XEXP (op, 0)) == MINUS
807 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
808 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
/* Pointer-extension special cases: sign-extending ptr_mode to Pmode
   (and, below, the unsigned variant) collapses to a plain address
   conversion on targets without a ptr_extend pattern.  */
811 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
812 if (! POINTERS_EXTEND_UNSIGNED
813 && mode == Pmode && GET_MODE (op) == ptr_mode
815 || (GET_CODE (op) == SUBREG
816 && GET_CODE (SUBREG_REG (op)) == REG
817 && REG_POINTER (SUBREG_REG (op))
818 && GET_MODE (SUBREG_REG (op)) == Pmode)))
819 return convert_memory_address (Pmode, op);
823 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
825 if (POINTERS_EXTEND_UNSIGNED > 0
826 && mode == Pmode && GET_MODE (op) == ptr_mode
828 || (GET_CODE (op) == SUBREG
829 && GET_CODE (SUBREG_REG (op)) == REG
830 && REG_POINTER (SUBREG_REG (op))
831 && GET_MODE (SUBREG_REG (op)) == Pmode)))
832 return convert_memory_address (Pmode, op);
844 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
845 and OP1. Return 0 if no simplification is possible.
847 Don't use this for relational operations such as EQ or LT.
848 Use simplify_relational_operation instead. */
850 simplify_binary_operation (code, mode, op0, op1)
852 enum machine_mode mode;
855 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
857 unsigned int width = GET_MODE_BITSIZE (mode);
859 rtx trueop0 = avoid_constant_pool_reference (op0);
860 rtx trueop1 = avoid_constant_pool_reference (op1);
862 /* Relational operations don't work here. We must know the mode
863 of the operands in order to do the comparison correctly.
864 Assuming a full word can give incorrect results.
865 Consider comparing 128 with -128 in QImode. */
867 if (GET_RTX_CLASS (code) == '<')
870 /* Make sure the constant is second. */
871 if (GET_RTX_CLASS (code) == 'c'
872 && swap_commutative_operands_p (trueop0, trueop1))
874 tem = op0, op0 = op1, op1 = tem;
875 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
878 if (GET_MODE_CLASS (mode) == MODE_FLOAT
879 && GET_CODE (trueop0) == CONST_DOUBLE
880 && GET_CODE (trueop1) == CONST_DOUBLE
881 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
883 REAL_VALUE_TYPE f0, f1, value;
885 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
886 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
887 f0 = real_value_truncate (mode, f0);
888 f1 = real_value_truncate (mode, f1);
891 && !MODE_HAS_INFINITIES (mode)
892 && REAL_VALUES_EQUAL (f1, dconst0))
895 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
897 value = real_value_truncate (mode, value);
898 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
901 /* We can fold some multi-word operations. */
902 if (GET_MODE_CLASS (mode) == MODE_INT
903 && width == HOST_BITS_PER_WIDE_INT * 2
904 && (GET_CODE (trueop0) == CONST_DOUBLE
905 || GET_CODE (trueop0) == CONST_INT)
906 && (GET_CODE (trueop1) == CONST_DOUBLE
907 || GET_CODE (trueop1) == CONST_INT))
909 unsigned HOST_WIDE_INT l1, l2, lv;
910 HOST_WIDE_INT h1, h2, hv;
912 if (GET_CODE (trueop0) == CONST_DOUBLE)
913 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
915 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
917 if (GET_CODE (trueop1) == CONST_DOUBLE)
918 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
920 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
925 /* A - B == A + (-B). */
926 neg_double (l2, h2, &lv, &hv);
929 /* .. fall through ... */
932 add_double (l1, h1, l2, h2, &lv, &hv);
936 mul_double (l1, h1, l2, h2, &lv, &hv);
939 case DIV: case MOD: case UDIV: case UMOD:
940 /* We'd need to include tree.h to do this and it doesn't seem worth
945 lv = l1 & l2, hv = h1 & h2;
949 lv = l1 | l2, hv = h1 | h2;
953 lv = l1 ^ l2, hv = h1 ^ h2;
959 && ((unsigned HOST_WIDE_INT) l1
960 < (unsigned HOST_WIDE_INT) l2)))
969 && ((unsigned HOST_WIDE_INT) l1
970 > (unsigned HOST_WIDE_INT) l2)))
977 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
979 && ((unsigned HOST_WIDE_INT) l1
980 < (unsigned HOST_WIDE_INT) l2)))
987 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
989 && ((unsigned HOST_WIDE_INT) l1
990 > (unsigned HOST_WIDE_INT) l2)))
996 case LSHIFTRT: case ASHIFTRT:
998 case ROTATE: case ROTATERT:
999 #ifdef SHIFT_COUNT_TRUNCATED
1000 if (SHIFT_COUNT_TRUNCATED)
1001 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1004 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1007 if (code == LSHIFTRT || code == ASHIFTRT)
1008 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1010 else if (code == ASHIFT)
1011 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1012 else if (code == ROTATE)
1013 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1014 else /* code == ROTATERT */
1015 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1022 return immed_double_const (lv, hv, mode);
1025 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1026 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1028 /* Even if we can't compute a constant result,
1029 there are some cases worth simplifying. */
1034 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1035 when x is NaN, infinite, or finite and nonzero. They aren't
1036 when x is -0 and the rounding mode is not towards -infinity,
1037 since (-0) + 0 is then 0. */
1038 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1041 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1042 transformations are safe even for IEEE. */
1043 if (GET_CODE (op0) == NEG)
1044 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1045 else if (GET_CODE (op1) == NEG)
1046 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1048 /* (~a) + 1 -> -a */
1049 if (INTEGRAL_MODE_P (mode)
1050 && GET_CODE (op0) == NOT
1051 && trueop1 == const1_rtx)
1052 return gen_rtx_NEG (mode, XEXP (op0, 0));
1054 /* Handle both-operands-constant cases. We can only add
1055 CONST_INTs to constants since the sum of relocatable symbols
1056 can't be handled by most assemblers. Don't add CONST_INT
1057 to CONST_INT since overflow won't be computed properly if wider
1058 than HOST_BITS_PER_WIDE_INT. */
1060 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1061 && GET_CODE (op1) == CONST_INT)
1062 return plus_constant (op0, INTVAL (op1));
1063 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1064 && GET_CODE (op0) == CONST_INT)
1065 return plus_constant (op1, INTVAL (op0));
1067 /* See if this is something like X * C - X or vice versa or
1068 if the multiplication is written as a shift. If so, we can
1069 distribute and make a new multiply, shift, or maybe just
1070 have X (if C is 2 in the example above). But don't make
1071 real multiply if we didn't have one before. */
1073 if (! FLOAT_MODE_P (mode))
1075 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1076 rtx lhs = op0, rhs = op1;
1079 if (GET_CODE (lhs) == NEG)
1080 coeff0 = -1, lhs = XEXP (lhs, 0);
1081 else if (GET_CODE (lhs) == MULT
1082 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1084 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1087 else if (GET_CODE (lhs) == ASHIFT
1088 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1089 && INTVAL (XEXP (lhs, 1)) >= 0
1090 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1092 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1093 lhs = XEXP (lhs, 0);
1096 if (GET_CODE (rhs) == NEG)
1097 coeff1 = -1, rhs = XEXP (rhs, 0);
1098 else if (GET_CODE (rhs) == MULT
1099 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1101 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1104 else if (GET_CODE (rhs) == ASHIFT
1105 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1106 && INTVAL (XEXP (rhs, 1)) >= 0
1107 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1109 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1110 rhs = XEXP (rhs, 0);
1113 if (rtx_equal_p (lhs, rhs))
1115 tem = simplify_gen_binary (MULT, mode, lhs,
1116 GEN_INT (coeff0 + coeff1));
1117 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1121 /* If one of the operands is a PLUS or a MINUS, see if we can
1122 simplify this by the associative law.
1123 Don't use the associative law for floating point.
1124 The inaccuracy makes it nonassociative,
1125 and subtle programs can break if operations are associated. */
1127 if (INTEGRAL_MODE_P (mode)
1128 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1129 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1130 || (GET_CODE (op0) == CONST
1131 && GET_CODE (XEXP (op0, 0)) == PLUS)
1132 || (GET_CODE (op1) == CONST
1133 && GET_CODE (XEXP (op1, 0)) == PLUS))
1134 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1140 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1141 using cc0, in which case we want to leave it as a COMPARE
1142 so we can distinguish it from a register-register-copy.
1144 In IEEE floating point, x-0 is not the same as x. */
1146 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1147 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1148 && trueop1 == CONST0_RTX (mode))
1152 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1153 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1154 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1155 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1157 rtx xop00 = XEXP (op0, 0);
1158 rtx xop10 = XEXP (op1, 0);
1161 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1163 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1164 && GET_MODE (xop00) == GET_MODE (xop10)
1165 && REGNO (xop00) == REGNO (xop10)
1166 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1167 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1174 /* We can't assume x-x is 0 even with non-IEEE floating point,
1175 but since it is zero except in very strange circumstances, we
1176 will treat it as zero with -funsafe-math-optimizations. */
1177 if (rtx_equal_p (trueop0, trueop1)
1178 && ! side_effects_p (op0)
1179 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1180 return CONST0_RTX (mode);
1182 /* Change subtraction from zero into negation. (0 - x) is the
1183 same as -x when x is NaN, infinite, or finite and nonzero.
1184 But if the mode has signed zeros, and does not round towards
1185 -infinity, then 0 - 0 is 0, not -0. */
1186 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1187 return gen_rtx_NEG (mode, op1);
1189 /* (-1 - a) is ~a. */
1190 if (trueop0 == constm1_rtx)
1191 return gen_rtx_NOT (mode, op1);
1193 /* Subtracting 0 has no effect unless the mode has signed zeros
1194 and supports rounding towards -infinity. In such a case,
1196 if (!(HONOR_SIGNED_ZEROS (mode)
1197 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1198 && trueop1 == CONST0_RTX (mode))
1201 /* See if this is something like X * C - X or vice versa or
1202 if the multiplication is written as a shift. If so, we can
1203 distribute and make a new multiply, shift, or maybe just
1204 have X (if C is 2 in the example above). But don't make
1205 real multiply if we didn't have one before. */
1207 if (! FLOAT_MODE_P (mode))
1209 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1210 rtx lhs = op0, rhs = op1;
1213 if (GET_CODE (lhs) == NEG)
1214 coeff0 = -1, lhs = XEXP (lhs, 0);
1215 else if (GET_CODE (lhs) == MULT
1216 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1218 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1221 else if (GET_CODE (lhs) == ASHIFT
1222 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1223 && INTVAL (XEXP (lhs, 1)) >= 0
1224 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1226 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1227 lhs = XEXP (lhs, 0);
1230 if (GET_CODE (rhs) == NEG)
1231 coeff1 = - 1, rhs = XEXP (rhs, 0);
1232 else if (GET_CODE (rhs) == MULT
1233 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1235 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1238 else if (GET_CODE (rhs) == ASHIFT
1239 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1240 && INTVAL (XEXP (rhs, 1)) >= 0
1241 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1243 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1244 rhs = XEXP (rhs, 0);
1247 if (rtx_equal_p (lhs, rhs))
1249 tem = simplify_gen_binary (MULT, mode, lhs,
1250 GEN_INT (coeff0 - coeff1));
1251 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1255 /* (a - (-b)) -> (a + b). True even for IEEE. */
1256 if (GET_CODE (op1) == NEG)
1257 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1259 /* If one of the operands is a PLUS or a MINUS, see if we can
1260 simplify this by the associative law.
1261 Don't use the associative law for floating point.
1262 The inaccuracy makes it nonassociative,
1263 and subtle programs can break if operations are associated. */
1265 if (INTEGRAL_MODE_P (mode)
1266 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1267 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1268 || (GET_CODE (op0) == CONST
1269 && GET_CODE (XEXP (op0, 0)) == PLUS)
1270 || (GET_CODE (op1) == CONST
1271 && GET_CODE (XEXP (op1, 0)) == PLUS))
1272 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1275 /* Don't let a relocatable value get a negative coeff. */
1276 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1277 return simplify_gen_binary (PLUS, mode,
1279 neg_const_int (mode, op1));
1281 /* (x - (x & y)) -> (x & ~y) */
1282 if (GET_CODE (op1) == AND)
1284 if (rtx_equal_p (op0, XEXP (op1, 0)))
1285 return simplify_gen_binary (AND, mode, op0,
1286 gen_rtx_NOT (mode, XEXP (op1, 1)));
1287 if (rtx_equal_p (op0, XEXP (op1, 1)))
1288 return simplify_gen_binary (AND, mode, op0,
1289 gen_rtx_NOT (mode, XEXP (op1, 0)));
1294 if (trueop1 == constm1_rtx)
1296 tem = simplify_unary_operation (NEG, mode, op0, mode);
1298 return tem ? tem : gen_rtx_NEG (mode, op0);
1301 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1302 x is NaN, since x * 0 is then also NaN. Nor is it valid
1303 when the mode has signed zeros, since multiplying a negative
1304 number by 0 will give -0, not 0. */
1305 if (!HONOR_NANS (mode)
1306 && !HONOR_SIGNED_ZEROS (mode)
1307 && trueop1 == CONST0_RTX (mode)
1308 && ! side_effects_p (op0))
1311 /* In IEEE floating point, x*1 is not equivalent to x for
1313 if (!HONOR_SNANS (mode)
1314 && trueop1 == CONST1_RTX (mode))
1317 /* Convert multiply by constant power of two into shift unless
1318 we are still generating RTL. This test is a kludge. */
1319 if (GET_CODE (trueop1) == CONST_INT
1320 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1321 /* If the mode is larger than the host word size, and the
1322 uppermost bit is set, then this isn't a power of two due
1323 to implicit sign extension. */
1324 && (width <= HOST_BITS_PER_WIDE_INT
1325 || val != HOST_BITS_PER_WIDE_INT - 1)
1326 && ! rtx_equal_function_value_matters)
1327 return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));
1329 /* x*2 is x+x and x*(-1) is -x */
1330 if (GET_CODE (trueop1) == CONST_DOUBLE
1331 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1332 && GET_MODE (op0) == mode)
1335 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1337 if (REAL_VALUES_EQUAL (d, dconst2))
1338 return gen_rtx_PLUS (mode, op0, copy_rtx (op0));
1340 if (REAL_VALUES_EQUAL (d, dconstm1))
1341 return gen_rtx_NEG (mode, op0);
1346 if (trueop1 == const0_rtx)
1348 if (GET_CODE (trueop1) == CONST_INT
1349 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1350 == GET_MODE_MASK (mode)))
1352 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1354 /* A | (~A) -> -1 */
1355 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1356 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1357 && ! side_effects_p (op0)
1358 && GET_MODE_CLASS (mode) != MODE_CC)
1363 if (trueop1 == const0_rtx)
1365 if (GET_CODE (trueop1) == CONST_INT
1366 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1367 == GET_MODE_MASK (mode)))
1368 return gen_rtx_NOT (mode, op0);
1369 if (trueop0 == trueop1 && ! side_effects_p (op0)
1370 && GET_MODE_CLASS (mode) != MODE_CC)
1375 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1377 if (GET_CODE (trueop1) == CONST_INT
1378 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1379 == GET_MODE_MASK (mode)))
1381 if (trueop0 == trueop1 && ! side_effects_p (op0)
1382 && GET_MODE_CLASS (mode) != MODE_CC)
1385 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1386 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1387 && ! side_effects_p (op0)
1388 && GET_MODE_CLASS (mode) != MODE_CC)
1393 /* Convert divide by power of two into shift (divide by 1 handled
1395 if (GET_CODE (trueop1) == CONST_INT
1396 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1397 return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));
1399 /* ... fall through ... */
1402 if (trueop1 == CONST1_RTX (mode))
1404 /* On some platforms DIV uses narrower mode than its
1406 rtx x = gen_lowpart_common (mode, op0);
1409 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1410 return gen_lowpart_SUBREG (mode, op0);
1415 /* Maybe change 0 / x to 0. This transformation isn't safe for
1416 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1417 Nor is it safe for modes with signed zeros, since dividing
1418 0 by a negative number gives -0, not 0. */
1419 if (!HONOR_NANS (mode)
1420 && !HONOR_SIGNED_ZEROS (mode)
1421 && trueop0 == CONST0_RTX (mode)
1422 && ! side_effects_p (op1))
1425 /* Change division by a constant into multiplication. Only do
1426 this with -funsafe-math-optimizations. */
1427 else if (GET_CODE (trueop1) == CONST_DOUBLE
1428 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1429 && trueop1 != CONST0_RTX (mode)
1430 && flag_unsafe_math_optimizations)
1433 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1435 if (! REAL_VALUES_EQUAL (d, dconst0))
1437 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1438 return gen_rtx_MULT (mode, op0,
1439 CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
1445 /* Handle modulus by power of two (mod with 1 handled below). */
1446 if (GET_CODE (trueop1) == CONST_INT
1447 && exact_log2 (INTVAL (trueop1)) > 0)
1448 return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));
1450 /* ... fall through ... */
1453 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1454 && ! side_effects_p (op0) && ! side_effects_p (op1))
1461 /* Rotating ~0 always results in ~0. */
1462 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1463 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1464 && ! side_effects_p (op1))
1467 /* ... fall through ... */
1471 if (trueop1 == const0_rtx)
1473 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1478 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1479 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1480 && ! side_effects_p (op0))
1482 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1487 if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
1488 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1489 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1490 && ! side_effects_p (op0))
1492 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1497 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1499 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1504 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1506 else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1514 /* ??? There are simplifications that can be done. */
1528 /* Get the integer argument values in two forms:
1529 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
1531 arg0 = INTVAL (trueop0);
1532 arg1 = INTVAL (trueop1);
1534 if (width < HOST_BITS_PER_WIDE_INT)
1536 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
1537 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
1540 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
1541 arg0s |= ((HOST_WIDE_INT) (-1) << width);
1544 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
1545 arg1s |= ((HOST_WIDE_INT) (-1) << width);
1553 /* Compute the value of the arithmetic. */
1558 val = arg0s + arg1s;
1562 val = arg0s - arg1s;
1566 val = arg0s * arg1s;
1571 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1574 val = arg0s / arg1s;
1579 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1582 val = arg0s % arg1s;
1587 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1590 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
1595 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
1598 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
1614 /* If shift count is undefined, don't fold it; let the machine do
1615 what it wants. But truncate it if the machine will do that. */
1619 #ifdef SHIFT_COUNT_TRUNCATED
1620 if (SHIFT_COUNT_TRUNCATED)
1624 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
1631 #ifdef SHIFT_COUNT_TRUNCATED
1632 if (SHIFT_COUNT_TRUNCATED)
1636 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
1643 #ifdef SHIFT_COUNT_TRUNCATED
1644 if (SHIFT_COUNT_TRUNCATED)
1648 val = arg0s >> arg1;
1650 /* Bootstrap compiler may not have sign extended the right shift.
1651 Manually extend the sign to ensure bootstrap cc matches gcc. */
1652 if (arg0s < 0 && arg1 > 0)
1653 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
1662 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
1663 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
1671 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
1672 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
1676 /* Do nothing here. */
1680 val = arg0s <= arg1s ? arg0s : arg1s;
1684 val = ((unsigned HOST_WIDE_INT) arg0
1685 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1689 val = arg0s > arg1s ? arg0s : arg1s;
1693 val = ((unsigned HOST_WIDE_INT) arg0
1694 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
1701 val = trunc_int_for_mode (val, mode);
1703 return GEN_INT (val);
1706 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
1709 Rather than test for specific cases, we do this by a brute-force method
1710 and do all possible simplifications until no more changes occur. Then
1711 we rebuild the operation.
1713 If FORCE is true, then always generate the rtx. This is used to
1714 canonicalize stuff emitted from simplify_gen_binary. Note that this
1715 can still fail if the rtx is too complex. It won't fail just because
1716 the result is not 'simpler' than the input, however. */
/* One entry in the operand array built by simplify_plus_minus; the
   code below accesses an `op' (rtx) member and a `neg' flag recording
   whether the operand is subtracted.  */
1718 struct simplify_plus_minus_op_data
/* qsort comparison callback for the operand array: entries with
   higher commutative_operand_precedence sort earlier (descending
   precedence), so constants migrate to the back of the array.  */
1725 simplify_plus_minus_op_data_cmp (p1, p2)
1729 const struct simplify_plus_minus_op_data *d1 = p1;
1730 const struct simplify_plus_minus_op_data *d2 = p2;
1732 return (commutative_operand_precedence (d2->op)
1733 - commutative_operand_precedence (d1->op));
/* CODE is PLUS or MINUS; MODE is the mode of the result; OP0 and OP1
   are the two operands; FORCE nonzero means build a result rtx even
   when it is no simpler than the input (see the comment above).  */
1737 simplify_plus_minus (code, mode, op0, op1, force)
1739 enum machine_mode mode;
/* Scratch array of addends/subtrahends; at most 8 are handled.  */
1743 struct simplify_plus_minus_op_data ops[8];
1745 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1746 int first, negate, changed;
/* Start from a zeroed array so unused entries read as null.  */
1749 memset ((char *) ops, 0, sizeof ops);
1751 /* Set up the two operands and then expand them until nothing has been
1752 changed. If we run out of room in our array, give up; this should
1753 almost never happen. */
/* Operand 1 starts out negated when the toplevel code is MINUS.  */
1758 ops[1].neg = (code == MINUS);
1764 for (i = 0; i < n_ops; i++)
1766 rtx this_op = ops[i].op;
1767 int this_neg = ops[i].neg;
1768 enum rtx_code this_code = GET_CODE (this_op);
/* Split a nested PLUS/MINUS into two array entries, flipping the
   negation of the second operand when this_code is MINUS.  */
1777 ops[n_ops].op = XEXP (this_op, 1);
1778 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1781 ops[i].op = XEXP (this_op, 0);
/* A NEG simply toggles the entry's negation flag.  */
1787 ops[i].op = XEXP (this_op, 0);
1788 ops[i].neg = ! this_neg;
/* Split (const (plus C1 C2)) into its two constant parts.  */
1794 && GET_CODE (XEXP (this_op, 0)) == PLUS
1795 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1796 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1798 ops[i].op = XEXP (XEXP (this_op, 0), 0);
1799 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1800 ops[n_ops].neg = this_neg;
1808 /* ~a -> (-a - 1) */
1811 ops[n_ops].op = constm1_rtx;
1812 ops[n_ops++].neg = this_neg;
1813 ops[i].op = XEXP (this_op, 0);
1814 ops[i].neg = !this_neg;
/* Fold a negated CONST_INT into its negated value right away.  */
1822 ops[i].op = neg_const_int (mode, this_op);
1835 /* If we only have two operands, we can't do anything. */
1836 if (n_ops <= 2 && !force)
1839 /* Count the number of CONSTs we didn't split above. */
1840 for (i = 0; i < n_ops; i++)
1841 if (GET_CODE (ops[i].op) == CONST)
1844 /* Now simplify each pair of operands until nothing changes. The first
1845 time through just simplify constants against each other. */
1852 for (i = 0; i < n_ops - 1; i++)
1853 for (j = i + 1; j < n_ops; j++)
1855 rtx lhs = ops[i].op, rhs = ops[j].op;
1856 int lneg = ops[i].neg, rneg = ops[j].neg;
1858 if (lhs != 0 && rhs != 0
1859 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
1861 enum rtx_code ncode = PLUS;
1867 tem = lhs, lhs = rhs, rhs = tem;
1869 else if (swap_commutative_operands_p (lhs, rhs))
1870 tem = lhs, lhs = rhs, rhs = tem;
1872 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
1874 /* Reject "simplifications" that just wrap the two
1875 arguments in a CONST. Failure to do so can result
1876 in infinite recursion with simplify_binary_operation
1877 when it calls us to simplify CONST operations. */
1879 && ! (GET_CODE (tem) == CONST
1880 && GET_CODE (XEXP (tem, 0)) == ncode
1881 && XEXP (XEXP (tem, 0), 0) == lhs
1882 && XEXP (XEXP (tem, 0), 1) == rhs)
1883 /* Don't allow -x + -1 -> ~x simplifications in the
1884 first pass. This allows us the chance to combine
1885 the -1 with other constants. */
1887 && GET_CODE (tem) == NOT
1888 && XEXP (tem, 0) == rhs))
/* Strip an outer NEG from the folded result by toggling lneg; a
   negated CONST_INT is materialized immediately.  */
1891 if (GET_CODE (tem) == NEG)
1892 tem = XEXP (tem, 0), lneg = !lneg;
1893 if (GET_CODE (tem) == CONST_INT && lneg)
1894 tem = neg_const_int (mode, tem), lneg = 0;
/* Entry j was merged into entry i; mark it empty.  */
1898 ops[j].op = NULL_RTX;
1908 /* Pack all the operands to the lower-numbered entries. */
1909 for (i = 0, j = 0; j < n_ops; j++)
1914 /* Sort the operations based on swap_commutative_operands_p. */
1915 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
1917 /* We suppressed creation of trivial CONST expressions in the
1918 combination loop to avoid recursion. Create one manually now.
1919 The combination loop should have ensured that there is exactly
1920 one CONST_INT, and the sort will have ensured that it is last
1921 in the array and that any other constant will be next-to-last. */
1924 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
1925 && CONSTANT_P (ops[n_ops - 2].op))
1927 rtx value = ops[n_ops - 1].op;
/* If the two trailing constants carry opposite signs, negate the
   CONST_INT before folding it into the other constant.  */
1928 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
1929 value = neg_const_int (mode, value);
1930 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
1934 /* Count the number of CONSTs that we generated. */
1936 for (i = 0; i < n_ops; i++)
1937 if (GET_CODE (ops[i].op) == CONST)
1940 /* Give up if we didn't reduce the number of operands we had. Make
1941 sure we count a CONST as two operands. If we have the same
1942 number of operands, but have made more CONSTs than before, this
1943 is also an improvement, so accept it. */
1945 && (n_ops + n_consts > input_ops
1946 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
1949 /* Put a non-negated operand first. If there aren't any, make all
1950 operands positive and negate the whole thing later. */
1953 for (i = 0; i < n_ops && ops[i].neg; i++)
1957 for (i = 0; i < n_ops; i++)
1969 /* Now make the result by performing the requested operations. */
1971 for (i = 1; i < n_ops; i++)
1972 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
1973 mode, result, ops[i].op);
/* NEGATE is set when every operand was negated; wrap the sum.  */
1975 return negate ? gen_rtx_NEG (mode, result) : result;
1978 /* Like simplify_binary_operation except used for relational operators.
1979 MODE is the mode of the operands, not that of the result. If MODE
1980 is VOIDmode, both operands must also be VOIDmode and we compare the
1981 operands in "infinite precision".
1983 If no simplification is possible, this function returns zero. Otherwise,
1984 it returns either const_true_rtx or const0_rtx. */
1987 simplify_relational_operation (code, mode, op0, op1)
1989 enum machine_mode mode;
/* Comparison outcome flags: equality, signed less-than in each
   direction (op0lt/op1lt) and their unsigned counterparts.  */
1992 int equal, op0lt, op0ltu, op1lt, op1ltu;
/* A VOIDmode comparison only makes sense when both operands are
   themselves VOIDmode ("infinite precision") values.  */
1997 if (mode == VOIDmode
1998 && (GET_MODE (op0) != VOIDmode
1999 || GET_MODE (op1) != VOIDmode))
2002 /* If op0 is a compare, extract the comparison arguments from it. */
2003 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2004 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
/* Look through constant-pool references for folding purposes while
   keeping the original op0/op1 for any rtl we generate.  */
2006 trueop0 = avoid_constant_pool_reference (op0);
2007 trueop1 = avoid_constant_pool_reference (op1);
2009 /* We can't simplify MODE_CC values since we don't know what the
2010 actual comparison is. */
2011 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
2018 /* Make sure the constant is second. */
2019 if (swap_commutative_operands_p (trueop0, trueop1))
2021 tem = op0, op0 = op1, op1 = tem;
2022 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
/* Swapping the operands requires reversing the comparison code.  */
2023 code = swap_condition (code);
2026 /* For integer comparisons of A and B maybe we can simplify A - B and can
2027 then simplify a comparison of that with zero. If A and B are both either
2028 a register or a CONST_INT, this can't help; testing for these cases will
2029 prevent infinite recursion here and speed things up.
2031 If CODE is an unsigned comparison, then we can never do this optimization,
2032 because it gives an incorrect result if the subtraction wraps around zero.
2033 ANSI C defines unsigned operations such that they never overflow, and
2034 thus such cases can not be ignored. */
2036 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2037 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2038 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2039 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2040 && code != GTU && code != GEU && code != LTU && code != LEU)
2041 return simplify_relational_operation (signed_condition (code),
2042 mode, tem, const0_rtx);
/* With -funsafe-math-optimizations, assume no operand is a NaN, so
   ORDERED is always true and UNORDERED always false.  */
2044 if (flag_unsafe_math_optimizations && code == ORDERED)
2045 return const_true_rtx;
2047 if (flag_unsafe_math_optimizations && code == UNORDERED)
2050 /* For modes without NaNs, if the two operands are equal, we know the
2052 if (!HONOR_NANS (GET_MODE (trueop0)) && rtx_equal_p (trueop0, trueop1))
2053 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2055 /* If the operands are floating-point constants, see if we can fold
2057 else if (GET_CODE (trueop0) == CONST_DOUBLE
2058 && GET_CODE (trueop1) == CONST_DOUBLE
2059 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2061 REAL_VALUE_TYPE d0, d1;
2063 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2064 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2066 /* Comparisons are unordered iff at least one of the values is NaN. */
2067 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2077 return const_true_rtx;
2090 equal = REAL_VALUES_EQUAL (d0, d1);
2091 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2092 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2095 /* Otherwise, see if the operands are both integers. */
2096 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2097 && (GET_CODE (trueop0) == CONST_DOUBLE
2098 || GET_CODE (trueop0) == CONST_INT)
2099 && (GET_CODE (trueop1) == CONST_DOUBLE
2100 || GET_CODE (trueop1) == CONST_INT))
2102 int width = GET_MODE_BITSIZE (mode);
/* (low, high) word pairs for each constant, kept in both signed
   (l0s/h0s, l1s/h1s) and unsigned (l0u/h0u, l1u/h1u) form.  */
2103 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2104 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2106 /* Get the two words comprising each integer constant. */
2107 if (GET_CODE (trueop0) == CONST_DOUBLE)
2109 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2110 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2114 l0u = l0s = INTVAL (trueop0);
2115 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2118 if (GET_CODE (trueop1) == CONST_DOUBLE)
2120 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2121 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2125 l1u = l1s = INTVAL (trueop1);
2126 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2129 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2130 we have to sign or zero-extend the values. */
2131 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2133 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2134 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2136 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2137 l0s |= ((HOST_WIDE_INT) (-1) << width);
2139 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2140 l1s |= ((HOST_WIDE_INT) (-1) << width);
2142 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2143 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
/* Double-word comparison: high words decide; low words break ties.  */
2145 equal = (h0u == h1u && l0u == l1u);
2146 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2147 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2148 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2149 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2152 /* Otherwise, there are some code-specific tests we can make. */
/* A known-nonzero address compared against zero folds directly.  */
2158 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2163 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2164 return const_true_rtx;
2168 /* Unsigned values are never negative. */
2169 if (trueop1 == const0_rtx)
2170 return const_true_rtx;
2174 if (trueop1 == const0_rtx)
2179 /* Unsigned values are never greater than the largest
2181 if (GET_CODE (trueop1) == CONST_INT
2182 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2183 && INTEGRAL_MODE_P (mode))
2184 return const_true_rtx;
2188 if (GET_CODE (trueop1) == CONST_INT
2189 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2190 && INTEGRAL_MODE_P (mode))
2195 /* Optimize abs(x) < 0.0. */
2196 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2198 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2200 if (GET_CODE (tem) == ABS)
2206 /* Optimize abs(x) >= 0.0. */
2207 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2209 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2211 if (GET_CODE (tem) == ABS)
2223 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2229 return equal ? const_true_rtx : const0_rtx;
2232 return ! equal ? const_true_rtx : const0_rtx;
2235 return op0lt ? const_true_rtx : const0_rtx;
2238 return op1lt ? const_true_rtx : const0_rtx;
2240 return op0ltu ? const_true_rtx : const0_rtx;
2242 return op1ltu ? const_true_rtx : const0_rtx;
2245 return equal || op0lt ? const_true_rtx : const0_rtx;
2248 return equal || op1lt ? const_true_rtx : const0_rtx;
2250 return equal || op0ltu ? const_true_rtx : const0_rtx;
2252 return equal || op1ltu ? const_true_rtx : const0_rtx;
2254 return const_true_rtx;
2262 /* Simplify CODE, an operation with result mode MODE and three operands,
2263 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2264 a constant. Return 0 if no simplification is possible. */
2267 simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
2269 enum machine_mode mode, op0_mode;
2272 unsigned int width = GET_MODE_BITSIZE (mode);
2274 /* VOIDmode means "infinite" precision. */
2276 width = HOST_BITS_PER_WIDE_INT;
/* SIGN_EXTRACT/ZERO_EXTRACT of constants: OP1 is the field width in
   bits and OP2 the starting bit position.  */
2282 if (GET_CODE (op0) == CONST_INT
2283 && GET_CODE (op1) == CONST_INT
2284 && GET_CODE (op2) == CONST_INT
2285 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2286 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2288 /* Extracting a bit-field from a constant */
2289 HOST_WIDE_INT val = INTVAL (op0);
/* Bit positions count from the opposite end on BITS_BIG_ENDIAN
   targets.  */
2291 if (BITS_BIG_ENDIAN)
2292 val >>= (GET_MODE_BITSIZE (op0_mode)
2293 - INTVAL (op2) - INTVAL (op1));
2295 val >>= INTVAL (op2);
2297 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2299 /* First zero-extend. */
2300 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2301 /* If desired, propagate sign bit. */
2302 if (code == SIGN_EXTRACT
2303 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2304 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2307 /* Clear the bits that don't belong in our mode,
2308 unless they and our sign bit are all one.
2309 So we get either a reasonable negative value or a reasonable
2310 unsigned value for this mode. */
2311 if (width < HOST_BITS_PER_WIDE_INT
2312 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2313 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2314 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2316 return GEN_INT (val);
/* A constant condition selects one arm outright.  */
2321 if (GET_CODE (op0) == CONST_INT)
2322 return op0 != const0_rtx ? op1 : op2;
2324 /* Convert a == b ? b : a to "a". */
2325 if (GET_CODE (op0) == NE && ! side_effects_p (op0)
2326 && !HONOR_NANS (mode)
2327 && rtx_equal_p (XEXP (op0, 0), op1)
2328 && rtx_equal_p (XEXP (op0, 1), op2))
2330 else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
2331 && !HONOR_NANS (mode)
2332 && rtx_equal_p (XEXP (op0, 1), op1)
2333 && rtx_equal_p (XEXP (op0, 0), op2))
/* For a comparison condition, try folding the comparison itself.  */
2335 else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2337 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2338 ? GET_MODE (XEXP (op0, 1))
2339 : GET_MODE (XEXP (op0, 0)));
2341 if (cmp_mode == VOIDmode)
2342 cmp_mode = op0_mode;
2343 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2344 XEXP (op0, 0), XEXP (op0, 1));
2346 /* See if any simplifications were possible. */
2347 if (temp == const0_rtx)
2349 else if (temp == const1_rtx)
2354 /* Look for happy constants in op1 and op2. */
2355 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2357 HOST_WIDE_INT t = INTVAL (op1);
2358 HOST_WIDE_INT f = INTVAL (op2);
/* cond ? STORE_FLAG_VALUE : 0 is just the condition; the reversed
   case uses the reversed comparison code.  */
2360 if (t == STORE_FLAG_VALUE && f == 0)
2361 code = GET_CODE (op0);
2362 else if (t == 0 && f == STORE_FLAG_VALUE)
2365 tmp = reversed_comparison_code (op0, NULL_RTX);
2373 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
/* VEC_MERGE of constant vectors with a constant selector: pick each
   element from OP0 or OP1 according to the selector bits.  */
2378 if (GET_MODE (op0) != mode
2379 || GET_MODE (op1) != mode
2380 || !VECTOR_MODE_P (mode))
2382 op0 = avoid_constant_pool_reference (op0);
2383 op1 = avoid_constant_pool_reference (op1);
2384 op2 = avoid_constant_pool_reference (op2);
2385 if (GET_CODE (op0) == CONST_VECTOR
2386 && GET_CODE (op1) == CONST_VECTOR
2387 && GET_CODE (op2) == CONST_INT)
2389 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2390 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2391 rtvec v = rtvec_alloc (n_elts);
2394 for (i = 0; i < n_elts; i++)
2395 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2396 ? CONST_VECTOR_ELT (op0, i)
2397 : CONST_VECTOR_ELT (op1, i));
2398 return gen_rtx_CONST_VECTOR (mode, v);
2409 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
2410 Return 0 if no simplification is possible. */
2412 simplify_subreg (outermode, op, innermode, byte)
2415 enum machine_mode outermode, innermode;
2417 /* Little bit of sanity checking. */
2418 if (innermode == VOIDmode || outermode == VOIDmode
2419 || innermode == BLKmode || outermode == BLKmode)
2422 if (GET_MODE (op) != innermode
2423 && GET_MODE (op) != VOIDmode)
/* BYTE must be aligned to the outer mode and lie inside the inner
   value.  */
2426 if (byte % GET_MODE_SIZE (outermode)
2427 || byte >= GET_MODE_SIZE (innermode))
/* The trivial subreg is the operand itself.  */
2430 if (outermode == innermode && !byte)
2433 /* Simplify subregs of vector constants. */
2434 if (GET_CODE (op) == CONST_VECTOR)
2436 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
2437 const unsigned int offset = byte / elt_size;
/* Extracting exactly one element of the vector.  */
2440 if (GET_MODE_INNER (innermode) == outermode)
2442 elt = CONST_VECTOR_ELT (op, offset);
2444 /* ?? We probably don't need this copy_rtx because constants
2445 can be shared. ?? */
2447 return copy_rtx (elt);
/* Extracting a smaller vector with the same element mode.  */
2449 else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
2450 && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
2452 return (gen_rtx_CONST_VECTOR
2454 gen_rtvec_v (GET_MODE_NUNITS (outermode),
2455 &CONST_VECTOR_ELT (op, offset))));
2457 else if (GET_MODE_CLASS (outermode) == MODE_INT
2458 && (GET_MODE_SIZE (outermode) % elt_size == 0))
2460 /* This happens when the target register size is smaller than
2461 the vector mode, and we synthesize operations with vectors
2462 of elements that are smaller than the register size. */
2463 HOST_WIDE_INT sum = 0, high = 0;
2464 unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
2465 unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
2466 unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
2467 int shift = BITS_PER_UNIT * elt_size;
/* Accumulate the selected elements into a (sum, high) double-word
   integer, walking them in target byte order.  */
2469 for (; n_elts--; i += step)
2471 elt = CONST_VECTOR_ELT (op, i);
2472 if (GET_CODE (elt) == CONST_DOUBLE
2473 && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
2475 elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
2480 if (GET_CODE (elt) != CONST_INT)
2482 /* Avoid overflow. */
2483 if (high >> (HOST_BITS_PER_WIDE_INT - shift))
2485 high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
2486 sum = (sum << shift) + INTVAL (elt);
2488 if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
2489 return GEN_INT (trunc_int_for_mode (sum, outermode));
2490 else if (GET_MODE_BITSIZE (outermode) == 2* HOST_BITS_PER_WIDE_INT)
2491 return immed_double_const (sum, high, outermode);
/* Extracting an integer that is a piece of one vector element:
   recurse on the containing element first, then on the piece.  */
2495 else if (GET_MODE_CLASS (outermode) == MODE_INT
2496 && (elt_size % GET_MODE_SIZE (outermode) == 0))
2498 enum machine_mode new_mode
2499 = int_mode_for_mode (GET_MODE_INNER (innermode));
2500 int subbyte = byte % elt_size;
2502 op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
2505 return simplify_subreg (outermode, op, new_mode, subbyte);
2507 else if (GET_MODE_CLASS (outermode) == MODE_INT)
2508 /* This shouldn't happen, but let's not do anything stupid. */
2512 /* Attempt to simplify constant to non-SUBREG expression. */
2513 if (CONSTANT_P (op))
2516 unsigned HOST_WIDE_INT val = 0;
2518 if (GET_MODE_CLASS (outermode) == MODE_VECTOR_INT
2519 || GET_MODE_CLASS (outermode) == MODE_VECTOR_FLOAT)
2521 /* Construct a CONST_VECTOR from individual subregs. */
2522 enum machine_mode submode = GET_MODE_INNER (outermode);
2523 int subsize = GET_MODE_UNIT_SIZE (outermode);
2524 int i, elts = GET_MODE_NUNITS (outermode);
2525 rtvec v = rtvec_alloc (elts);
2528 for (i = 0; i < elts; i++, byte += subsize)
2530 /* This might fail, e.g. if taking a subreg from a SYMBOL_REF. */
2531 /* ??? It would be nice if we could actually make such subregs
2532 on targets that allow such relocations. */
2533 if (byte >= GET_MODE_UNIT_SIZE (innermode))
2534 elt = CONST0_RTX (submode);
2536 elt = simplify_subreg (submode, op, innermode, byte);
2539 RTVEC_ELT (v, i) = elt;
2541 return gen_rtx_CONST_VECTOR (outermode, v);
2544 /* ??? This code is partly redundant with code below, but can handle
2545 the subregs of floats and similar corner cases.
2546 Later we should move all simplification code here and rewrite
2547 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
2548 using SIMPLIFY_SUBREG. */
2549 if (subreg_lowpart_offset (outermode, innermode) == byte
2550 && GET_CODE (op) != CONST_VECTOR)
2552 rtx new = gen_lowpart_if_possible (outermode, op);
2557 /* A similar comment to the one above applies here. */
2558 if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
2559 && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
2560 && GET_MODE_CLASS (outermode) == MODE_INT)
2562 rtx new = constant_subword (op,
2563 (byte / UNITS_PER_WORD),
/* For a non-integer, non-CC outer mode, go through the corresponding
   integer mode and convert back afterwards.  */
2569 if (GET_MODE_CLASS (outermode) != MODE_INT
2570 && GET_MODE_CLASS (outermode) != MODE_CC)
2572 enum machine_mode new_mode = int_mode_for_mode (outermode);
2574 if (new_mode != innermode || byte != 0)
2576 op = simplify_subreg (new_mode, op, innermode, byte);
2579 return simplify_subreg (outermode, op, new_mode, 0);
2583 offset = byte * BITS_PER_UNIT;
2584 switch (GET_CODE (op))
2587 if (GET_MODE (op) != VOIDmode)
2590 /* We can't handle this case yet. */
2591 if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
/* Select the low or high word of the CONST_DOUBLE, honoring the
   target's word endianness.  */
2594 part = offset >= HOST_BITS_PER_WIDE_INT;
2595 if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
2596 && BYTES_BIG_ENDIAN)
2597 || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
2598 && WORDS_BIG_ENDIAN))
2600 val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
2601 offset %= HOST_BITS_PER_WIDE_INT;
2603 /* We've already picked the word we want from a double, so
2604 pretend this is actually an integer. */
2605 innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
2609 if (GET_CODE (op) == CONST_INT)
2612 /* We don't handle synthesizing of non-integral constants yet. */
2613 if (GET_MODE_CLASS (outermode) != MODE_INT)
/* Adjust the bit offset for big-endian byte/word ordering.  */
2616 if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
2618 if (WORDS_BIG_ENDIAN)
2619 offset = (GET_MODE_BITSIZE (innermode)
2620 - GET_MODE_BITSIZE (outermode) - offset)
2621 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
2622 && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
2623 offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
2624 - 2 * (offset % BITS_PER_WORD));
/* A shift past the value leaves only sign bits.  */
2627 if (offset >= HOST_BITS_PER_WIDE_INT)
2628 return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
2632 if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
2633 val = trunc_int_for_mode (val, outermode);
2634 return GEN_INT (val);
2641 /* Changing mode twice with SUBREG => just change it once,
2642 or not at all if changing back op starting mode. */
2643 if (GET_CODE (op) == SUBREG)
2645 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
2646 int final_offset = byte + SUBREG_BYTE (op);
2649 if (outermode == innermostmode
2650 && byte == 0 && SUBREG_BYTE (op) == 0)
2651 return SUBREG_REG (op);
2653 /* The SUBREG_BYTE represents offset, as if the value were stored
2654 in memory. Irritating exception is paradoxical subreg, where
2655 we define SUBREG_BYTE to be 0. On big endian machines, this
2656 value should be negative. For a moment, undo this exception. */
2657 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
2659 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
2660 if (WORDS_BIG_ENDIAN)
2661 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2662 if (BYTES_BIG_ENDIAN)
2663 final_offset += difference % UNITS_PER_WORD;
2665 if (SUBREG_BYTE (op) == 0
2666 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
2668 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
2669 if (WORDS_BIG_ENDIAN)
2670 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2671 if (BYTES_BIG_ENDIAN)
2672 final_offset += difference % UNITS_PER_WORD;
2675 /* See whether resulting subreg will be paradoxical. */
2676 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
2678 /* In nonparadoxical subregs we can't handle negative offsets. */
2679 if (final_offset < 0)
2681 /* Bail out in case resulting subreg would be incorrect. */
2682 if (final_offset % GET_MODE_SIZE (outermode)
2683 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
2689 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
2691 /* In paradoxical subreg, see if we are still looking on lower part.
2692 If so, our SUBREG_BYTE will be 0. */
2693 if (WORDS_BIG_ENDIAN)
2694 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
2695 if (BYTES_BIG_ENDIAN)
2696 offset += difference % UNITS_PER_WORD;
2697 if (offset == final_offset)
2703 /* Recurse for further possible simplifications. */
2704 new = simplify_subreg (outermode, SUBREG_REG (op),
2705 GET_MODE (SUBREG_REG (op)),
2709 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
2712 /* SUBREG of a hard register => just change the register number
2713 and/or mode. If the hard register is not valid in that mode,
2714 suppress this simplification. If the hard register is the stack,
2715 frame, or argument pointer, leave this as a SUBREG. */
2718 && (! REG_FUNCTION_VALUE_P (op)
2719 || ! rtx_equal_function_value_matters)
2720 && REGNO (op) < FIRST_PSEUDO_REGISTER
2721 #ifdef CANNOT_CHANGE_MODE_CLASS
2722 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
2723 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
2724 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
2726 && ((reload_completed && !frame_pointer_needed)
2727 || (REGNO (op) != FRAME_POINTER_REGNUM
2728 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
2729 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
2732 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
2733 && REGNO (op) != ARG_POINTER_REGNUM
2735 && REGNO (op) != STACK_POINTER_REGNUM)
2737 int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
2740 /* ??? We do allow it if the current REG is not valid for
2741 its mode. This is a kludge to work around how float/complex
2742 arguments are passed on 32-bit SPARC and should be fixed. */
2743 if (HARD_REGNO_MODE_OK (final_regno, outermode)
2744 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
2746 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
2748 /* Propagate original regno. We don't have any way to specify
2749 the offset inside original regno, so do so only for lowpart.
2750 The information is used only by alias analysis that can not
2751 grok partial register anyway. */
2753 if (subreg_lowpart_offset (outermode, innermode) == byte)
2754 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
2759 /* If we have a SUBREG of a register that we are replacing and we are
2760 replacing it with a MEM, make a new MEM and try replacing the
2761 SUBREG with it. Don't do this if the MEM has a mode-dependent address
2762 or if we would be widening it. */
2764 if (GET_CODE (op) == MEM
2765 && ! mode_dependent_address_p (XEXP (op, 0))
2766 /* Allow splitting of volatile memory references in case we don't
2767 have instruction to move the whole thing. */
2768 && (! MEM_VOLATILE_P (op)
2769 || ! have_insn_for (SET, innermode))
2770 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
2771 return adjust_address_nv (op, outermode, byte);
2773 /* Handle complex values represented as CONCAT
2774 of real and imaginary part. */
2775 if (GET_CODE (op) == CONCAT)
2777 int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
2778 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
2779 unsigned int final_offset;
2782 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
2783 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
2786 /* We can at least simplify it by referring directly to the relevant part. */
2787 return gen_rtx_SUBREG (outermode, part, final_offset);
2792 /* Make a SUBREG operation or equivalent if it folds.  */
2795 simplify_gen_subreg (outermode, op, innermode, byte)
2798      enum machine_mode outermode, innermode;
     /* Wrapper around simplify_subreg: validate the requested
        (OP, BYTE) subreg, try to fold it to a simpler rtx, and
        otherwise build an explicit SUBREG.
        NOTE(review): this excerpt elides some lines -- presumably the
        abort () bodies of the sanity checks below and the remaining
        parameter declarations (rtx op; unsigned int byte;) -- confirm
        against the full file.  */
2801   /* Little bit of sanity checking.  */
     /* Neither mode may be VOIDmode or BLKmode; a subreg of those is
        meaningless.  */
2802   if (innermode == VOIDmode || outermode == VOIDmode
2803       || innermode == BLKmode || outermode == BLKmode)
     /* OP's mode, when it has one, must agree with the stated inner
        mode.  */
2806   if (GET_MODE (op) != innermode
2807       && GET_MODE (op) != VOIDmode)
     /* BYTE must be aligned to the outer mode's size and must lie
        inside the inner value.  */
2810   if (byte % GET_MODE_SIZE (outermode)
2811       || byte >= GET_MODE_SIZE (innermode))
     /* QUEUED rtxes (pending post-increment values) cannot be
        simplified here.  */
2814   if (GET_CODE (op) == QUEUED)
     /* Attempt the actual folding; a non-null result is returned to
        the caller (the return is elided in this excerpt).  */
2817   new = simplify_subreg (outermode, op, innermode, byte);
     /* A SUBREG of a SUBREG, or of a mode-less constant, cannot be
        represented as a plain SUBREG rtx.
        NOTE(review): the action taken in this branch is elided in
        this excerpt -- confirm against the full file.  */
2821   if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
     /* No simplification was possible; emit the SUBREG explicitly.  */
2824   return gen_rtx_SUBREG (outermode, op, byte);
2826 /* Simplify X, an rtx expression.
2828 Return the simplified expression or NULL if no simplifications
2831 This is the preferred entry point into the simplification routines;
2832 however, we still allow passes to call the more specific routines.
2834 Right now GCC has three (yes, three) major bodies of RTL simplification
2835 code that need to be unified.
2837 1. fold_rtx in cse.c. This code uses various CSE specific
2838 information to aid in RTL simplification.
2840 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
2841 it uses combine specific information to aid in RTL
2844 3. The routines in this file.
2847 Long term we want to only have one body of simplification code; to
2848 get to that state I recommend the following steps:
2850    1. Pore over fold_rtx & simplify_rtx and move any simplifications
2851 which are not pass dependent state into these routines.
2853 2. As code is moved by #1, change fold_rtx & simplify_rtx to
2854 use this routine whenever possible.
2856 3. Allow for pass dependent state to be provided to these
2857 routines and add simplifications based on the pass dependent
2858 state. Remove code from cse.c & combine.c that becomes
2861 It will take time, but ultimately the compiler will be easier to
2862 maintain and improve. It's totally silly that when we add a
2863 simplification that it needs to be added to 4 places (3 for RTL
2864    simplification and 1 for tree simplification).  */
2870 enum rtx_code code = GET_CODE (x);
2871 enum machine_mode mode = GET_MODE (x);
2873 switch (GET_RTX_CLASS (code))
2876 return simplify_unary_operation (code, mode,
2877 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
2879 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
2884 XEXP (x, 0) = XEXP (x, 1);
2886 return simplify_binary_operation (code, mode,
2887 XEXP (x, 0), XEXP (x, 1));
2891 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
2895 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
2896 XEXP (x, 0), XEXP (x, 1),
2900 return simplify_relational_operation (code,
2901 ((GET_MODE (XEXP (x, 0))
2903 ? GET_MODE (XEXP (x, 0))
2904 : GET_MODE (XEXP (x, 1))),
2905 XEXP (x, 0), XEXP (x, 1));
2908 return simplify_gen_subreg (mode, SUBREG_REG (x),
2909 GET_MODE (SUBREG_REG (x)),
2911 if (code == CONSTANT_P_RTX)
2913 if (CONSTANT_P (XEXP (x,0)))