/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
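
/* For example (illustrative only): under the (low, high) convention
   above, a negative-looking LOW word extends to an all-ones HIGH word,

     HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5)  ==  (HOST_WIDE_INT) -1
     HWI_SIGN_EXTEND ((HOST_WIDE_INT) 42)  ==  (HOST_WIDE_INT) 0

   so the pair (42, HWI_SIGN_EXTEND (42)) represents 42 as a
   double-word integer.  */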
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
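
/* E.g. (illustrative): in QImode, negating the most negative value
   wraps around rather than overflowing,

     neg_const_int (QImode, GEN_INT (-128))  ==>  (const_int -128)

   because +128 does not fit in QImode and gen_int_mode truncates.  */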
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
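
/* For instance (illustrative), on a target where SImode is 32 bits,
     mode_signbit_p (SImode, GEN_INT (0x80000000))  returns true,
     mode_signbit_p (SImode, GEN_INT (0x40000000))  returns false,
   since only the former is exactly the most significant bit.  */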
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }
  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
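
/* Illustrative use (hypothetical caller): folding a constant addition,

     rtx x = simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3));

   yields (const_int 5) instead of (plus (const_int 2) (const_int 3));
   when nothing folds, the raw rtx is formed as a last resort.  The
   simplify_gen_unary/_ternary/_relational helpers below follow the
   same try-to-fold-then-generate pattern.  */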
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;
    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;
    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }
  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */
rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;
  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */
rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;
  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */
rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;
  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */
rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
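
/* Illustrative use (hypothetical names): rewriting
   x = (plus (reg 60) (const_int 4)) with (reg 60) := (const_int 8),

     rtx res = simplify_replace_rtx (x, reg60, GEN_INT (8));

   returns (const_int 12), since the substituted expression re-enters
   the simplifiers through simplify_gen_binary.  */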
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (trueop) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (trueop)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (trueop)));
        }
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
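          /* The expression arg0 & -arg0 isolates the lowest set bit
             (e.g. 0x18 & -0x18 == 0x08), so exact_log2 of it gives that
             bit's index, and FFS is that index plus one:
               arg0 = 0x18 (binary 11000)  =>  val = 3 + 1 = 4.  */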
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
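          /* Both POPCOUNT and PARITY use the classic trick that
             arg0 &= arg0 - 1 clears the lowest set bit, so the loop
             runs once per set bit:
               0b1011 -> 0b1010 -> 0b1000 -> 0  (popcount 3, parity 1).  */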
          val &= 1;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;
        }

      val = trunc_int_for_mode (val, mode);
      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;
        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;
        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;
        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;
        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;
        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;
        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;
        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
              hv = HWI_SIGN_EXTEND (lv);
            }
          break;
        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (trueop));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            th = 0, tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th, xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            th = -1, tl = (HOST_WIDE_INT) -1 << (width - 1);
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th, xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            th = -1, tl = -1;
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            th = 0, tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th, xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (COMPARISON_P (op)
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1), mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == PLUS
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && mode_signbit_p (mode, XEXP (op, 1))
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1), mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }
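
          /* E.g. with 8-bit values and X == 2:
               (not (ashift 1 2)) = ~0b00000100 = 0b11111011,
               (rotate ~1 2)      = 0b11111110 rotated left by 2
                                  = 0b11111011,
             which is why this is only valid when the shifted constant
             is exactly 1.  */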
          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && COMPARISON_P (op)
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);
          break;
        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (LSHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == LSHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));
          break;
        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && REG_P (SUBREG_REG (op))
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && REG_P (SUBREG_REG (op))
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
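
/* Illustrative effect (hypothetical operands): with a a pseudo
   register, reassociating

     (plus (plus a (const_int 2)) (const_int 3))

   as "a op (b op c)" lets the two constants meet and fold, giving
   (plus a (const_int 5)).  */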
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND || code == IOR || code == XOR)
        {
          long tmp0[4], tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            switch (code)
              {
              case AND: tmp0[i] &= tmp1[i]; break;
              case IOR: tmp0[i] |= tmp1[i]; break;
              case XOR: tmp0[i] ^= tmp1[i]; break;
              default: gcc_unreachable ();
              }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
          f0 = real_value_truncate (mode, f0);
          f1 = real_value_truncate (mode, f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

          value = real_value_truncate (mode, value);
          return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
        }
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;
          /* Fall through....  */
        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;
        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;
        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;
        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;
        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;
        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;
        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;
        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;
        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;
        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;
        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;
        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;
        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;
        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;
        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             something more expensive than we had before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  rtx orig = gen_rtx_PLUS (mode, op0, op1);
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                         ? tem : 0;
                }
            }
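
          /* Concretely (illustrative): for (plus (mult x 4) x) the code
             above sets coeff0 = 4, coeff1 = 1 with lhs == rhs == x, so
             the sum folds to (mult x 5); likewise (plus (ashift x 2) x)
             is treated as 4*x + 1*x.  The rtx_cost check keeps the
             cheaper form.  */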
          /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == XOR
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (plus_minus_operand_p (op0)
                  || plus_minus_operand_p (op1))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;
        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (REG_P (xop00) && REG_P (xop10)
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);
          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             something more expensive than we had before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  rtx orig = gen_rtx_MINUS (mode, op0, op1);
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                         ? tem : 0;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* (-x - c) may be simplified as (-c - x).  */
          if (GET_CODE (op0) == NEG
              && (GET_CODE (op1) == CONST_INT
                  || GET_CODE (op1) == CONST_DOUBLE))
            {
              tem = simplify_unary_operation (NEG, mode, op1, mode);
              if (tem)
                return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
            }
          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (plus_minus_operand_p (op0)
                  || plus_minus_operand_p (op1))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;
        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1))
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
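
          /* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3)),
             since exact_log2 (8) == 3; a constant such as 6, or one whose
             only set bit is the host sign bit, fails the test and is
             left alone.  */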
          /* x*2 is x+x and x*(-1) is -x  */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;

          /* Canonicalize XOR of the most significant bit to PLUS.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && mode_signbit_p (mode, op1))
            return simplify_gen_binary (PLUS, mode, op0, op1);
          /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
          if ((GET_CODE (op1) == CONST_INT
               || GET_CODE (op1) == CONST_DOUBLE)
              && GET_CODE (op0) == PLUS
              && (GET_CODE (XEXP (op0, 1)) == CONST_INT
                  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
              && mode_signbit_p (mode, XEXP (op0, 1)))
            return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                        simplify_gen_binary (XOR, mode, op1,
                                                             XEXP (op0, 1)));

          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          /* If we are turning off bits already known off in OP0, we need
             do nothing.  */
          if (GET_CODE (trueop1) == CONST_INT
              && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
             ((A & N) + B) & M -> (A + B) & M
             Similarly if (N & M) == 0,
             ((A | N) + B) & M -> (A + B) & M
             and for - instead of + and/or ^ instead of |.  */
          if (GET_CODE (trueop1) == CONST_INT
              && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              && ~INTVAL (trueop1)
              && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
            {
              rtx pmop[2];
              int which;

              pmop[0] = XEXP (op0, 0);
              pmop[1] = XEXP (op0, 1);

              for (which = 0; which < 2; which++)
                {
                  tem = pmop[which];
                  switch (GET_CODE (tem))
                    {
                    case AND:
                      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                          && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                             == INTVAL (trueop1))
                        pmop[which] = XEXP (tem, 0);
                      break;
                    case IOR:
                    case XOR:
                      if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                          && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                        pmop[which] = XEXP (tem, 0);
                      break;
                    default:
                      break;
                    }
                }

              if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
                {
                  tem = simplify_gen_binary (GET_CODE (op0), mode,
                                             pmop[0], pmop[1]);
                  return simplify_gen_binary (code, mode, tem, op1);
                }
            }
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case UDIV:
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x/1 is x.  */
          if (trueop1 == const1_rtx)
            {
              /* Handle narrowing UDIV.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              return op0;
            }
          /* Convert divide by power of two into shift.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
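          /* E.g. (udiv x (const_int 16)) becomes
             (lshiftrt x (const_int 4)).  This is valid because the
             division is unsigned; the signed DIV case below cannot use a
             plain arithmetic shift for negative operands, since shifting
             rounds towards -infinity while division truncates.  */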
          break;

        case DIV:
          /* Handle floating point and integers separately.  */
          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              /* Maybe change 0.0 / x to 0.0.  This transformation isn't
                 safe for modes with NaNs, since 0.0 / 0.0 will then be
                 NaN rather than 0.0.  Nor is it safe for modes with signed
                 zeros, since dividing 0 by a negative number gives -0.0  */
              if (trueop0 == CONST0_RTX (mode)
                  && !HONOR_NANS (mode)
                  && !HONOR_SIGNED_ZEROS (mode)
                  && ! side_effects_p (op1))
                return op0;
              /* x/1.0 is x.  */
              if (trueop1 == CONST1_RTX (mode)
                  && !HONOR_SNANS (mode))
                return op0;

              if (GET_CODE (trueop1) == CONST_DOUBLE
                  && trueop1 != CONST0_RTX (mode))
                {
                  REAL_VALUE_TYPE d;
                  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

                  /* x/-1.0 is -x.  */
                  if (REAL_VALUES_EQUAL (d, dconstm1)
                      && !HONOR_SNANS (mode))
                    return simplify_gen_unary (NEG, mode, op0, mode);

                  /* Change FP division by a constant into multiplication.
                     Only do this with -funsafe-math-optimizations.  */
                  if (flag_unsafe_math_optimizations
                      && !REAL_VALUES_EQUAL (d, dconst0))
                    {
                      REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                      tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                      return simplify_gen_binary (MULT, mode, op0, tem);
                    }
                }
            }
          else
            {
              /* 0/x is 0 (or x&0 if x has side-effects).  */
              if (trueop0 == const0_rtx)
                return side_effects_p (op1)
                       ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                       : const0_rtx;
              /* x/1 is x.  */
              if (trueop1 == const1_rtx)
                {
                  /* Handle narrowing DIV.  */
                  rtx x = gen_lowpart_common (mode, op0);
                  if (x)
                    return x;
                  if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                    return gen_lowpart_SUBREG (mode, op0);
                  return op0;
                }
              /* x/-1 is -x.  */
              if (trueop1 == constm1_rtx)
                {
                  rtx x = gen_lowpart_common (mode, op0);
                  if (x == 0)
                    x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                        ? gen_lowpart_SUBREG (mode, op0) : op0;
                  return simplify_gen_unary (NEG, mode, x, mode);
                }
            }
          break;

        case UMOD:
          /* 0%x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x%1 is 0 (or x&0 if x has side-effects).  */
          if (trueop1 == const1_rtx)
            return side_effects_p (op0)
                   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
                   : const0_rtx;
          /* Implement modulus by power of two as AND.  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));
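          /* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)):
             for a power of two 2**n, the unsigned remainder is just the
             low n bits, so masking with 2**n - 1 is equivalent.  */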
          break;

        case MOD:
          /* 0%x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == const0_rtx)
            return side_effects_p (op1)
                   ? simplify_gen_binary (AND, mode, op1, const0_rtx)
                   : const0_rtx;
          /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
          if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
            return side_effects_p (op0)
                   ? simplify_gen_binary (AND, mode, op0, const0_rtx)
                   : const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
              gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
              gcc_assert (GET_CODE (trueop1) == PARALLEL);
              gcc_assert (XVECLEN (trueop1, 0) == 1);
              gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                          (trueop1, 0, 0)));
            }
          else
            {
              gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
              gcc_assert (GET_MODE_INNER (mode)
                          == GET_MODE_INNER (GET_MODE (trueop0)));
              gcc_assert (GET_CODE (trueop1) == PARALLEL);

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      gcc_assert (GET_CODE (x) == CONST_INT);
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                           INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          return 0;

        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            gcc_assert (VECTOR_MODE_P (mode));
            gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                        == GET_MODE_SIZE (mode));

            if (VECTOR_MODE_P (op0_mode))
              gcc_assert (GET_MODE_INNER (mode)
                          == GET_MODE_INNER (op0_mode));
            else
              gcc_assert (GET_MODE_INNER (mode) == op0_mode);

            if (VECTOR_MODE_P (op1_mode))
              gcc_assert (GET_MODE_INNER (mode)
                          == GET_MODE_INNER (op1_mode));
            else
              gcc_assert (GET_MODE_INNER (mode) == op1_mode);

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
          }
          return 0;

        default:
          break;
        }

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    arg0s = arg0, arg1s = arg1;

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;
    case MINUS:
      val = arg0s - arg1s;
      break;
    case MULT:
      val = arg0s * arg1s;
      break;
    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;
    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;
    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;
    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;
    case LSHIFTRT:
    case ASHIFT:
    case ASHIFTRT:
      /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
         value is in range.  We can't return any old value for out-of-range
         arguments because either the middle-end (via shift_truncation_mask)
         or the back-end might be relying on target-specific knowledge.
         Nor can we rely on shift_truncation_mask, since the shift might
         not be part of an ashlM3, lshrM3 or ashrM3 instruction.  */
      if (SHIFT_COUNT_TRUNCATED)
        arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
      else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
        return 0;

      val = (code == ASHIFT
             ? ((unsigned HOST_WIDE_INT) arg0) << arg1
             : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

      /* Sign-extend the result for arithmetic right shifts.  */
      if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (width - arg1);
      break;
    case ROTATERT:
      if (arg1 < 0)
        return 0;
      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;
    case ROTATE:
      if (arg1 < 0)
        return 0;
      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;
    case COMPARE:
      /* Do nothing here.  */
      return 0;
    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;
    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;
    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;
    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;
    default:
      gcc_unreachable ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
2405 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2406 PLUS or MINUS.
2408 Rather than test for specific cases, we do this by a brute-force method
2409 and apply all possible simplifications until no more changes occur.  Then
2410 we rebuild the operation.
2412 If FORCE is true, then always generate the rtx. This is used to
2413 canonicalize stuff emitted from simplify_gen_binary. Note that this
2414 can still fail if the rtx is too complex. It won't fail just because
2415 the result is not 'simpler' than the input, however. */
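/* As an illustrative sketch: given (plus (minus A B) B), the expansion
   loop below produces the operand list A, -B, +B; the pairwise pass
   then folds -B + B away, and the result is rebuilt as just A.  */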
2417 struct simplify_plus_minus_op_data
2424 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2426 const struct simplify_plus_minus_op_data *d1 = p1;
2427 const struct simplify_plus_minus_op_data *d2 = p2;
2429 return (commutative_operand_precedence (d2->op)
2430 - commutative_operand_precedence (d1->op));
2434 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2437 struct simplify_plus_minus_op_data ops[8];
2439 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2443 memset (ops, 0, sizeof ops);
2445 /* Set up the two operands and then expand them until nothing has been
2446 changed. If we run out of room in our array, give up; this should
2447 almost never happen. */
2452 ops[1].neg = (code == MINUS);
2458 for (i = 0; i < n_ops; i++)
2460 rtx this_op = ops[i].op;
2461 int this_neg = ops[i].neg;
2462 enum rtx_code this_code = GET_CODE (this_op);
2471 ops[n_ops].op = XEXP (this_op, 1);
2472 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2475 ops[i].op = XEXP (this_op, 0);
2481 ops[i].op = XEXP (this_op, 0);
2482 ops[i].neg = ! this_neg;
2488 && GET_CODE (XEXP (this_op, 0)) == PLUS
2489 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2490 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2492 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2493 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2494 ops[n_ops].neg = this_neg;
2502 /* ~a -> (-a - 1) */
2505 ops[n_ops].op = constm1_rtx;
2506 ops[n_ops++].neg = this_neg;
2507 ops[i].op = XEXP (this_op, 0);
2508 ops[i].neg = !this_neg;
2516 ops[i].op = neg_const_int (mode, this_op);
2529 /* If we only have two operands, we can't do anything. */
2530 if (n_ops <= 2 && !force)
2533 /* Count the number of CONSTs we didn't split above. */
2534 for (i = 0; i < n_ops; i++)
2535 if (GET_CODE (ops[i].op) == CONST)
2538 /* Now simplify each pair of operands until nothing changes. The first
2539 time through just simplify constants against each other. */
2546 for (i = 0; i < n_ops - 1; i++)
2547 for (j = i + 1; j < n_ops; j++)
2549 rtx lhs = ops[i].op, rhs = ops[j].op;
2550 int lneg = ops[i].neg, rneg = ops[j].neg;
2552 if (lhs != 0 && rhs != 0
2553 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2555 enum rtx_code ncode = PLUS;
2561 tem = lhs, lhs = rhs, rhs = tem;
2563 else if (swap_commutative_operands_p (lhs, rhs))
2564 tem = lhs, lhs = rhs, rhs = tem;
2566 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2568 /* Reject "simplifications" that just wrap the two
2569 arguments in a CONST. Failure to do so can result
2570 in infinite recursion with simplify_binary_operation
2571 when it calls us to simplify CONST operations. */
2573 && ! (GET_CODE (tem) == CONST
2574 && GET_CODE (XEXP (tem, 0)) == ncode
2575 && XEXP (XEXP (tem, 0), 0) == lhs
2576 && XEXP (XEXP (tem, 0), 1) == rhs)
2577 /* Don't allow -x + -1 -> ~x simplifications in the
2578 first pass. This allows us the chance to combine
2579 the -1 with other constants. */
2581 && GET_CODE (tem) == NOT
2582 && XEXP (tem, 0) == rhs))
2585 if (GET_CODE (tem) == NEG)
2586 tem = XEXP (tem, 0), lneg = !lneg;
2587 if (GET_CODE (tem) == CONST_INT && lneg)
2588 tem = neg_const_int (mode, tem), lneg = 0;
2592 ops[j].op = NULL_RTX;
2602 /* Pack all the operands to the lower-numbered entries. */
2603 for (i = 0, j = 0; j < n_ops; j++)
2608 /* Sort the operations based on swap_commutative_operands_p. */
2609 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2611 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2613 && GET_CODE (ops[1].op) == CONST_INT
2614 && CONSTANT_P (ops[0].op)
2616 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2618 /* We suppressed creation of trivial CONST expressions in the
2619 combination loop to avoid recursion. Create one manually now.
2620 The combination loop should have ensured that there is exactly
2621 one CONST_INT, and the sort will have ensured that it is last
2622 in the array and that any other constant will be next-to-last. */
2625 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2626 && CONSTANT_P (ops[n_ops - 2].op))
2628 rtx value = ops[n_ops - 1].op;
2629 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2630 value = neg_const_int (mode, value);
2631 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2635 /* Count the number of CONSTs that we generated. */
2637 for (i = 0; i < n_ops; i++)
2638 if (GET_CODE (ops[i].op) == CONST)
2641 /* Give up if we didn't reduce the number of operands we had. Make
2642 sure we count a CONST as two operands. If we have the same
2643 number of operands, but have made more CONSTs than before, this
2644 is also an improvement, so accept it. */
2646 && (n_ops + n_consts > input_ops
2647 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2650 /* Put a non-negated operand first, if possible. */
2652 for (i = 0; i < n_ops && ops[i].neg; i++)
2655 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2664 /* Now make the result by performing the requested operations. */
2666 for (i = 1; i < n_ops; i++)
2667 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2668 mode, result, ops[i].op);
2673 /* Check whether an operand is suitable for calling simplify_plus_minus. */
2675 plus_minus_operand_p (rtx x)
2677 return GET_CODE (x) == PLUS
2678 || GET_CODE (x) == MINUS
2679 || (GET_CODE (x) == CONST
2680 && GET_CODE (XEXP (x, 0)) == PLUS
2681 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
2682 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
2685 /* Like simplify_binary_operation except used for relational operators.
2686 MODE is the mode of the result.  If MODE is VOIDmode, the operands
2687 must not both be VOIDmode.
2689 CMP_MODE specifies the mode in which the comparison is done, so it is
2690 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
2691 the operands or, if both are VOIDmode, the operands are compared in
2692 "infinite precision". */
2694 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2695 enum machine_mode cmp_mode, rtx op0, rtx op1)
2697 rtx tem, trueop0, trueop1;
2699 if (cmp_mode == VOIDmode)
2700 cmp_mode = GET_MODE (op0);
2701 if (cmp_mode == VOIDmode)
2702 cmp_mode = GET_MODE (op1);
2704 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
2707 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2709 if (tem == const0_rtx)
2710 return CONST0_RTX (mode);
2711 #ifdef FLOAT_STORE_FLAG_VALUE
2713 REAL_VALUE_TYPE val;
2714 val = FLOAT_STORE_FLAG_VALUE (mode);
2715 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
2721 if (VECTOR_MODE_P (mode))
2723 if (tem == const0_rtx)
2724 return CONST0_RTX (mode);
2725 #ifdef VECTOR_STORE_FLAG_VALUE
2730 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2731 if (val == NULL_RTX)
2733 if (val == const1_rtx)
2734 return CONST1_RTX (mode);
2736 units = GET_MODE_NUNITS (mode);
2737 v = rtvec_alloc (units);
2738 for (i = 0; i < units; i++)
2739 RTVEC_ELT (v, i) = val;
2740 return gen_rtx_raw_CONST_VECTOR (mode, v);
2750 /* For the following tests, ensure const0_rtx is op1. */
2751 if (swap_commutative_operands_p (op0, op1)
2752 || (op0 == const0_rtx && op1 != const0_rtx))
2753 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
2755 /* If op0 is a compare, extract the comparison arguments from it. */
2756 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2757 return simplify_relational_operation (code, mode, VOIDmode,
2758 XEXP (op0, 0), XEXP (op0, 1));
2760 if (mode == VOIDmode
2761 || GET_MODE_CLASS (cmp_mode) == MODE_CC
2765 trueop0 = avoid_constant_pool_reference (op0);
2766 trueop1 = avoid_constant_pool_reference (op1);
2767 return simplify_relational_operation_1 (code, mode, cmp_mode,
2771 /* This part of simplify_relational_operation is only used when CMP_MODE
2772 is not in class MODE_CC (i.e. it is a real comparison).
2774 MODE is the mode of the result, while CMP_MODE specifies the mode in
2775 which the comparison is done, so it is the mode of the operands.  */
2778 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
2779 enum machine_mode cmp_mode, rtx op0, rtx op1)
2781 enum rtx_code op0code = GET_CODE (op0);
2783 if (GET_CODE (op1) == CONST_INT)
2785 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
2787 /* If op0 is a comparison, extract the comparison arguments from it.  */
2790 if (GET_MODE (op0) == cmp_mode)
2791 return simplify_rtx (op0);
2793 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
2794 XEXP (op0, 0), XEXP (op0, 1));
2796 else if (code == EQ)
2798 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
2799 if (new_code != UNKNOWN)
2800 return simplify_gen_relational (new_code, mode, VOIDmode,
2801 XEXP (op0, 0), XEXP (op0, 1));
2806 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
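/* For instance, (eq (plus x (const_int 3)) (const_int 7)) becomes
   (eq x (const_int 4)).  */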
2807 if ((code == EQ || code == NE)
2808 && (op0code == PLUS || op0code == MINUS)
2810 && CONSTANT_P (XEXP (op0, 1))
2811 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
2813 rtx x = XEXP (op0, 0);
2814 rtx c = XEXP (op0, 1);
2816 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
2818 return simplify_gen_relational (code, mode, cmp_mode, x, c);
2824 /* Check if the given comparison (done in the given MODE) is actually a
2825 tautology or a contradiction.
2826 If no simplification is possible, this function returns zero.
2827 Otherwise, it returns either const_true_rtx or const0_rtx. */
2830 simplify_const_relational_operation (enum rtx_code code,
2831 enum machine_mode mode,
2834 int equal, op0lt, op0ltu, op1lt, op1ltu;
2839 gcc_assert (mode != VOIDmode
2840 || (GET_MODE (op0) == VOIDmode
2841 && GET_MODE (op1) == VOIDmode));
2843 /* If op0 is a compare, extract the comparison arguments from it. */
2844 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2845 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2847 /* We can't simplify MODE_CC values since we don't know what the
2848 actual comparison is. */
2849 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2852 /* Make sure the constant is second. */
2853 if (swap_commutative_operands_p (op0, op1))
2855 tem = op0, op0 = op1, op1 = tem;
2856 code = swap_condition (code);
2859 trueop0 = avoid_constant_pool_reference (op0);
2860 trueop1 = avoid_constant_pool_reference (op1);
2862 /* For integer comparisons of A and B maybe we can simplify A - B and can
2863 then simplify a comparison of that with zero. If A and B are both either
2864 a register or a CONST_INT, this can't help; testing for these cases will
2865 prevent infinite recursion here and speed things up.
2867 If CODE is an unsigned comparison, then we can never do this optimization,
2868 because it gives an incorrect result if the subtraction wraps around zero.
2869 ANSI C defines unsigned operations such that they never overflow, and
2870 thus such cases cannot be ignored; but we cannot do it even for
2871 signed comparisons in languages such as Java, so test flag_wrapv.  */
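/* For example, a signed (gt (plus X (const_int 4)) X): the
   subtraction simplifies to (const_int 4), and comparing that against
   zero folds the whole test to const_true_rtx -- valid only because
   signed overflow is treated as undefined here.  */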
2873 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2874 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
2875 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
2876 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2877 /* We cannot do this for == or != if tem is a nonzero address. */
2878 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2879 && code != GTU && code != GEU && code != LTU && code != LEU)
2880 return simplify_const_relational_operation (signed_condition (code),
2881 mode, tem, const0_rtx);
2883 if (flag_unsafe_math_optimizations && code == ORDERED)
2884 return const_true_rtx;
2886 if (flag_unsafe_math_optimizations && code == UNORDERED)
2889 /* For modes without NaNs, if the two operands are equal, we know the
2890 result except if they have side-effects. */
2891 if (! HONOR_NANS (GET_MODE (trueop0))
2892 && rtx_equal_p (trueop0, trueop1)
2893 && ! side_effects_p (trueop0))
2894 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2896 /* If the operands are floating-point constants, see if we can fold
2897 the result.  */
2898 else if (GET_CODE (trueop0) == CONST_DOUBLE
2899 && GET_CODE (trueop1) == CONST_DOUBLE
2900 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2902 REAL_VALUE_TYPE d0, d1;
2904 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2905 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2907 /* Comparisons are unordered iff at least one of the values is NaN. */
2908 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2918 return const_true_rtx;
2931 equal = REAL_VALUES_EQUAL (d0, d1);
2932 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2933 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2936 /* Otherwise, see if the operands are both integers. */
2937 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2938 && (GET_CODE (trueop0) == CONST_DOUBLE
2939 || GET_CODE (trueop0) == CONST_INT)
2940 && (GET_CODE (trueop1) == CONST_DOUBLE
2941 || GET_CODE (trueop1) == CONST_INT))
2943 int width = GET_MODE_BITSIZE (mode);
2944 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2945 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2947 /* Get the two words comprising each integer constant. */
2948 if (GET_CODE (trueop0) == CONST_DOUBLE)
2950 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2951 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2955 l0u = l0s = INTVAL (trueop0);
2956 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2959 if (GET_CODE (trueop1) == CONST_DOUBLE)
2961 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2962 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2966 l1u = l1s = INTVAL (trueop1);
2967 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2970 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2971 we have to sign or zero-extend the values. */
2972 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2974 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2975 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2977 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2978 l0s |= ((HOST_WIDE_INT) (-1) << width);
2980 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2981 l1s |= ((HOST_WIDE_INT) (-1) << width);
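/* E.g. with width 8, an unsigned low word is masked down to 0xff,
   and a signed value such as 0x80 is extended to L0S == -128.  */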
2983 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2984 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2986 equal = (h0u == h1u && l0u == l1u);
2987 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2988 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2989 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2990 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2993 /* Otherwise, there are some code-specific tests we can make. */
2996 /* Optimize comparisons with upper and lower bounds. */
2997 if (SCALAR_INT_MODE_P (mode)
2998 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3011 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3018 /* x >= min is always true. */
3019 if (rtx_equal_p (trueop1, mmin))
3020 tem = const_true_rtx;
3026 /* x <= max is always true. */
3027 if (rtx_equal_p (trueop1, mmax))
3028 tem = const_true_rtx;
3033 /* x > max is always false. */
3034 if (rtx_equal_p (trueop1, mmax))
3040 /* x < min is always false. */
3041 if (rtx_equal_p (trueop1, mmin))
3048 if (tem == const0_rtx
3049 || tem == const_true_rtx)
3056 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3061 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3062 return const_true_rtx;
3066 /* Optimize abs(x) < 0.0. */
3067 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
3069 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3071 if (GET_CODE (tem) == ABS)
3077 /* Optimize abs(x) >= 0.0. */
3078 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
3080 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3082 if (GET_CODE (tem) == ABS)
3083 return const_true_rtx;
3088 /* Optimize ! (abs(x) < 0.0). */
3089 if (trueop1 == CONST0_RTX (mode))
3091 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3093 if (GET_CODE (tem) == ABS)
3094 return const_true_rtx;
3105 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3106 as appropriate.  */
3111 return equal ? const_true_rtx : const0_rtx;
3114 return ! equal ? const_true_rtx : const0_rtx;
3117 return op0lt ? const_true_rtx : const0_rtx;
3120 return op1lt ? const_true_rtx : const0_rtx;
3122 return op0ltu ? const_true_rtx : const0_rtx;
3124 return op1ltu ? const_true_rtx : const0_rtx;
3127 return equal || op0lt ? const_true_rtx : const0_rtx;
3130 return equal || op1lt ? const_true_rtx : const0_rtx;
3132 return equal || op0ltu ? const_true_rtx : const0_rtx;
3134 return equal || op1ltu ? const_true_rtx : const0_rtx;
3136 return const_true_rtx;
3144 /* Simplify CODE, an operation with result mode MODE and three operands,
3145 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3146 a constant.  Return 0 if no simplification is possible.  */
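/* E.g. (if_then_else (const_int 1) A B) folds to A, and a
   SIGN_EXTRACT or ZERO_EXTRACT with all-constant operands is
   evaluated directly below.  */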
3149 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3150 enum machine_mode op0_mode, rtx op0, rtx op1,
3153 unsigned int width = GET_MODE_BITSIZE (mode);
3155 /* VOIDmode means "infinite" precision. */
3157 width = HOST_BITS_PER_WIDE_INT;
3163 if (GET_CODE (op0) == CONST_INT
3164 && GET_CODE (op1) == CONST_INT
3165 && GET_CODE (op2) == CONST_INT
3166 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3167 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3169 /* Extracting a bit-field from a constant */
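/* E.g. (zero_extract:SI (const_int 0x5a) (const_int 4) (const_int 4))
   on a !BITS_BIG_ENDIAN target yields (const_int 5).  */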
3170 HOST_WIDE_INT val = INTVAL (op0);
3172 if (BITS_BIG_ENDIAN)
3173 val >>= (GET_MODE_BITSIZE (op0_mode)
3174 - INTVAL (op2) - INTVAL (op1));
3176 val >>= INTVAL (op2);
3178 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3180 /* First zero-extend. */
3181 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3182 /* If desired, propagate sign bit. */
3183 if (code == SIGN_EXTRACT
3184 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3185 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3188 /* Clear the bits that don't belong in our mode,
3189 unless they and our sign bit are all one.
3190 So we get either a reasonable negative value or a reasonable
3191 unsigned value for this mode. */
3192 if (width < HOST_BITS_PER_WIDE_INT
3193 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3194 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3195 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3197 return gen_int_mode (val, mode);
3202 if (GET_CODE (op0) == CONST_INT)
3203 return op0 != const0_rtx ? op1 : op2;
3205 /* Convert c ? a : a into "a". */
3206 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3209 /* Convert a != b ? a : b into "a". */
3210 if (GET_CODE (op0) == NE
3211 && ! side_effects_p (op0)
3212 && ! HONOR_NANS (mode)
3213 && ! HONOR_SIGNED_ZEROS (mode)
3214 && ((rtx_equal_p (XEXP (op0, 0), op1)
3215 && rtx_equal_p (XEXP (op0, 1), op2))
3216 || (rtx_equal_p (XEXP (op0, 0), op2)
3217 && rtx_equal_p (XEXP (op0, 1), op1))))
3220 /* Convert a == b ? a : b into "b". */
3221 if (GET_CODE (op0) == EQ
3222 && ! side_effects_p (op0)
3223 && ! HONOR_NANS (mode)
3224 && ! HONOR_SIGNED_ZEROS (mode)
3225 && ((rtx_equal_p (XEXP (op0, 0), op1)
3226 && rtx_equal_p (XEXP (op0, 1), op2))
3227 || (rtx_equal_p (XEXP (op0, 0), op2)
3228 && rtx_equal_p (XEXP (op0, 1), op1))))
3231 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3233 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3234 ? GET_MODE (XEXP (op0, 1))
3235 : GET_MODE (XEXP (op0, 0)));
3238 /* Look for happy constants in op1 and op2. */
3239 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3241 HOST_WIDE_INT t = INTVAL (op1);
3242 HOST_WIDE_INT f = INTVAL (op2);
3244 if (t == STORE_FLAG_VALUE && f == 0)
3245 code = GET_CODE (op0);
3246 else if (t == 0 && f == STORE_FLAG_VALUE)
3249 tmp = reversed_comparison_code (op0, NULL_RTX);
3257 return simplify_gen_relational (code, mode, cmp_mode,
3258 XEXP (op0, 0), XEXP (op0, 1));
3261 if (cmp_mode == VOIDmode)
3262 cmp_mode = op0_mode;
3263 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
3264 cmp_mode, XEXP (op0, 0),
3267 /* See if any simplifications were possible. */
3270 if (GET_CODE (temp) == CONST_INT)
3271 return temp == const0_rtx ? op2 : op1;
3273 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
3279 gcc_assert (GET_MODE (op0) == mode);
3280 gcc_assert (GET_MODE (op1) == mode);
3281 gcc_assert (VECTOR_MODE_P (mode));
3282 op2 = avoid_constant_pool_reference (op2);
3283 if (GET_CODE (op2) == CONST_INT)
3285 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3286 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3287 int mask = (1 << n_elts) - 1;
3289 if (!(INTVAL (op2) & mask))
3291 if ((INTVAL (op2) & mask) == mask)
3294 op0 = avoid_constant_pool_reference (op0);
3295 op1 = avoid_constant_pool_reference (op1);
3296 if (GET_CODE (op0) == CONST_VECTOR
3297 && GET_CODE (op1) == CONST_VECTOR)
3299 rtvec v = rtvec_alloc (n_elts);
3302 for (i = 0; i < n_elts; i++)
3303 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3304 ? CONST_VECTOR_ELT (op0, i)
3305 : CONST_VECTOR_ELT (op1, i));
3306 return gen_rtx_CONST_VECTOR (mode, v);
3318 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3319 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3321 Works by unpacking OP into a collection of 8-bit values
3322 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3323 and then repacking them again for OUTERMODE. */
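/* For instance, on a little-endian target a QImode subreg of
   (const_int 0x1234) taken in HImode at byte 0 unpacks to the byte
   array {0x34, 0x12}, selects byte 0, and repacks to
   (const_int 0x34).  */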
3326 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3327 enum machine_mode innermode, unsigned int byte)
3329 /* We support up to 512-bit values (for V8DFmode). */
3330 enum {
3331 max_bitsize = 512,
3332 value_bit = 8,
3333 value_mask = (1 << value_bit) - 1
3334 };
3335 unsigned char value[max_bitsize / value_bit];
3344 rtvec result_v = NULL;
3345 enum mode_class outer_class;
3346 enum machine_mode outer_submode;
3348 /* Some ports misuse CCmode. */
3349 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3352 /* We have no way to represent a complex constant at the rtl level. */
3353 if (COMPLEX_MODE_P (outermode))
3356 /* Unpack the value. */
3358 if (GET_CODE (op) == CONST_VECTOR)
3360 num_elem = CONST_VECTOR_NUNITS (op);
3361 elems = &CONST_VECTOR_ELT (op, 0);
3362 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3368 elem_bitsize = max_bitsize;
3370 /* If this asserts, it is too complicated; reducing value_bit may help. */
3371 gcc_assert (BITS_PER_UNIT % value_bit == 0);
3372 /* I don't know how to handle endianness of sub-units. */
3373 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
3375 for (elem = 0; elem < num_elem; elem++)
3378 rtx el = elems[elem];
3380 /* Vectors are kept in target memory order.  (This is probably
3381 a mistake.)  */
3383 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3384 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3386 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3387 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3388 unsigned bytele = (subword_byte % UNITS_PER_WORD
3389 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3390 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3393 switch (GET_CODE (el))
3397 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3399 *vp++ = INTVAL (el) >> i;
3400 /* CONST_INTs are always logically sign-extended. */
3401 for (; i < elem_bitsize; i += value_bit)
3402 *vp++ = INTVAL (el) < 0 ? -1 : 0;
3406 if (GET_MODE (el) == VOIDmode)
3408 /* If this triggers, someone should have generated a
3409 CONST_INT instead. */
3410 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
3412 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3413 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3414 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3417 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3420 /* It shouldn't matter what's done here, so fill it with
3421 zero.  */
3422 for (; i < max_bitsize; i += value_bit)
3427 long tmp[max_bitsize / 32];
3428 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3430 gcc_assert (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT);
3431 gcc_assert (bitsize <= elem_bitsize);
3432 gcc_assert (bitsize % value_bit == 0);
3434 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3437 /* real_to_target produces its result in words affected by
3438 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3439 and use WORDS_BIG_ENDIAN instead; see the documentation
3440 of SUBREG in rtl.texi. */
3441 for (i = 0; i < bitsize; i += value_bit)
3444 if (WORDS_BIG_ENDIAN)
3445 ibase = bitsize - 1 - i;
3448 *vp++ = tmp[ibase / 32] >> i % 32;
3451 /* It shouldn't matter what's done here, so fill it with
3452 zero.  */
3453 for (; i < elem_bitsize; i += value_bit)
3463 /* Now, pick the right byte to start with. */
3464 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3465 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3466 will already have offset 0. */
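/* For example, on a big-endian target a QImode subreg of an SImode
   value at SUBREG_BYTE 3 names the least significant byte, so BYTE is
   renumbered from 3 to 0; on a little-endian target BYTE is already
   the significance index and is left unchanged.  */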
3467 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3469 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3471 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3472 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3473 byte = (subword_byte % UNITS_PER_WORD
3474 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3477 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3478 so if it's become negative it will instead be very large.) */
3479 gcc_assert (byte < GET_MODE_SIZE (innermode));
3481 /* Convert from bytes to chunks of size value_bit. */
3482 value_start = byte * (BITS_PER_UNIT / value_bit);
3484 /* Re-pack the value. */
3486 if (VECTOR_MODE_P (outermode))
3488 num_elem = GET_MODE_NUNITS (outermode);
3489 result_v = rtvec_alloc (num_elem);
3490 elems = &RTVEC_ELT (result_v, 0);
3491 outer_submode = GET_MODE_INNER (outermode);
3497 outer_submode = outermode;
3500 outer_class = GET_MODE_CLASS (outer_submode);
3501 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3503 gcc_assert (elem_bitsize % value_bit == 0);
3504 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
3506 for (elem = 0; elem < num_elem; elem++)
3510 /* Vectors are stored in target memory order.  (This is probably
3511 a mistake.)  */
3513 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3514 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3516 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3517 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3518 unsigned bytele = (subword_byte % UNITS_PER_WORD
3519 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3520 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3523 switch (outer_class)
3526 case MODE_PARTIAL_INT:
3528 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3531 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3533 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3534 for (; i < elem_bitsize; i += value_bit)
3535 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3536 << (i - HOST_BITS_PER_WIDE_INT));
3538 /* immed_double_const doesn't call trunc_int_for_mode.  I don't
3539 know why.  */
3540 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3541 elems[elem] = gen_int_mode (lo, outer_submode);
3543 elems[elem] = immed_double_const (lo, hi, outer_submode);
3550 long tmp[max_bitsize / 32];
3552 /* real_from_target wants its input in words affected by
3553 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3554 and use WORDS_BIG_ENDIAN instead; see the documentation
3555 of SUBREG in rtl.texi. */
3556 for (i = 0; i < max_bitsize / 32; i++)
3558 for (i = 0; i < elem_bitsize; i += value_bit)
3561 if (WORDS_BIG_ENDIAN)
3562 ibase = elem_bitsize - 1 - i;
3565 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3568 real_from_target (&r, tmp, outer_submode);
3569 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3577 if (VECTOR_MODE_P (outermode))
3578 return gen_rtx_CONST_VECTOR (outermode, result_v);
3583 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3584 Return 0 if no simplifications are possible. */
3586 simplify_subreg (enum machine_mode outermode, rtx op,
3587 enum machine_mode innermode, unsigned int byte)
3589 /* Little bit of sanity checking. */
3590 gcc_assert (innermode != VOIDmode);
3591 gcc_assert (outermode != VOIDmode);
3592 gcc_assert (innermode != BLKmode);
3593 gcc_assert (outermode != BLKmode);
3595 gcc_assert (GET_MODE (op) == innermode
3596 || GET_MODE (op) == VOIDmode);
3598 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
3599 gcc_assert (byte < GET_MODE_SIZE (innermode));
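/* E.g. (subreg:SI (reg:SI X) 0) is X itself, handled just below,
   while SUBREGs of constants are delegated to
   simplify_immed_subreg.  */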
3601 if (outermode == innermode && !byte)
3604 if (GET_CODE (op) == CONST_INT
3605 || GET_CODE (op) == CONST_DOUBLE
3606 || GET_CODE (op) == CONST_VECTOR)
3607 return simplify_immed_subreg (outermode, op, innermode, byte);
3609 /* Changing mode twice with SUBREG => just change it once,
3610 or not at all if changing back to op's starting mode.  */
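/* E.g. (subreg:QI (subreg:HI (reg:SI R) 0) 0) becomes
   (subreg:QI (reg:SI R) 0) on a little-endian target.  */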
3611 if (GET_CODE (op) == SUBREG)
3613 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3614 int final_offset = byte + SUBREG_BYTE (op);
3617 if (outermode == innermostmode
3618 && byte == 0 && SUBREG_BYTE (op) == 0)
3619 return SUBREG_REG (op);
3621 /* The SUBREG_BYTE represents the offset, as if the value were stored
3622 in memory.  An irritating exception is a paradoxical subreg, where
3623 we define SUBREG_BYTE to be 0; on big-endian machines, this value
3624 would otherwise be negative.  For a moment, undo this exception.  */
3625 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3627 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3628 if (WORDS_BIG_ENDIAN)
3629 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3630 if (BYTES_BIG_ENDIAN)
3631 final_offset += difference % UNITS_PER_WORD;
3633 if (SUBREG_BYTE (op) == 0
3634 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3636 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3637 if (WORDS_BIG_ENDIAN)
3638 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3639 if (BYTES_BIG_ENDIAN)
3640 final_offset += difference % UNITS_PER_WORD;
3643 /* See whether resulting subreg will be paradoxical. */
3644 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3646 /* In nonparadoxical subregs we can't handle negative offsets. */
3647 if (final_offset < 0)
3649 /* Bail out in case resulting subreg would be incorrect. */
3650 if (final_offset % GET_MODE_SIZE (outermode)
3651 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3657 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3659 /* In a paradoxical subreg, see if we are still looking at the lower
3660 part.  If so, our SUBREG_BYTE will be 0.  */
3661 if (WORDS_BIG_ENDIAN)
3662 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3663 if (BYTES_BIG_ENDIAN)
3664 offset += difference % UNITS_PER_WORD;
3665 if (offset == final_offset)
3671 /* Recurse for further possible simplifications. */
3672 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
3676 if (validate_subreg (outermode, innermostmode,
3677 SUBREG_REG (op), final_offset))
3678 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3682 /* SUBREG of a hard register => just change the register number
3683 and/or mode. If the hard register is not valid in that mode,
3684 suppress this simplification. If the hard register is the stack,
3685 frame, or argument pointer, leave this as a SUBREG. */
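/* For instance, on a target where hard register 0 is valid in both
   DImode and SImode, (subreg:SI (reg:DI 0) 0) can be rewritten as
   (reg:SI 0) directly, modulo the subreg_regno_offset adjustment
   needed on big-endian targets.  */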
3688 && REGNO (op) < FIRST_PSEUDO_REGISTER
3689 #ifdef CANNOT_CHANGE_MODE_CLASS
3690 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3691 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3692 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3694 && ((reload_completed && !frame_pointer_needed)
3695 || (REGNO (op) != FRAME_POINTER_REGNUM
3696 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3697 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3700 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3701 && REGNO (op) != ARG_POINTER_REGNUM
3703 && REGNO (op) != STACK_POINTER_REGNUM
3704 && subreg_offset_representable_p (REGNO (op), innermode,
3707 unsigned int regno = REGNO (op);
3708 unsigned int final_regno
3709 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
3711 /* ??? We do allow it if the current REG is not valid for
3712 its mode. This is a kludge to work around how float/complex
3713 arguments are passed on 32-bit SPARC and should be fixed. */
3714 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3715 || ! HARD_REGNO_MODE_OK (regno, innermode))
3717 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3719 /* Propagate original regno. We don't have any way to specify
3720 the offset inside the original regno, so do so only for the lowpart.
3721 The information is used only by alias analysis, which cannot
3722 grok partial registers anyway.  */
3724 if (subreg_lowpart_offset (outermode, innermode) == byte)
3725 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3730 /* If we have a SUBREG of a register that we are replacing and we are
3731 replacing it with a MEM, make a new MEM and try replacing the
3732 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3733 or if we would be widening it. */
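/* E.g. (subreg:SI (mem:DI ADDR) 4) can be rewritten by
   adjust_address_nv below as an SImode MEM offset 4 bytes into ADDR,
   provided ADDR is not mode-dependent.  */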
3736 && ! mode_dependent_address_p (XEXP (op, 0))
3737 /* Allow splitting of volatile memory references in case we don't
3738 have an instruction to move the whole thing.  */
3739 && (! MEM_VOLATILE_P (op)
3740 || ! have_insn_for (SET, innermode))
3741 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3742 return adjust_address_nv (op, outermode, byte);
3744 /* Handle complex values represented as CONCAT
3745 of real and imaginary part. */
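/* E.g. for (concat:SC RE IM) with 4-byte SFmode parts, a subreg:SF
   at byte 0 selects RE and one at byte 4 selects IM.  */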
3746 if (GET_CODE (op) == CONCAT)
3748 unsigned int inner_size, final_offset;
3751 inner_size = GET_MODE_UNIT_SIZE (innermode);
3752 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
3753 final_offset = byte % inner_size;
3754 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
3757 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3760 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
3761 return gen_rtx_SUBREG (outermode, part, final_offset);
3765 /* Optimize SUBREG truncations of zero and sign extended values. */
3766 if ((GET_CODE (op) == ZERO_EXTEND
3767 || GET_CODE (op) == SIGN_EXTEND)
3768 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3770 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3772 /* If we're requesting the lowpart of a zero or sign extension,
3773 there are three possibilities. If the outermode is the same
3774 as the origmode, we can omit both the extension and the subreg.
3775 If the outermode is not larger than the origmode, we can apply
3776 the truncation without the extension. Finally, if the outermode
3777 is larger than the origmode, but both are integer modes, we
3778 can just extend to the appropriate mode. */
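/* E.g. the lowpart (subreg:QI (zero_extend:SI (reg:QI R)) 0) is just
   (reg:QI R), while a lowpart subreg:HI of the same value becomes
   (zero_extend:HI (reg:QI R)).  */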
3781 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3782 if (outermode == origmode)
3783 return XEXP (op, 0);
3784 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3785 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3786 subreg_lowpart_offset (outermode,
3788 if (SCALAR_INT_MODE_P (outermode))
3789 return simplify_gen_unary (GET_CODE (op), outermode,
3790 XEXP (op, 0), origmode);
3793 /* A SUBREG resulting from a zero extension may fold to zero if
3794 it extracts bits above those supplied by the ZERO_EXTEND's source.  */
3795 if (GET_CODE (op) == ZERO_EXTEND
3796 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3797 return CONST0_RTX (outermode);
3803 /* Make a SUBREG operation or equivalent if it folds. */
3806 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3807 enum machine_mode innermode, unsigned int byte)
3811 newx = simplify_subreg (outermode, op, innermode, byte);
3815 if (GET_CODE (op) == SUBREG
3816 || GET_CODE (op) == CONCAT
3817 || GET_MODE (op) == VOIDmode)
3820 if (validate_subreg (outermode, innermode, op, byte))
3821 return gen_rtx_SUBREG (outermode, op, byte);
3826 /* Simplify X, an rtx expression.
3828 Return the simplified expression or NULL if no simplifications
3829 were possible.
3831 This is the preferred entry point into the simplification routines;
3832 however, we still allow passes to call the more specific routines.
3834 Right now GCC has three (yes, three) major bodies of RTL simplification
3835 code that need to be unified.
3837 1. fold_rtx in cse.c. This code uses various CSE specific
3838 information to aid in RTL simplification.
3840 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3841 it uses combine specific information to aid in RTL
3844 3. The routines in this file.
3847 Long term we want to only have one body of simplification code; to
3848 get to that state I recommend the following steps:
3850 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3851 which do not depend on pass-specific state into these routines.
3853 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3854 use this routine whenever possible.
3856 3. Allow for pass dependent state to be provided to these
3857 routines and add simplifications based on the pass dependent
3858 state.  Remove code from cse.c & combine.c that becomes
3859 redundant/dead.
3861 It will take time, but ultimately the compiler will be easier to
3862 maintain and improve.  It's totally silly that when we add a
3863 simplification it needs to be added to 4 places (3 for RTL
3864 simplification and 1 for tree simplification).  */
3867 simplify_rtx (rtx x)
3869 enum rtx_code code = GET_CODE (x);
3870 enum machine_mode mode = GET_MODE (x);
3872 switch (GET_RTX_CLASS (code))
3875 return simplify_unary_operation (code, mode,
3876 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3877 case RTX_COMM_ARITH:
3878 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3879 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3881 /* Fall through.... */
3884 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3887 case RTX_BITFIELD_OPS:
3888 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3889 XEXP (x, 0), XEXP (x, 1),
3893 case RTX_COMM_COMPARE:
3894 return simplify_relational_operation (code, mode,
3895 ((GET_MODE (XEXP (x, 0))
3897 ? GET_MODE (XEXP (x, 0))
3898 : GET_MODE (XEXP (x, 1))),
3904 return simplify_gen_subreg (mode, SUBREG_REG (x),
3905 GET_MODE (SUBREG_REG (x)),
3912 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3913 if (GET_CODE (XEXP (x, 0)) == HIGH
3914 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))