1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int: this macro yields the high word (-1 or 0) that
   corresponds to LOW's sign.
   NOTE: LOW is parenthesized so that expression arguments such as
   "a + b" are cast as a whole, not just their first operand.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool mode_signbit_p (enum machine_mode, rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
/* NOTE(review): gen_int_mode re-canonicalizes the negated value for
   MODE, so negating the most negative representable value wraps to a
   valid CONST_INT rather than overflowing.  */
65 neg_const_int (enum machine_mode mode, rtx i)
67 return gen_int_mode (- INTVAL (i), mode);
70 /* Test whether expression, X, is an immediate constant that represents
71 the most significant bit of machine mode MODE. */
74 mode_signbit_p (enum machine_mode mode, rtx x)
76 unsigned HOST_WIDE_INT val;
/* Only integer modes have a sign bit in the sense tested here.  */
79 if (GET_MODE_CLASS (mode) != MODE_INT)
82 width = GET_MODE_BITSIZE (mode);
/* A constant narrow enough for one host word is a CONST_INT; a wider
   one is a CONST_DOUBLE whose low word must be all zero for the value
   to be a pure sign bit, in which case only the high word matters.  */
86 if (width <= HOST_BITS_PER_WIDE_INT
87 && GET_CODE (x) == CONST_INT)
89 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
90 && GET_CODE (x) == CONST_DOUBLE
91 && CONST_DOUBLE_LOW (x) == 0)
93 val = CONST_DOUBLE_HIGH (x);
94 width -= HOST_BITS_PER_WIDE_INT;
/* Mask off any bits above WIDTH, then require exactly the top bit.  */
99 if (width < HOST_BITS_PER_WIDE_INT)
100 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
101 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
104 /* Make a binary operation by properly ordering the operands and
105 seeing if the expression folds. */
108 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
113 /* Put complex operands first and constants second if commutative. */
114 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
115 && swap_commutative_operands_p (op0, op1))
116 tem = op0, op0 = op1, op1 = tem;
118 /* If this simplifies, do it. */
119 tem = simplify_binary_operation (code, mode, op0, op1);
123 /* Handle addition and subtraction specially. Otherwise, just form
126 if (code == PLUS || code == MINUS)
128 tem = simplify_plus_minus (code, mode, op0, op1, 1);
/* No simplification applies: build the canonical rtx unchanged.  */
133 return gen_rtx_fmt_ee (code, mode, op0, op1);
136 /* If X is a MEM referencing the constant pool, return the real value.
137 Otherwise return X. */
139 avoid_constant_pool_reference (rtx x)
142 enum machine_mode cmode;
144 switch (GET_CODE (x))
150 /* Handle float extensions of constant pool references. */
152 c = avoid_constant_pool_reference (tmp);
153 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
/* Re-extend the pool constant in X's (wider) float mode.  */
157 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
158 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
168 /* Call target hook to avoid the effects of -fpic etc.... */
169 addr = targetm.delegitimize_address (addr);
/* Strip an outer LO_SUM to expose the underlying symbol ref.  */
171 if (GET_CODE (addr) == LO_SUM)
172 addr = XEXP (addr, 1);
174 if (GET_CODE (addr) != SYMBOL_REF
175 || ! CONSTANT_POOL_ADDRESS_P (addr))
178 c = get_pool_constant (addr);
179 cmode = get_pool_mode (addr);
181 /* If we're accessing the constant in a different mode than it was
182 originally stored, attempt to fix that up via subreg simplifications.
183 If that fails we have no choice but to return the original memory. */
184 if (cmode != GET_MODE (x))
186 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
193 /* Make a unary operation by first seeing if it folds and otherwise making
194 the specified operation. */
197 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
198 enum machine_mode op_mode)
202 /* If this simplifies, use it. */
203 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
/* Otherwise fall back to building the plain unary rtx.  */
206 return gen_rtx_fmt_e (code, mode, op);
209 /* Likewise for ternary operations. */
212 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
213 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
217 /* If this simplifies, use it. */
218 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
/* Otherwise fall back to building the plain ternary rtx.  */
222 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
225 /* Likewise, for relational operations.
226 CMP_MODE specifies mode comparison is done in.
230 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
231 enum machine_mode cmp_mode, rtx op0, rtx op1)
/* Infer CMP_MODE from whichever operand has a non-void mode.  */
235 if (cmp_mode == VOIDmode)
236 cmp_mode = GET_MODE (op0);
237 if (cmp_mode == VOIDmode)
238 cmp_mode = GET_MODE (op1);
240 if (cmp_mode != VOIDmode)
242 tem = simplify_relational_operation (code, mode, cmp_mode, op0, op1);
247 /* For the following tests, ensure const0_rtx is op1. */
248 if (swap_commutative_operands_p (op0, op1)
249 || (op0 == const0_rtx && op1 != const0_rtx))
250 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
252 /* If op0 is a compare, extract the comparison arguments from it. */
253 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
254 return simplify_gen_relational (code, mode, VOIDmode,
255 XEXP (op0, 0), XEXP (op0, 1));
257 /* If op0 is a comparison, extract the comparison arguments from it. */
258 if (COMPARISON_P (op0) && op1 == const0_rtx)
262 if (GET_MODE (op0) == mode)
264 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
265 XEXP (op0, 0), XEXP (op0, 1));
/* Otherwise try the reversed comparison code.  */
269 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
271 return simplify_gen_relational (new, mode, VOIDmode,
272 XEXP (op0, 0), XEXP (op0, 1));
276 return gen_rtx_fmt_ee (code, mode, op0, op1);
279 /* Replace all occurrences of OLD in X with NEW and try to simplify the
280 resulting RTX. Return a new RTX which is as simplified as possible. */
283 simplify_replace_rtx (rtx x, rtx old, rtx new)
285 enum rtx_code code = GET_CODE (x);
286 enum machine_mode mode = GET_MODE (x);
287 enum machine_mode op_mode;
290 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
291 to build a new expression substituting recursively. If we can't do
292 anything, return our input. */
/* Dispatch on the rtx class so each arity recurses into the right
   number of operands; unchanged operands return X itself to share
   structure.  */
297 switch (GET_RTX_CLASS (code))
301 op_mode = GET_MODE (op0);
302 op0 = simplify_replace_rtx (op0, old, new);
303 if (op0 == XEXP (x, 0))
305 return simplify_gen_unary (code, mode, op0, op_mode);
309 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
311 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
313 return simplify_gen_binary (code, mode, op0, op1);
316 case RTX_COMM_COMPARE:
/* For comparisons, take the mode from whichever operand has one.  */
319 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
320 op0 = simplify_replace_rtx (op0, old, new);
321 op1 = simplify_replace_rtx (op1, old, new);
322 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
324 return simplify_gen_relational (code, mode, op_mode, op0, op1);
327 case RTX_BITFIELD_OPS:
329 op_mode = GET_MODE (op0);
330 op0 = simplify_replace_rtx (op0, old, new);
331 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
332 op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
333 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
335 if (op_mode == VOIDmode)
336 op_mode = GET_MODE (op0);
337 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
340 /* The only case we try to handle is a SUBREG. */
343 op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
344 if (op0 == SUBREG_REG (x))
346 op0 = simplify_gen_subreg (GET_MODE (x), op0,
347 GET_MODE (SUBREG_REG (x)),
/* simplify_gen_subreg may fail; keep the original X then.  */
349 return op0 ? op0 : x;
/* MEM: substitute inside the address without validating it.  */
356 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
357 if (op0 == XEXP (x, 0))
359 return replace_equiv_address_nv (x, op0);
361 else if (code == LO_SUM)
363 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
364 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
366 /* (lo_sum (high x) x) -> x */
367 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
370 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
372 return gen_rtx_LO_SUM (mode, op0, op1);
374 else if (code == REG)
/* Registers match OLD by number, not by pointer identity.  */
376 if (REG_P (old) && REGNO (x) == REGNO (old))
387 /* Try to simplify a unary operation CODE whose output mode is to be
388 MODE with input operand OP whose mode was originally OP_MODE.
389 Return zero if no simplification can be made. */
391 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
392 rtx op, enum machine_mode op_mode)
394 unsigned int width = GET_MODE_BITSIZE (mode);
395 rtx trueop = avoid_constant_pool_reference (op);
/* Fold VEC_DUPLICATE of a constant into an explicit CONST_VECTOR.  */
397 if (code == VEC_DUPLICATE)
399 if (!VECTOR_MODE_P (mode))
401 if (GET_MODE (trueop) != VOIDmode
402 && !VECTOR_MODE_P (GET_MODE (trueop))
403 && GET_MODE_INNER (mode) != GET_MODE (trueop))
405 if (GET_MODE (trueop) != VOIDmode
406 && VECTOR_MODE_P (GET_MODE (trueop))
407 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
409 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
410 || GET_CODE (trueop) == CONST_VECTOR)
412 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
413 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
414 rtvec v = rtvec_alloc (n_elts);
/* A scalar constant is replicated into every lane; a vector constant
   is tiled, repeating its lanes to fill the wider vector.  */
417 if (GET_CODE (trueop) != CONST_VECTOR)
418 for (i = 0; i < n_elts; i++)
419 RTVEC_ELT (v, i) = trueop;
422 enum machine_mode inmode = GET_MODE (trueop);
423 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
424 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
426 if (in_n_elts >= n_elts || n_elts % in_n_elts)
428 for (i = 0; i < n_elts; i++)
429 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
431 return gen_rtx_CONST_VECTOR (mode, v);
434 else if (GET_CODE (op) == CONST)
435 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
/* Elementwise folding: apply the unary operation to each lane of a
   constant vector; bail out if any lane fails to fold.  */
437 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
439 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
440 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
441 enum machine_mode opmode = GET_MODE (trueop);
442 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
443 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
444 rtvec v = rtvec_alloc (n_elts);
447 if (op_n_elts != n_elts)
450 for (i = 0; i < n_elts; i++)
452 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
453 CONST_VECTOR_ELT (trueop, i),
454 GET_MODE_INNER (opmode));
457 RTVEC_ELT (v, i) = x;
459 return gen_rtx_CONST_VECTOR (mode, v);
462 /* The order of these tests is critical so that, for example, we don't
463 check the wrong mode (input vs. output) for a conversion operation,
464 such as FIX. At some point, this should be simplified. */
/* FLOAT of an integer constant: build the (low, high) pair and convert
   it to a real value in MODE.  */
466 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
467 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
469 HOST_WIDE_INT hv, lv;
472 if (GET_CODE (trueop) == CONST_INT)
473 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
475 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
477 REAL_VALUE_FROM_INT (d, lv, hv, mode);
478 d = real_value_truncate (mode, d);
479 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
481 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
482 && (GET_CODE (trueop) == CONST_DOUBLE
483 || GET_CODE (trueop) == CONST_INT))
485 HOST_WIDE_INT hv, lv;
488 if (GET_CODE (trueop) == CONST_INT)
489 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
491 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
493 if (op_mode == VOIDmode)
495 /* We don't know how to interpret negative-looking numbers in
496 this case, so don't try to fold those. */
500 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
/* Narrow op_mode: mask to the mode's width so the value reads as
   unsigned.  */
503 hv = 0, lv &= GET_MODE_MASK (op_mode);
505 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
506 d = real_value_truncate (mode, d);
507 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Single-word integer constant folding: the result fits in one
   HOST_WIDE_INT, computed per opcode into VAL.  */
510 if (GET_CODE (trueop) == CONST_INT
511 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
513 HOST_WIDE_INT arg0 = INTVAL (trueop);
527 val = (arg0 >= 0 ? arg0 : - arg0);
531 /* Don't use ffs here. Instead, get low order bit and then its
532 number. If arg0 is zero, this will return 0, as desired. */
533 arg0 &= GET_MODE_MASK (mode);
534 val = exact_log2 (arg0 & (- arg0)) + 1;
538 arg0 &= GET_MODE_MASK (mode);
539 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
542 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
546 arg0 &= GET_MODE_MASK (mode);
549 /* Even if the value at zero is undefined, we have to come
550 up with some replacement. Seems good enough. */
551 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
552 val = GET_MODE_BITSIZE (mode);
555 val = exact_log2 (arg0 & -arg0);
/* Population count: clear the lowest set bit until none remain.  */
559 arg0 &= GET_MODE_MASK (mode);
562 val++, arg0 &= arg0 - 1;
566 arg0 &= GET_MODE_MASK (mode);
569 val++, arg0 &= arg0 - 1;
578 /* When zero-extending a CONST_INT, we need to know its
580 if (op_mode == VOIDmode)
582 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
584 /* If we were really extending the mode,
585 we would have to distinguish between zero-extension
586 and sign-extension. */
587 if (width != GET_MODE_BITSIZE (op_mode))
591 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
592 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
/* SIGN_EXTEND mirrors the zero-extend case, then subtracts the
   modulus if the op_mode sign bit was set.  */
598 if (op_mode == VOIDmode)
600 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
602 /* If we were really extending the mode,
603 we would have to distinguish between zero-extension
604 and sign-extension. */
605 if (width != GET_MODE_BITSIZE (op_mode))
609 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
612 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
614 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
615 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
/* Canonicalize VAL for MODE before wrapping it in a CONST_INT.  */
632 val = trunc_int_for_mode (val, mode);
634 return GEN_INT (val);
637 /* We can do some operations on integer CONST_DOUBLEs. Also allow
638 for a DImode operation on a CONST_INT. */
639 else if (GET_MODE (trueop) == VOIDmode
640 && width <= HOST_BITS_PER_WIDE_INT * 2
641 && (GET_CODE (trueop) == CONST_DOUBLE
642 || GET_CODE (trueop) == CONST_INT))
644 unsigned HOST_WIDE_INT l1, lv;
645 HOST_WIDE_INT h1, hv;
647 if (GET_CODE (trueop) == CONST_DOUBLE)
648 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
650 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
660 neg_double (l1, h1, &lv, &hv);
665 neg_double (l1, h1, &lv, &hv);
/* Double-word FFS: search the low word first, then the high word.  */
677 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
680 lv = exact_log2 (l1 & -l1) + 1;
686 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
687 - HOST_BITS_PER_WIDE_INT;
689 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
690 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
691 lv = GET_MODE_BITSIZE (mode);
697 lv = exact_log2 (l1 & -l1);
699 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
700 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
701 lv = GET_MODE_BITSIZE (mode);
724 /* This is just a change-of-mode, so do nothing. */
729 if (op_mode == VOIDmode)
732 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
736 lv = l1 & GET_MODE_MASK (op_mode);
740 if (op_mode == VOIDmode
741 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
745 lv = l1 & GET_MODE_MASK (op_mode);
746 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
747 && (lv & ((HOST_WIDE_INT) 1
748 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
749 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
/* Propagate the (possibly adjusted) low word's sign to the high word.  */
751 hv = HWI_SIGN_EXTEND (lv);
762 return immed_double_const (lv, hv, mode);
/* Constant folding of floating-point unary ops on a CONST_DOUBLE.  */
765 else if (GET_CODE (trueop) == CONST_DOUBLE
766 && GET_MODE_CLASS (mode) == MODE_FLOAT)
768 REAL_VALUE_TYPE d, t;
769 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
/* Don't fold through a signaling NaN when SNaNs must be honored.  */
774 if (HONOR_SNANS (mode) && real_isnan (&d))
776 real_sqrt (&t, mode, &d);
780 d = REAL_VALUE_ABS (d);
783 d = REAL_VALUE_NEGATE (d);
786 d = real_value_truncate (mode, d);
789 /* All this does is change the mode. */
792 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
/* Round-trip through the target representation to change modes.  */
799 real_to_target (tmp, &d, GET_MODE (trueop));
800 for (i = 0; i < 4; i++)
802 real_from_target (&d, tmp, mode);
807 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* FIX / UNSIGNED_FIX of a float constant, saturating at the target
   mode's bounds (see comment below for the rationale).  */
810 else if (GET_CODE (trueop) == CONST_DOUBLE
811 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
812 && GET_MODE_CLASS (mode) == MODE_INT
813 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
815 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
816 operators are intentionally left unspecified (to ease implementation
817 by target backends), for consistency, this routine implements the
818 same semantics for constant folding as used by the middle-end. */
820 HOST_WIDE_INT xh, xl, th, tl;
821 REAL_VALUE_TYPE x, t;
822 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
826 if (REAL_VALUE_ISNAN (x))
829 /* Test against the signed upper bound. */
830 if (width > HOST_BITS_PER_WIDE_INT)
832 th = ((unsigned HOST_WIDE_INT) 1
833 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
839 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
841 real_from_integer (&t, VOIDmode, tl, th, 0);
842 if (REAL_VALUES_LESS (t, x))
849 /* Test against the signed lower bound. */
850 if (width > HOST_BITS_PER_WIDE_INT)
852 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
858 tl = (HOST_WIDE_INT) -1 << (width - 1);
860 real_from_integer (&t, VOIDmode, tl, th, 0);
861 if (REAL_VALUES_LESS (x, t))
867 REAL_VALUE_TO_INT (&xl, &xh, x);
/* UNSIGNED_FIX: NaN and negative inputs fold to zero (middle-end
   convention), then saturate at the unsigned upper bound.  */
871 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
874 /* Test against the unsigned upper bound. */
875 if (width == 2*HOST_BITS_PER_WIDE_INT)
880 else if (width >= HOST_BITS_PER_WIDE_INT)
882 th = ((unsigned HOST_WIDE_INT) 1
883 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
889 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
891 real_from_integer (&t, VOIDmode, tl, th, 1);
892 if (REAL_VALUES_LESS (t, x))
899 REAL_VALUE_TO_INT (&xl, &xh, x);
905 return immed_double_const (xl, xh, mode);
908 /* This was formerly used only for non-IEEE float.
909 eggert@twinsun.com says it is safe for IEEE also. */
/* From here on the operand is symbolic: algebraic simplifications
   keyed on CODE (NOT, NEG, the extensions, ...).  */
912 enum rtx_code reversed;
915 /* There are some simplifications we can do even if the operands
920 /* (not (not X)) == X. */
921 if (GET_CODE (op) == NOT)
924 /* (not (eq X Y)) == (ne X Y), etc. */
925 if (COMPARISON_P (op)
926 && (mode == BImode || STORE_FLAG_VALUE == -1)
927 && ((reversed = reversed_comparison_code (op, NULL_RTX))
929 return simplify_gen_relational (reversed, mode, VOIDmode,
930 XEXP (op, 0), XEXP (op, 1));
932 /* (not (plus X -1)) can become (neg X). */
933 if (GET_CODE (op) == PLUS
934 && XEXP (op, 1) == constm1_rtx)
935 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
937 /* Similarly, (not (neg X)) is (plus X -1). */
938 if (GET_CODE (op) == NEG)
939 return plus_constant (XEXP (op, 0), -1);
941 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
942 if (GET_CODE (op) == XOR
943 && GET_CODE (XEXP (op, 1)) == CONST_INT
944 && (temp = simplify_unary_operation (NOT, mode,
947 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
949 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
950 if (GET_CODE (op) == PLUS
951 && GET_CODE (XEXP (op, 1)) == CONST_INT
952 && mode_signbit_p (mode, XEXP (op, 1))
953 && (temp = simplify_unary_operation (NOT, mode,
956 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
960 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
961 operands other than 1, but that is not valid. We could do a
962 similar simplification for (not (lshiftrt C X)) where C is
963 just the sign bit, but this doesn't seem common enough to
965 if (GET_CODE (op) == ASHIFT
966 && XEXP (op, 0) == const1_rtx)
968 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
969 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
972 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
973 by reversing the comparison code if valid. */
974 if (STORE_FLAG_VALUE == -1
976 && (reversed = reversed_comparison_code (op, NULL_RTX))
978 return simplify_gen_relational (reversed, mode, VOIDmode,
979 XEXP (op, 0), XEXP (op, 1));
981 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
982 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
983 so we can perform the above simplification. */
985 if (STORE_FLAG_VALUE == -1
986 && GET_CODE (op) == ASHIFTRT
987 && GET_CODE (XEXP (op, 1)) == CONST_INT
988 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
989 return simplify_gen_relational (GE, mode, VOIDmode,
990 XEXP (op, 0), const0_rtx);
995 /* (neg (neg X)) == X. */
996 if (GET_CODE (op) == NEG)
999 /* (neg (plus X 1)) can become (not X). */
1000 if (GET_CODE (op) == PLUS
1001 && XEXP (op, 1) == const1_rtx)
1002 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1004 /* Similarly, (neg (not X)) is (plus X 1). */
1005 if (GET_CODE (op) == NOT)
1006 return plus_constant (XEXP (op, 0), 1);
1008 /* (neg (minus X Y)) can become (minus Y X). This transformation
1009 isn't safe for modes with signed zeros, since if X and Y are
1010 both +0, (minus Y X) is the same as (minus X Y). If the
1011 rounding mode is towards +infinity (or -infinity) then the two
1012 expressions will be rounded differently. */
1013 if (GET_CODE (op) == MINUS
1014 && !HONOR_SIGNED_ZEROS (mode)
1015 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1016 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
1019 if (GET_CODE (op) == PLUS
1020 && !HONOR_SIGNED_ZEROS (mode)
1021 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1023 /* (neg (plus A C)) is simplified to (minus -C A). */
1024 if (GET_CODE (XEXP (op, 1)) == CONST_INT
1025 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
1027 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
1030 return simplify_gen_binary (MINUS, mode, temp,
1034 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1035 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1036 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1039 /* (neg (mult A B)) becomes (mult (neg A) B).
1040 This works even for floating-point values. */
1041 if (GET_CODE (op) == MULT
1042 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1044 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1045 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1048 /* NEG commutes with ASHIFT since it is multiplication. Only do
1049 this if we can then eliminate the NEG (e.g., if the operand
1051 if (GET_CODE (op) == ASHIFT)
1053 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1056 return simplify_gen_binary (ASHIFT, mode, temp,
1060 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1061 C is equal to the width of MODE minus 1. */
1062 if (GET_CODE (op) == ASHIFTRT
1063 && GET_CODE (XEXP (op, 1)) == CONST_INT
1064 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1065 return simplify_gen_binary (LSHIFTRT, mode,
1066 XEXP (op, 0), XEXP (op, 1));
1068 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1069 C is equal to the width of MODE minus 1. */
1070 if (GET_CODE (op) == LSHIFTRT
1071 && GET_CODE (XEXP (op, 1)) == CONST_INT
1072 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
1073 return simplify_gen_binary (ASHIFTRT, mode,
1074 XEXP (op, 0), XEXP (op, 1));
1079 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1080 becomes just the MINUS if its mode is MODE. This allows
1081 folding switch statements on machines using casesi (such as
1083 if (GET_CODE (op) == TRUNCATE
1084 && GET_MODE (XEXP (op, 0)) == mode
1085 && GET_CODE (XEXP (op, 0)) == MINUS
1086 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1087 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1088 return XEXP (op, 0);
1090 /* Check for a sign extension of a subreg of a promoted
1091 variable, where the promotion is sign-extended, and the
1092 target mode is the same as the variable's promotion. */
1093 if (GET_CODE (op) == SUBREG
1094 && SUBREG_PROMOTED_VAR_P (op)
1095 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1096 && GET_MODE (XEXP (op, 0)) == mode)
1097 return XEXP (op, 0);
/* Pointer extension: only when pointers sign-extend and no ptr_extend
   insn exists can SIGN_EXTEND be folded to a plain address conversion.  */
1099 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1100 if (! POINTERS_EXTEND_UNSIGNED
1101 && mode == Pmode && GET_MODE (op) == ptr_mode
1103 || (GET_CODE (op) == SUBREG
1104 && GET_CODE (SUBREG_REG (op)) == REG
1105 && REG_POINTER (SUBREG_REG (op))
1106 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1107 return convert_memory_address (Pmode, op);
1112 /* Check for a zero extension of a subreg of a promoted
1113 variable, where the promotion is zero-extended, and the
1114 target mode is the same as the variable's promotion. */
1115 if (GET_CODE (op) == SUBREG
1116 && SUBREG_PROMOTED_VAR_P (op)
1117 && SUBREG_PROMOTED_UNSIGNED_P (op)
1118 && GET_MODE (XEXP (op, 0)) == mode)
1119 return XEXP (op, 0);
/* Mirror of the SIGN_EXTEND pointer case, for unsigned extension.  */
1121 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1122 if (POINTERS_EXTEND_UNSIGNED > 0
1123 && mode == Pmode && GET_MODE (op) == ptr_mode
1125 || (GET_CODE (op) == SUBREG
1126 && GET_CODE (SUBREG_REG (op)) == REG
1127 && REG_POINTER (SUBREG_REG (op))
1128 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1129 return convert_memory_address (Pmode, op);
1141 /* Subroutine of simplify_binary_operation to simplify a commutative,
1142 associative binary operation CODE with result mode MODE, operating
1143 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1144 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1145 canonicalization is possible. */
1148 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1153 /* Linearize the operator to the left. */
1154 if (GET_CODE (op1) == code)
1156 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1157 if (GET_CODE (op0) == code)
1159 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1160 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1163 /* "a op (b op c)" becomes "(b op c) op a". */
1164 if (! swap_commutative_operands_p (op1, op0))
1165 return simplify_gen_binary (code, mode, op1, op0);
/* OP0 is a nested application of CODE: try the three rebracketings
   and commutations of "(a op b) op c" in turn.  */
1172 if (GET_CODE (op0) == code)
1174 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1175 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1177 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1178 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1181 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1182 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1183 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1184 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1)
1186 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1188 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1189 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1190 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1191 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1193 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1199 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1200 and OP1. Return 0 if no simplification is possible.
1202 Don't use this for relational operations such as EQ or LT.
1203 Use simplify_relational_operation instead. */
1206 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1209 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1211 unsigned int width = GET_MODE_BITSIZE (mode);
1212 rtx trueop0, trueop1;
1215 #ifdef ENABLE_CHECKING
1216 /* Relational operations don't work here. We must know the mode
1217 of the operands in order to do the comparison correctly.
1218 Assuming a full word can give incorrect results.
1219 Consider comparing 128 with -128 in QImode. */
1221 if (GET_RTX_CLASS (code) == RTX_COMPARE
1222 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
1226 /* Make sure the constant is second. */
1227 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1228 && swap_commutative_operands_p (op0, op1))
1230 tem = op0, op0 = op1, op1 = tem;
1233 trueop0 = avoid_constant_pool_reference (op0);
1234 trueop1 = avoid_constant_pool_reference (op1);
1236 if (VECTOR_MODE_P (mode)
1237 && GET_CODE (trueop0) == CONST_VECTOR
1238 && GET_CODE (trueop1) == CONST_VECTOR)
1240 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1241 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1242 enum machine_mode op0mode = GET_MODE (trueop0);
1243 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1244 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1245 enum machine_mode op1mode = GET_MODE (trueop1);
1246 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1247 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1248 rtvec v = rtvec_alloc (n_elts);
1251 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
1254 for (i = 0; i < n_elts; i++)
1256 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1257 CONST_VECTOR_ELT (trueop0, i),
1258 CONST_VECTOR_ELT (trueop1, i));
1261 RTVEC_ELT (v, i) = x;
1264 return gen_rtx_CONST_VECTOR (mode, v);
1267 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1268 && GET_CODE (trueop0) == CONST_DOUBLE
1269 && GET_CODE (trueop1) == CONST_DOUBLE
1270 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1281 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1283 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1285 for (i = 0; i < 4; i++)
1289 else if (code == IOR)
1291 else if (code == XOR)
1296 real_from_target (&r, tmp0, mode);
1297 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1301 REAL_VALUE_TYPE f0, f1, value;
1303 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1304 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1305 f0 = real_value_truncate (mode, f0);
1306 f1 = real_value_truncate (mode, f1);
1308 if (HONOR_SNANS (mode)
1309 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1313 && REAL_VALUES_EQUAL (f1, dconst0)
1314 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1317 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1319 value = real_value_truncate (mode, value);
1320 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1324 /* We can fold some multi-word operations. */
1325 if (GET_MODE_CLASS (mode) == MODE_INT
1326 && width == HOST_BITS_PER_WIDE_INT * 2
1327 && (GET_CODE (trueop0) == CONST_DOUBLE
1328 || GET_CODE (trueop0) == CONST_INT)
1329 && (GET_CODE (trueop1) == CONST_DOUBLE
1330 || GET_CODE (trueop1) == CONST_INT))
1332 unsigned HOST_WIDE_INT l1, l2, lv, lt;
1333 HOST_WIDE_INT h1, h2, hv, ht;
1335 if (GET_CODE (trueop0) == CONST_DOUBLE)
1336 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1338 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1340 if (GET_CODE (trueop1) == CONST_DOUBLE)
1341 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1343 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1348 /* A - B == A + (-B). */
1349 neg_double (l2, h2, &lv, &hv);
1352 /* Fall through.... */
1355 add_double (l1, h1, l2, h2, &lv, &hv);
1359 mul_double (l1, h1, l2, h2, &lv, &hv);
1363 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1364 &lv, &hv, <, &ht))
1369 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
1370 <, &ht, &lv, &hv))
1375 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1376 &lv, &hv, <, &ht))
1381 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
1382 <, &ht, &lv, &hv))
1387 lv = l1 & l2, hv = h1 & h2;
1391 lv = l1 | l2, hv = h1 | h2;
1395 lv = l1 ^ l2, hv = h1 ^ h2;
1401 && ((unsigned HOST_WIDE_INT) l1
1402 < (unsigned HOST_WIDE_INT) l2)))
1411 && ((unsigned HOST_WIDE_INT) l1
1412 > (unsigned HOST_WIDE_INT) l2)))
1419 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1421 && ((unsigned HOST_WIDE_INT) l1
1422 < (unsigned HOST_WIDE_INT) l2)))
1429 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1431 && ((unsigned HOST_WIDE_INT) l1
1432 > (unsigned HOST_WIDE_INT) l2)))
1438 case LSHIFTRT: case ASHIFTRT:
1440 case ROTATE: case ROTATERT:
1441 if (SHIFT_COUNT_TRUNCATED)
1442 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1444 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1447 if (code == LSHIFTRT || code == ASHIFTRT)
1448 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1450 else if (code == ASHIFT)
1451 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1452 else if (code == ROTATE)
1453 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1454 else /* code == ROTATERT */
1455 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1462 return immed_double_const (lv, hv, mode);
1465 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1466 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1468 /* Even if we can't compute a constant result,
1469 there are some cases worth simplifying. */
1474 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1475 when x is NaN, infinite, or finite and nonzero. They aren't
1476 when x is -0 and the rounding mode is not towards -infinity,
1477 since (-0) + 0 is then 0. */
1478 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1481 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1482 transformations are safe even for IEEE. */
1483 if (GET_CODE (op0) == NEG)
1484 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1485 else if (GET_CODE (op1) == NEG)
1486 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1488 /* (~a) + 1 -> -a */
1489 if (INTEGRAL_MODE_P (mode)
1490 && GET_CODE (op0) == NOT
1491 && trueop1 == const1_rtx)
1492 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1494 /* Handle both-operands-constant cases. We can only add
1495 CONST_INTs to constants since the sum of relocatable symbols
1496 can't be handled by most assemblers. Don't add CONST_INT
1497 to CONST_INT since overflow won't be computed properly if wider
1498 than HOST_BITS_PER_WIDE_INT. */
1500 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1501 && GET_CODE (op1) == CONST_INT)
1502 return plus_constant (op0, INTVAL (op1));
1503 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1504 && GET_CODE (op0) == CONST_INT)
1505 return plus_constant (op1, INTVAL (op0));
1507 /* See if this is something like X * C - X or vice versa or
1508 if the multiplication is written as a shift. If so, we can
1509 distribute and make a new multiply, shift, or maybe just
1510 have X (if C is 2 in the example above). But don't make
1511 real multiply if we didn't have one before. */
1513 if (! FLOAT_MODE_P (mode))
1515 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1516 rtx lhs = op0, rhs = op1;
1519 if (GET_CODE (lhs) == NEG)
1520 coeff0 = -1, lhs = XEXP (lhs, 0);
1521 else if (GET_CODE (lhs) == MULT
1522 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1524 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1527 else if (GET_CODE (lhs) == ASHIFT
1528 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1529 && INTVAL (XEXP (lhs, 1)) >= 0
1530 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1532 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1533 lhs = XEXP (lhs, 0);
1536 if (GET_CODE (rhs) == NEG)
1537 coeff1 = -1, rhs = XEXP (rhs, 0);
1538 else if (GET_CODE (rhs) == MULT
1539 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1541 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1544 else if (GET_CODE (rhs) == ASHIFT
1545 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1546 && INTVAL (XEXP (rhs, 1)) >= 0
1547 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1549 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1550 rhs = XEXP (rhs, 0);
1553 if (rtx_equal_p (lhs, rhs))
1555 tem = simplify_gen_binary (MULT, mode, lhs,
1556 GEN_INT (coeff0 + coeff1));
1557 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1561 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1562 if ((GET_CODE (op1) == CONST_INT
1563 || GET_CODE (op1) == CONST_DOUBLE)
1564 && GET_CODE (op0) == XOR
1565 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1566 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1567 && mode_signbit_p (mode, op1))
1568 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1569 simplify_gen_binary (XOR, mode, op1,
1572 /* If one of the operands is a PLUS or a MINUS, see if we can
1573 simplify this by the associative law.
1574 Don't use the associative law for floating point.
1575 The inaccuracy makes it nonassociative,
1576 and subtle programs can break if operations are associated. */
1578 if (INTEGRAL_MODE_P (mode)
1579 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1580 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1581 || (GET_CODE (op0) == CONST
1582 && GET_CODE (XEXP (op0, 0)) == PLUS)
1583 || (GET_CODE (op1) == CONST
1584 && GET_CODE (XEXP (op1, 0)) == PLUS))
1585 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1588 /* Reassociate floating point addition only when the user
1589 specifies unsafe math optimizations. */
1590 if (FLOAT_MODE_P (mode)
1591 && flag_unsafe_math_optimizations)
1593 tem = simplify_associative_operation (code, mode, op0, op1);
1601 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1602 using cc0, in which case we want to leave it as a COMPARE
1603 so we can distinguish it from a register-register-copy.
1605 In IEEE floating point, x-0 is not the same as x. */
1607 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1608 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1609 && trueop1 == CONST0_RTX (mode))
1613 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1614 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1615 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1616 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1618 rtx xop00 = XEXP (op0, 0);
1619 rtx xop10 = XEXP (op1, 0);
1622 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1624 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1625 && GET_MODE (xop00) == GET_MODE (xop10)
1626 && REGNO (xop00) == REGNO (xop10)
1627 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1628 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1635 /* We can't assume x-x is 0 even with non-IEEE floating point,
1636 but since it is zero except in very strange circumstances, we
1637 will treat it as zero with -funsafe-math-optimizations. */
1638 if (rtx_equal_p (trueop0, trueop1)
1639 && ! side_effects_p (op0)
1640 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1641 return CONST0_RTX (mode);
1643 /* Change subtraction from zero into negation. (0 - x) is the
1644 same as -x when x is NaN, infinite, or finite and nonzero.
1645 But if the mode has signed zeros, and does not round towards
1646 -infinity, then 0 - 0 is 0, not -0. */
1647 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1648 return simplify_gen_unary (NEG, mode, op1, mode);
1650 /* (-1 - a) is ~a. */
1651 if (trueop0 == constm1_rtx)
1652 return simplify_gen_unary (NOT, mode, op1, mode);
1654 /* Subtracting 0 has no effect unless the mode has signed zeros
1655 and supports rounding towards -infinity. In such a case,
1657 if (!(HONOR_SIGNED_ZEROS (mode)
1658 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1659 && trueop1 == CONST0_RTX (mode))
1662 /* See if this is something like X * C - X or vice versa or
1663 if the multiplication is written as a shift. If so, we can
1664 distribute and make a new multiply, shift, or maybe just
1665 have X (if C is 2 in the example above). But don't make
1666 real multiply if we didn't have one before. */
1668 if (! FLOAT_MODE_P (mode))
1670 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1671 rtx lhs = op0, rhs = op1;
1674 if (GET_CODE (lhs) == NEG)
1675 coeff0 = -1, lhs = XEXP (lhs, 0);
1676 else if (GET_CODE (lhs) == MULT
1677 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1679 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1682 else if (GET_CODE (lhs) == ASHIFT
1683 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1684 && INTVAL (XEXP (lhs, 1)) >= 0
1685 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1687 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1688 lhs = XEXP (lhs, 0);
1691 if (GET_CODE (rhs) == NEG)
1692 coeff1 = - 1, rhs = XEXP (rhs, 0);
1693 else if (GET_CODE (rhs) == MULT
1694 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1696 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1699 else if (GET_CODE (rhs) == ASHIFT
1700 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1701 && INTVAL (XEXP (rhs, 1)) >= 0
1702 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1704 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1705 rhs = XEXP (rhs, 0);
1708 if (rtx_equal_p (lhs, rhs))
1710 tem = simplify_gen_binary (MULT, mode, lhs,
1711 GEN_INT (coeff0 - coeff1));
1712 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1716 /* (a - (-b)) -> (a + b). True even for IEEE. */
1717 if (GET_CODE (op1) == NEG)
1718 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1720 /* (-x - c) may be simplified as (-c - x). */
1721 if (GET_CODE (op0) == NEG
1722 && (GET_CODE (op1) == CONST_INT
1723 || GET_CODE (op1) == CONST_DOUBLE))
1725 tem = simplify_unary_operation (NEG, mode, op1, mode);
1727 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1730 /* If one of the operands is a PLUS or a MINUS, see if we can
1731 simplify this by the associative law.
1732 Don't use the associative law for floating point.
1733 The inaccuracy makes it nonassociative,
1734 and subtle programs can break if operations are associated. */
1736 if (INTEGRAL_MODE_P (mode)
1737 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1738 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1739 || (GET_CODE (op0) == CONST
1740 && GET_CODE (XEXP (op0, 0)) == PLUS)
1741 || (GET_CODE (op1) == CONST
1742 && GET_CODE (XEXP (op1, 0)) == PLUS))
1743 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1746 /* Don't let a relocatable value get a negative coeff. */
1747 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1748 return simplify_gen_binary (PLUS, mode,
1750 neg_const_int (mode, op1));
1752 /* (x - (x & y)) -> (x & ~y) */
1753 if (GET_CODE (op1) == AND)
1755 if (rtx_equal_p (op0, XEXP (op1, 0)))
1757 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1758 GET_MODE (XEXP (op1, 1)));
1759 return simplify_gen_binary (AND, mode, op0, tem);
1761 if (rtx_equal_p (op0, XEXP (op1, 1)))
1763 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1764 GET_MODE (XEXP (op1, 0)));
1765 return simplify_gen_binary (AND, mode, op0, tem);
1771 if (trueop1 == constm1_rtx)
1772 return simplify_gen_unary (NEG, mode, op0, mode);
1774 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1775 x is NaN, since x * 0 is then also NaN. Nor is it valid
1776 when the mode has signed zeros, since multiplying a negative
1777 number by 0 will give -0, not 0. */
1778 if (!HONOR_NANS (mode)
1779 && !HONOR_SIGNED_ZEROS (mode)
1780 && trueop1 == CONST0_RTX (mode)
1781 && ! side_effects_p (op0))
1784 /* In IEEE floating point, x*1 is not equivalent to x for
1786 if (!HONOR_SNANS (mode)
1787 && trueop1 == CONST1_RTX (mode))
1790 /* Convert multiply by constant power of two into shift unless
1791 we are still generating RTL. This test is a kludge. */
1792 if (GET_CODE (trueop1) == CONST_INT
1793 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1794 /* If the mode is larger than the host word size, and the
1795 uppermost bit is set, then this isn't a power of two due
1796 to implicit sign extension. */
1797 && (width <= HOST_BITS_PER_WIDE_INT
1798 || val != HOST_BITS_PER_WIDE_INT - 1)
1799 && ! rtx_equal_function_value_matters)
1800 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1802 /* x*2 is x+x and x*(-1) is -x */
1803 if (GET_CODE (trueop1) == CONST_DOUBLE
1804 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1805 && GET_MODE (op0) == mode)
1808 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1810 if (REAL_VALUES_EQUAL (d, dconst2))
1811 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1813 if (REAL_VALUES_EQUAL (d, dconstm1))
1814 return simplify_gen_unary (NEG, mode, op0, mode);
1817 /* Reassociate multiplication, but for floating point MULTs
1818 only when the user specifies unsafe math optimizations. */
1819 if (! FLOAT_MODE_P (mode)
1820 || flag_unsafe_math_optimizations)
1822 tem = simplify_associative_operation (code, mode, op0, op1);
1829 if (trueop1 == const0_rtx)
1831 if (GET_CODE (trueop1) == CONST_INT
1832 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1833 == GET_MODE_MASK (mode)))
1835 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1837 /* A | (~A) -> -1 */
1838 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1839 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1840 && ! side_effects_p (op0)
1841 && GET_MODE_CLASS (mode) != MODE_CC)
1843 tem = simplify_associative_operation (code, mode, op0, op1);
1849 if (trueop1 == const0_rtx)
1851 if (GET_CODE (trueop1) == CONST_INT
1852 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1853 == GET_MODE_MASK (mode)))
1854 return simplify_gen_unary (NOT, mode, op0, mode);
1855 if (trueop0 == trueop1
1856 && ! side_effects_p (op0)
1857 && GET_MODE_CLASS (mode) != MODE_CC)
1860 /* Canonicalize XOR of the most significant bit to PLUS. */
1861 if ((GET_CODE (op1) == CONST_INT
1862 || GET_CODE (op1) == CONST_DOUBLE)
1863 && mode_signbit_p (mode, op1))
1864 return simplify_gen_binary (PLUS, mode, op0, op1);
1865 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
1866 if ((GET_CODE (op1) == CONST_INT
1867 || GET_CODE (op1) == CONST_DOUBLE)
1868 && GET_CODE (op0) == PLUS
1869 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1870 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1871 && mode_signbit_p (mode, XEXP (op0, 1)))
1872 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1873 simplify_gen_binary (XOR, mode, op1,
1876 tem = simplify_associative_operation (code, mode, op0, op1);
1882 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1884 if (GET_CODE (trueop1) == CONST_INT
1885 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1886 == GET_MODE_MASK (mode)))
1888 if (trueop0 == trueop1 && ! side_effects_p (op0)
1889 && GET_MODE_CLASS (mode) != MODE_CC)
1892 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1893 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1894 && ! side_effects_p (op0)
1895 && GET_MODE_CLASS (mode) != MODE_CC)
1897 tem = simplify_associative_operation (code, mode, op0, op1);
1903 /* 0/x is 0 (or x&0 if x has side-effects). */
1904 if (trueop0 == const0_rtx)
1905 return side_effects_p (op1)
1906 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1909 if (trueop1 == const1_rtx)
1911 /* Handle narrowing UDIV. */
1912 rtx x = gen_lowpart_common (mode, op0);
1915 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1916 return gen_lowpart_SUBREG (mode, op0);
1919 /* Convert divide by power of two into shift. */
1920 if (GET_CODE (trueop1) == CONST_INT
1921 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1922 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1926 /* Handle floating point and integers separately. */
1927 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1929 /* Maybe change 0.0 / x to 0.0. This transformation isn't
1930 safe for modes with NaNs, since 0.0 / 0.0 will then be
1931 NaN rather than 0.0. Nor is it safe for modes with signed
1932 zeros, since dividing 0 by a negative number gives -0.0 */
1933 if (trueop0 == CONST0_RTX (mode)
1934 && !HONOR_NANS (mode)
1935 && !HONOR_SIGNED_ZEROS (mode)
1936 && ! side_effects_p (op1))
1939 if (trueop1 == CONST1_RTX (mode)
1940 && !HONOR_SNANS (mode))
1943 if (GET_CODE (trueop1) == CONST_DOUBLE
1944 && trueop1 != CONST0_RTX (mode))
1947 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1950 if (REAL_VALUES_EQUAL (d, dconstm1)
1951 && !HONOR_SNANS (mode))
1952 return simplify_gen_unary (NEG, mode, op0, mode);
1954 /* Change FP division by a constant into multiplication.
1955 Only do this with -funsafe-math-optimizations. */
1956 if (flag_unsafe_math_optimizations
1957 && !REAL_VALUES_EQUAL (d, dconst0))
1959 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
1960 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1961 return simplify_gen_binary (MULT, mode, op0, tem);
1967 /* 0/x is 0 (or x&0 if x has side-effects). */
1968 if (trueop0 == const0_rtx)
1969 return side_effects_p (op1)
1970 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
1973 if (trueop1 == const1_rtx)
1975 /* Handle narrowing DIV. */
1976 rtx x = gen_lowpart_common (mode, op0);
1979 if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1980 return gen_lowpart_SUBREG (mode, op0);
1984 if (trueop1 == constm1_rtx)
1986 rtx x = gen_lowpart_common (mode, op0);
1988 x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1989 ? gen_lowpart_SUBREG (mode, op0) : op0;
1990 return simplify_gen_unary (NEG, mode, x, mode);
1996 /* 0%x is 0 (or x&0 if x has side-effects). */
1997 if (trueop0 == const0_rtx)
1998 return side_effects_p (op1)
1999 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2001 /* x%1 is 0 (of x&0 if x has side-effects). */
2002 if (trueop1 == const1_rtx)
2003 return side_effects_p (op0)
2004 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2006 /* Implement modulus by power of two as AND. */
2007 if (GET_CODE (trueop1) == CONST_INT
2008 && exact_log2 (INTVAL (trueop1)) > 0)
2009 return simplify_gen_binary (AND, mode, op0,
2010 GEN_INT (INTVAL (op1) - 1));
2014 /* 0%x is 0 (or x&0 if x has side-effects). */
2015 if (trueop0 == const0_rtx)
2016 return side_effects_p (op1)
2017 ? simplify_gen_binary (AND, mode, op1, const0_rtx)
2019 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2020 if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
2021 return side_effects_p (op0)
2022 ? simplify_gen_binary (AND, mode, op0, const0_rtx)
2029 /* Rotating ~0 always results in ~0. */
2030 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2031 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2032 && ! side_effects_p (op1))
2035 /* Fall through.... */
2039 if (trueop1 == const0_rtx)
2041 if (trueop0 == const0_rtx && ! side_effects_p (op1))
2046 if (width <= HOST_BITS_PER_WIDE_INT
2047 && GET_CODE (trueop1) == CONST_INT
2048 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2049 && ! side_effects_p (op0))
2051 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2053 tem = simplify_associative_operation (code, mode, op0, op1);
2059 if (width <= HOST_BITS_PER_WIDE_INT
2060 && GET_CODE (trueop1) == CONST_INT
2061 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2062 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2063 && ! side_effects_p (op0))
2065 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2067 tem = simplify_associative_operation (code, mode, op0, op1);
2073 if (trueop1 == const0_rtx && ! side_effects_p (op0))
2075 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2077 tem = simplify_associative_operation (code, mode, op0, op1);
2083 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2085 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2087 tem = simplify_associative_operation (code, mode, op0, op1);
2096 /* ??? There are simplifications that can be done. */
2100 if (!VECTOR_MODE_P (mode))
2102 if (!VECTOR_MODE_P (GET_MODE (trueop0))
2104 != GET_MODE_INNER (GET_MODE (trueop0)))
2105 || GET_CODE (trueop1) != PARALLEL
2106 || XVECLEN (trueop1, 0) != 1
2107 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
2110 if (GET_CODE (trueop0) == CONST_VECTOR)
2111 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
2115 if (!VECTOR_MODE_P (GET_MODE (trueop0))
2116 || (GET_MODE_INNER (mode)
2117 != GET_MODE_INNER (GET_MODE (trueop0)))
2118 || GET_CODE (trueop1) != PARALLEL)
2121 if (GET_CODE (trueop0) == CONST_VECTOR)
2123 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2124 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2125 rtvec v = rtvec_alloc (n_elts);
2128 if (XVECLEN (trueop1, 0) != (int) n_elts)
2130 for (i = 0; i < n_elts; i++)
2132 rtx x = XVECEXP (trueop1, 0, i);
2134 if (GET_CODE (x) != CONST_INT)
2136 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
2139 return gen_rtx_CONST_VECTOR (mode, v);
2145 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2146 ? GET_MODE (trueop0)
2147 : GET_MODE_INNER (mode));
2148 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2149 ? GET_MODE (trueop1)
2150 : GET_MODE_INNER (mode));
2152 if (!VECTOR_MODE_P (mode)
2153 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2154 != GET_MODE_SIZE (mode)))
2157 if ((VECTOR_MODE_P (op0_mode)
2158 && (GET_MODE_INNER (mode)
2159 != GET_MODE_INNER (op0_mode)))
2160 || (!VECTOR_MODE_P (op0_mode)
2161 && GET_MODE_INNER (mode) != op0_mode))
2164 if ((VECTOR_MODE_P (op1_mode)
2165 && (GET_MODE_INNER (mode)
2166 != GET_MODE_INNER (op1_mode)))
2167 || (!VECTOR_MODE_P (op1_mode)
2168 && GET_MODE_INNER (mode) != op1_mode))
2171 if ((GET_CODE (trueop0) == CONST_VECTOR
2172 || GET_CODE (trueop0) == CONST_INT
2173 || GET_CODE (trueop0) == CONST_DOUBLE)
2174 && (GET_CODE (trueop1) == CONST_VECTOR
2175 || GET_CODE (trueop1) == CONST_INT
2176 || GET_CODE (trueop1) == CONST_DOUBLE))
2178 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2179 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2180 rtvec v = rtvec_alloc (n_elts);
2182 unsigned in_n_elts = 1;
2184 if (VECTOR_MODE_P (op0_mode))
2185 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2186 for (i = 0; i < n_elts; i++)
2190 if (!VECTOR_MODE_P (op0_mode))
2191 RTVEC_ELT (v, i) = trueop0;
2193 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2197 if (!VECTOR_MODE_P (op1_mode))
2198 RTVEC_ELT (v, i) = trueop1;
2200 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2205 return gen_rtx_CONST_VECTOR (mode, v);
2217 /* Get the integer argument values in two forms:
2218 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2220 arg0 = INTVAL (trueop0);
2221 arg1 = INTVAL (trueop1);
2223 if (width < HOST_BITS_PER_WIDE_INT)
2225 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2226 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2229 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2230 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2233 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2234 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2242 /* Compute the value of the arithmetic. */
2247 val = arg0s + arg1s;
2251 val = arg0s - arg1s;
2255 val = arg0s * arg1s;
2260 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2263 val = arg0s / arg1s;
2268 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2271 val = arg0s % arg1s;
2276 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2279 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2284 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2287 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2303 /* If shift count is undefined, don't fold it; let the machine do
2304 what it wants. But truncate it if the machine will do that. */
2308 if (SHIFT_COUNT_TRUNCATED)
2311 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2318 if (SHIFT_COUNT_TRUNCATED)
2321 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2328 if (SHIFT_COUNT_TRUNCATED)
2331 val = arg0s >> arg1;
2333 /* Bootstrap compiler may not have sign extended the right shift.
2334 Manually extend the sign to insure bootstrap cc matches gcc. */
2335 if (arg0s < 0 && arg1 > 0)
2336 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2345 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2346 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2354 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2355 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2359 /* Do nothing here. */
2363 val = arg0s <= arg1s ? arg0s : arg1s;
2367 val = ((unsigned HOST_WIDE_INT) arg0
2368 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2372 val = arg0s > arg1s ? arg0s : arg1s;
2376 val = ((unsigned HOST_WIDE_INT) arg0
2377 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2384 /* ??? There are simplifications that can be done. */
2391 val = trunc_int_for_mode (val, mode);
2393 return GEN_INT (val);
2396 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2399 Rather than test for specific case, we do this by a brute-force method
2400 and do all possible simplifications until no more changes occur. Then
2401 we rebuild the operation.
2403 If FORCE is true, then always generate the rtx. This is used to
2404 canonicalize stuff emitted from simplify_gen_binary. Note that this
2405 can still fail if the rtx is too complex. It won't fail just because
2406 the result is not 'simpler' than the input, however. */
2408 struct simplify_plus_minus_op_data
/* qsort comparator for simplify_plus_minus's operand array.  Orders
   entries by DESCENDING commutative_operand_precedence of their ->op
   field (d2 before d1 in the subtraction), so higher-precedence
   operands sort first.
   NOTE(review): this listing is elided — the `static int` line, braces,
   and intervening lines are missing from the visible text.  */
2415 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2417   const struct simplify_plus_minus_op_data *d1 = p1;
2418   const struct simplify_plus_minus_op_data *d2 = p2;
2420   return (commutative_operand_precedence (d2->op)
2421 	  - commutative_operand_precedence (d1->op));
/* Flatten and re-simplify a PLUS/MINUS expression tree by brute force:
   decompose OP0 and OP1 into a flat array of (operand, negated-flag)
   pairs, simplify pairs against each other until nothing changes, then
   rebuild a (hopefully smaller) PLUS/MINUS chain.
   NOTE(review): this listing is heavily elided — original line numbers
   skip throughout, so braces, declarations (e.g. `i`, `j`, `tem`,
   `first`, `result`, the `force` parameter) and several control-flow
   headers are missing from the visible text.  Comments below describe
   only the visible lines; claims about elided context are hedged.  */
2425 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
/* Fixed-capacity workspace; expansion gives up if it would overflow
   the 8 entries (see the header comment below).  */
2428   struct simplify_plus_minus_op_data ops[8];
2430   int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2434   memset (ops, 0, sizeof ops);
2436   /* Set up the two operands and then expand them until nothing has been
2437      changed.  If we run out of room in our array, give up; this should
2438      almost never happen.  */
/* For MINUS, the second operand enters the array negated.  */
2443   ops[1].neg = (code == MINUS);
/* Expansion pass: split PLUS/MINUS/NEG/CONST/NOT entries in place.
   (The enclosing loop/switch structure is elided here.)  */
2449       for (i = 0; i < n_ops; i++)
2451 	  rtx this_op = ops[i].op;
2452 	  int this_neg = ops[i].neg;
2453 	  enum rtx_code this_code = GET_CODE (this_op);
/* PLUS/MINUS entry: push its second arm as a new operand (negated if
   this was a MINUS, XORed with our own negation) and keep the first
   arm in place.  */
2462 	      ops[n_ops].op = XEXP (this_op, 1);
2463 	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2466 	      ops[i].op = XEXP (this_op, 0);
/* NEG entry (presumably — the case label is elided): strip the NEG
   and flip the negation flag.  */
2472 	      ops[i].op = XEXP (this_op, 0);
2473 	      ops[i].neg = ! this_neg;
/* CONST wrapping (PLUS const const): unwrap into two operands.  */
2479 		  && GET_CODE (XEXP (this_op, 0)) == PLUS
2480 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2481 		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2483 		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
2484 		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2485 		  ops[n_ops].neg = this_neg;
2493 	      /* ~a -> (-a - 1) */
2496 		  ops[n_ops].op = constm1_rtx;
2497 		  ops[n_ops++].neg = this_neg;
2498 		  ops[i].op = XEXP (this_op, 0);
2499 		  ops[i].neg = !this_neg;
/* Negated CONST_INT: fold the negation into the constant itself.  */
2507 		  ops[i].op = neg_const_int (mode, this_op);
2520   /* If we only have two operands, we can't do anything.  */
2521   if (n_ops <= 2 && !force)
2524   /* Count the number of CONSTs we didn't split above.  */
2525   for (i = 0; i < n_ops; i++)
2526     if (GET_CODE (ops[i].op) == CONST)
2529   /* Now simplify each pair of operands until nothing changes.  The first
2530      time through just simplify constants against each other.  */
2537       for (i = 0; i < n_ops - 1; i++)
2538 	for (j = i + 1; j < n_ops; j++)
2540 	    rtx lhs = ops[i].op, rhs = ops[j].op;
2541 	    int lneg = ops[i].neg, rneg = ops[j].neg;
/* On the first pass only constant/constant pairs are combined, so the
   -1 from any ~a expansion can merge with other constants first.  */
2543 	    if (lhs != 0 && rhs != 0
2544 		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2546 		enum rtx_code ncode = PLUS;
/* Canonicalize operand order before trying to simplify the pair.  */
2552 		    tem = lhs, lhs = rhs, rhs = tem;
2554 		else if (swap_commutative_operands_p (lhs, rhs))
2555 		  tem = lhs, lhs = rhs, rhs = tem;
2557 		tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2559 		/* Reject "simplifications" that just wrap the two
2560 		   arguments in a CONST.  Failure to do so can result
2561 		   in infinite recursion with simplify_binary_operation
2562 		   when it calls us to simplify CONST operations.  */
2564 		    && ! (GET_CODE (tem) == CONST
2565 			  && GET_CODE (XEXP (tem, 0)) == ncode
2566 			  && XEXP (XEXP (tem, 0), 0) == lhs
2567 			  && XEXP (XEXP (tem, 0), 1) == rhs)
2568 		    /* Don't allow -x + -1 -> ~x simplifications in the
2569 		       first pass.  This allows us the chance to combine
2570 		       the -1 with other constants.  */
2572 			  && GET_CODE (tem) == NOT
2573 			  && XEXP (tem, 0) == rhs))
/* Normalize the simplified pair: pull a NEG out into the flag, and
   fold a negated CONST_INT into the constant.  */
2576 		    if (GET_CODE (tem) == NEG)
2577 		      tem = XEXP (tem, 0), lneg = !lneg;
2578 		    if (GET_CODE (tem) == CONST_INT && lneg)
2579 		      tem = neg_const_int (mode, tem), lneg = 0;
/* Slot j is consumed; it is compacted away below.  */
2583 		    ops[j].op = NULL_RTX;
2593       /* Pack all the operands to the lower-numbered entries.  */
2594       for (i = 0, j = 0; j < n_ops; j++)
2599   /* Sort the operations based on swap_commutative_operands_p.  */
2600   qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2602   /* Create (minus -C X) instead of (neg (const (plus X C))).  */
2604       && GET_CODE (ops[1].op) == CONST_INT
2605       && CONSTANT_P (ops[0].op)
2607     return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2609   /* We suppressed creation of trivial CONST expressions in the
2610      combination loop to avoid recursion.  Create one manually now.
2611      The combination loop should have ensured that there is exactly
2612      one CONST_INT, and the sort will have ensured that it is last
2613      in the array and that any other constant will be next-to-last.  */
2616       && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2617       && CONSTANT_P (ops[n_ops - 2].op))
2619       rtx value = ops[n_ops - 1].op;
2620       if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2621 	value = neg_const_int (mode, value);
2622       ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2626   /* Count the number of CONSTs that we generated.  */
2628   for (i = 0; i < n_ops; i++)
2629     if (GET_CODE (ops[i].op) == CONST)
2632   /* Give up if we didn't reduce the number of operands we had.  Make
2633      sure we count a CONST as two operands.  If we have the same
2634      number of operands, but have made more CONSTs than before, this
2635      is also an improvement, so accept it.  */
2637       && (n_ops + n_consts > input_ops
2638 	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2641   /* Put a non-negated operand first, if possible.  */
2643   for (i = 0; i < n_ops && ops[i].neg; i++)
/* If every operand is negated, re-wrap the first one in an explicit
   NEG so the rebuilt chain starts from a non-negated value.  */
2646       ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2655   /* Now make the result by performing the requested operations.  */
2657   for (i = 1; i < n_ops; i++)
2658     result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2659 			     mode, result, ops[i].op);
2664 /* Like simplify_binary_operation except used for relational operators.
2665 MODE is the mode of the operands, not that of the result. If MODE
2666 is VOIDmode, both operands must also be VOIDmode and we compare the
2667 operands in "infinite precision".
2669 If no simplification is possible, this function returns zero.
2670 Otherwise, it returns either const_true_rtx or const0_rtx. */
/* NOTE(review): this excerpt is elided -- interior lines (braces,
   switch labels, some returns) are missing, so the comments below
   describe only the code that is visible here.  */
2673 simplify_const_relational_operation (enum rtx_code code,
2674 enum machine_mode mode,
/* These flags record, once the operands have been evaluated, whether
   they are equal and how they order under signed (op0lt/op1lt) and
   unsigned (op0ltu/op1ltu) comparison; the final dispatch on CODE
   selects among them.  */
2677 int equal, op0lt, op0ltu, op1lt, op1ltu;
/* A VOIDmode ("infinite precision") comparison requires both operands
   to be VOIDmode as well; mismatches are rejected here.  */
2682 if (mode == VOIDmode
2683 && (GET_MODE (op0) != VOIDmode
2684 || GET_MODE (op1) != VOIDmode))
2687 /* If op0 is a compare, extract the comparison arguments from it. */
2688 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2689 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2691 /* We can't simplify MODE_CC values since we don't know what the
2692 actual comparison is. */
2693 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2696 /* Make sure the constant is second. */
2697 if (swap_commutative_operands_p (op0, op1))
2699 tem = op0, op0 = op1, op1 = tem;
2700 code = swap_condition (code);
/* Look through constant-pool loads so constant operands are
   recognized even when they are expressed as pool references.  */
2703 trueop0 = avoid_constant_pool_reference (op0);
2704 trueop1 = avoid_constant_pool_reference (op1);
2706 /* For integer comparisons of A and B maybe we can simplify A - B and can
2707 then simplify a comparison of that with zero. If A and B are both either
2708 a register or a CONST_INT, this can't help; testing for these cases will
2709 prevent infinite recursion here and speed things up.
2711 If CODE is an unsigned comparison, then we can never do this optimization,
2712 because it gives an incorrect result if the subtraction wraps around zero.
2713 ANSI C defines unsigned operations such that they never overflow, and
2714 thus such cases can not be ignored; but we cannot do it even for
2715 signed comparisons for languages such as Java, so test flag_wrapv. */
2717 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2718 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2719 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2720 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2721 /* We cannot do this for == or != if tem is a nonzero address. */
2722 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2723 && code != GTU && code != GEU && code != LTU && code != LEU)
2724 return simplify_const_relational_operation (signed_condition (code),
2725 mode, tem, const0_rtx);
/* With -funsafe-math-optimizations we assume no NaNs, so ORDERED is
   always true (UNORDERED handled symmetrically just below).  */
2727 if (flag_unsafe_math_optimizations && code == ORDERED)
2728 return const_true_rtx;
2730 if (flag_unsafe_math_optimizations && code == UNORDERED)
2733 /* For modes without NaNs, if the two operands are equal, we know the
2734 result except if they have side-effects. */
2735 if (! HONOR_NANS (GET_MODE (trueop0))
2736 && rtx_equal_p (trueop0, trueop1)
2737 && ! side_effects_p (trueop0))
2738 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2740 /* If the operands are floating-point constants, see if we can fold
2742 else if (GET_CODE (trueop0) == CONST_DOUBLE
2743 && GET_CODE (trueop1) == CONST_DOUBLE
2744 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2746 REAL_VALUE_TYPE d0, d1;
2748 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2749 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2751 /* Comparisons are unordered iff at least one of the values is NaN. */
2752 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2762 return const_true_rtx;
2775 equal = REAL_VALUES_EQUAL (d0, d1);
2776 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2777 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2780 /* Otherwise, see if the operands are both integers. */
2781 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2782 && (GET_CODE (trueop0) == CONST_DOUBLE
2783 || GET_CODE (trueop0) == CONST_INT)
2784 && (GET_CODE (trueop1) == CONST_DOUBLE
2785 || GET_CODE (trueop1) == CONST_INT))
/* Each constant is handled as a (low, high) word pair kept in both
   signed (l?s/h?s) and unsigned (l?u/h?u) flavors so that both the
   signed and unsigned orderings can be computed below.  */
2787 int width = GET_MODE_BITSIZE (mode);
2788 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2789 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2791 /* Get the two words comprising each integer constant. */
2792 if (GET_CODE (trueop0) == CONST_DOUBLE)
2794 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2795 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2799 l0u = l0s = INTVAL (trueop0);
2800 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2803 if (GET_CODE (trueop1) == CONST_DOUBLE)
2805 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2806 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2810 l1u = l1s = INTVAL (trueop1);
2811 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2814 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2815 we have to sign or zero-extend the values. */
2816 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
/* Mask the unsigned copies down to WIDTH bits, then manually
   sign-extend the signed copies when the mode's sign bit is set.  */
2818 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2819 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2821 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2822 l0s |= ((HOST_WIDE_INT) (-1) << width);
2824 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2825 l1s |= ((HOST_WIDE_INT) (-1) << width);
2827 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2828 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
/* Double-wide comparison: the high words decide; equal high words are
   tie-broken by the low words, which always compare unsigned.  */
2830 equal = (h0u == h1u && l0u == l1u);
2831 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2832 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2833 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2834 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2837 /* Otherwise, there are some code-specific tests we can make. */
/* (The switch over CODE is elided in this excerpt; the fragments below
   use known facts -- nonzero addresses, unsigned value ranges, and the
   non-negativity of abs() -- to decide specific comparison codes.)  */
2843 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2848 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2849 return const_true_rtx;
2853 /* Unsigned values are never negative. */
2854 if (trueop1 == const0_rtx)
2855 return const_true_rtx;
2859 if (trueop1 == const0_rtx)
2864 /* Unsigned values are never greater than the largest
2866 if (GET_CODE (trueop1) == CONST_INT
2867 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2868 && INTEGRAL_MODE_P (mode))
2869 return const_true_rtx;
2873 if (GET_CODE (trueop1) == CONST_INT
2874 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2875 && INTEGRAL_MODE_P (mode))
2880 /* Optimize abs(x) < 0.0. */
2881 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2883 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2885 if (GET_CODE (tem) == ABS)
2891 /* Optimize abs(x) >= 0.0. */
2892 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2894 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2896 if (GET_CODE (tem) == ABS)
2897 return const_true_rtx;
2902 /* Optimize ! (abs(x) < 0.0). */
2903 if (trueop1 == CONST0_RTX (mode))
2905 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2907 if (GET_CODE (tem) == ABS)
2908 return const_true_rtx;
2919 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
/* Final dispatch: map the relational CODE onto the precomputed
   relation flags (the case labels are elided in this excerpt; each
   return below corresponds to one comparison code).  */
2925 return equal ? const_true_rtx : const0_rtx;
2928 return ! equal ? const_true_rtx : const0_rtx;
2931 return op0lt ? const_true_rtx : const0_rtx;
2934 return op1lt ? const_true_rtx : const0_rtx;
2936 return op0ltu ? const_true_rtx : const0_rtx;
2938 return op1ltu ? const_true_rtx : const0_rtx;
2941 return equal || op0lt ? const_true_rtx : const0_rtx;
2944 return equal || op1lt ? const_true_rtx : const0_rtx;
2946 return equal || op0ltu ? const_true_rtx : const0_rtx;
2948 return equal || op1ltu ? const_true_rtx : const0_rtx;
2950 return const_true_rtx;
2958 /* Like simplify_binary_operation except used for relational operators.
2959 MODE is the mode of the result, and CMP_MODE is the mode of the operands.
2960 If CMP_MODE is VOIDmode, both operands must also be VOIDmode and we
2961 compare the operands in "infinite precision". */
2964 simplify_relational_operation (enum rtx_code code,
2965 enum machine_mode mode ATTRIBUTE_UNUSED,
2966 enum machine_mode cmp_mode, rtx op0, rtx op1)
/* Fold the comparison itself; TMP is const_true_rtx, const0_rtx, or 0
   when no folding was possible.  (The early-exit for the 0 case is
   elided in this excerpt.)  */
2970 tmp = simplify_const_relational_operation (code, cmp_mode, op0, op1);
/* When the result mode is floating point, translate the boolean
   result into the target's FLOAT_STORE_FLAG_VALUE representation
   instead of an integer constant.  */
2973 #ifdef FLOAT_STORE_FLAG_VALUE
2974 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2976 if (tmp == const0_rtx)
2977 return CONST0_RTX (mode);
2978 return CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
2988 /* Simplify CODE, an operation with result mode MODE and three operands,
2989 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2990 a constant. Return 0 if no simplifications is possible. */
/* NOTE(review): this excerpt is elided -- the switch over CODE and
   several braces/returns are missing; the fragments below correspond
   to the SIGN_EXTRACT/ZERO_EXTRACT, IF_THEN_ELSE and VEC_MERGE cases.  */
2993 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2994 enum machine_mode op0_mode, rtx op0, rtx op1,
2997 unsigned int width = GET_MODE_BITSIZE (mode);
2999 /* VOIDmode means "infinite" precision. */
3001 width = HOST_BITS_PER_WIDE_INT;
/* Bit-field extraction from a constant: OP1 is the field length and
   OP2 the starting position, both of which must fit in WIDTH.  */
3007 if (GET_CODE (op0) == CONST_INT
3008 && GET_CODE (op1) == CONST_INT
3009 && GET_CODE (op2) == CONST_INT
3010 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3011 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3013 /* Extracting a bit-field from a constant */
3014 HOST_WIDE_INT val = INTVAL (op0);
3016 if (BITS_BIG_ENDIAN)
3017 val >>= (GET_MODE_BITSIZE (op0_mode)
3018 - INTVAL (op2) - INTVAL (op1));
3020 val >>= INTVAL (op2);
3022 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3024 /* First zero-extend. */
3025 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3026 /* If desired, propagate sign bit. */
3027 if (code == SIGN_EXTRACT
3028 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3029 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3032 /* Clear the bits that don't belong in our mode,
3033 unless they and our sign bit are all one.
3034 So we get either a reasonable negative value or a reasonable
3035 unsigned value for this mode. */
3036 if (width < HOST_BITS_PER_WIDE_INT
3037 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3038 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3039 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3041 return GEN_INT (val);
/* IF_THEN_ELSE with a constant condition folds to one arm.  */
3046 if (GET_CODE (op0) == CONST_INT)
3047 return op0 != const0_rtx ? op1 : op2;
3049 /* Convert c ? a : a into "a". */
3050 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3053 /* Convert a != b ? a : b into "a". */
3054 if (GET_CODE (op0) == NE
3055 && ! side_effects_p (op0)
3056 && ! HONOR_NANS (mode)
3057 && ! HONOR_SIGNED_ZEROS (mode)
3058 && ((rtx_equal_p (XEXP (op0, 0), op1)
3059 && rtx_equal_p (XEXP (op0, 1), op2))
3060 || (rtx_equal_p (XEXP (op0, 0), op2)
3061 && rtx_equal_p (XEXP (op0, 1), op1))))
3064 /* Convert a == b ? a : b into "b". */
3065 if (GET_CODE (op0) == EQ
3066 && ! side_effects_p (op0)
3067 && ! HONOR_NANS (mode)
3068 && ! HONOR_SIGNED_ZEROS (mode)
3069 && ((rtx_equal_p (XEXP (op0, 0), op1)
3070 && rtx_equal_p (XEXP (op0, 1), op2))
3071 || (rtx_equal_p (XEXP (op0, 0), op2)
3072 && rtx_equal_p (XEXP (op0, 1), op1))))
/* Try to fold the comparison in the condition; if it folds to a
   constant the whole IF_THEN_ELSE collapses to one arm.  */
3075 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3077 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3078 ? GET_MODE (XEXP (op0, 1))
3079 : GET_MODE (XEXP (op0, 0)));
3081 if (cmp_mode == VOIDmode)
3082 cmp_mode = op0_mode;
3083 temp = simplify_const_relational_operation (GET_CODE (op0),
3088 /* See if any simplifications were possible. */
3089 if (temp == const0_rtx)
3091 else if (temp == const_true_rtx)
3096 /* Look for happy constants in op1 and op2. */
3097 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3099 HOST_WIDE_INT t = INTVAL (op1);
3100 HOST_WIDE_INT f = INTVAL (op2);
/* cond ? STORE_FLAG_VALUE : 0 is just the comparison itself;
   cond ? 0 : STORE_FLAG_VALUE is the reversed comparison.  */
3102 if (t == STORE_FLAG_VALUE && f == 0)
3103 code = GET_CODE (op0);
3104 else if (t == 0 && f == STORE_FLAG_VALUE)
3107 tmp = reversed_comparison_code (op0, NULL_RTX);
3115 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
/* VEC_MERGE handling: OP2 is a bitmask selecting, per element,
   between OP0 (bit set) and OP1 (bit clear).  */
3121 if (GET_MODE (op0) != mode
3122 || GET_MODE (op1) != mode
3123 || !VECTOR_MODE_P (mode))
3125 op2 = avoid_constant_pool_reference (op2);
3126 if (GET_CODE (op2) == CONST_INT)
3128 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3129 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3130 int mask = (1 << n_elts) - 1;
/* All-zero mask selects OP1 entirely, all-ones selects OP0.  */
3132 if (!(INTVAL (op2) & mask))
3134 if ((INTVAL (op2) & mask) == mask)
3137 op0 = avoid_constant_pool_reference (op0);
3138 op1 = avoid_constant_pool_reference (op1);
3139 if (GET_CODE (op0) == CONST_VECTOR
3140 && GET_CODE (op1) == CONST_VECTOR)
3142 rtvec v = rtvec_alloc (n_elts);
3145 for (i = 0; i < n_elts; i++)
3146 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3147 ? CONST_VECTOR_ELT (op0, i)
3148 : CONST_VECTOR_ELT (op1, i));
3149 return gen_rtx_CONST_VECTOR (mode, v);
3161 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3162 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3164 Works by unpacking OP into a collection of 8-bit values
3165 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3166 and then repacking them again for OUTERMODE. */
/* NOTE(review): this excerpt is elided -- several declarations,
   braces and case labels are missing; the comments below describe
   only the visible code.  */
3169 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3170 enum machine_mode innermode, unsigned int byte)
3172 /* We support up to 512-bit values (for V8DFmode). */
3176 value_mask = (1 << value_bit) - 1
/* Scratch buffer holding the unpacked value in value_bit-sized
   little-endian chunks.  */
3178 unsigned char value[max_bitsize / value_bit];
3187 rtvec result_v = NULL;
3188 enum mode_class outer_class;
3189 enum machine_mode outer_submode;
3191 /* Some ports misuse CCmode. */
3192 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3195 /* Unpack the value. */
/* Vectors unpack element by element; scalars are treated as a single
   element of max_bitsize bits.  */
3197 if (GET_CODE (op) == CONST_VECTOR)
3199 num_elem = CONST_VECTOR_NUNITS (op);
3200 elems = &CONST_VECTOR_ELT (op, 0);
3201 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3207 elem_bitsize = max_bitsize;
3210 if (BITS_PER_UNIT % value_bit != 0)
3211 abort (); /* Too complicated; reducing value_bit may help. */
3212 if (elem_bitsize % BITS_PER_UNIT != 0)
3213 abort (); /* I don't know how to handle endianness of sub-units. */
3215 for (elem = 0; elem < num_elem; elem++)
3218 rtx el = elems[elem];
3220 /* Vectors are kept in target memory order. (This is probably
/* Compute where this element's bytes land in VALUE, accounting for
   word and sub-word endianness.  */
3223 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3224 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3226 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3227 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3228 unsigned bytele = (subword_byte % UNITS_PER_WORD
3229 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3230 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3233 switch (GET_CODE (el))
3237 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3239 *vp++ = INTVAL (el) >> i;
3240 /* CONST_INTs are always logically sign-extended. */
3241 for (; i < elem_bitsize; i += value_bit)
3242 *vp++ = INTVAL (el) < 0 ? -1 : 0;
/* CONST_DOUBLE with VOIDmode holds a double-word integer: low word
   first, then the high word.  */
3246 if (GET_MODE (el) == VOIDmode)
3248 /* If this triggers, someone should have generated a
3249 CONST_INT instead. */
3250 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3253 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3254 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3255 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3258 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3261 /* It shouldn't matter what's done here, so fill it with
3263 for (; i < max_bitsize; i += value_bit)
3266 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3268 long tmp[max_bitsize / 32];
3269 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3271 if (bitsize > elem_bitsize)
3273 if (bitsize % value_bit != 0)
3276 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3279 /* real_to_target produces its result in words affected by
3280 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3281 and use WORDS_BIG_ENDIAN instead; see the documentation
3282 of SUBREG in rtl.texi. */
3283 for (i = 0; i < bitsize; i += value_bit)
3286 if (WORDS_BIG_ENDIAN)
3287 ibase = bitsize - 1 - i;
3290 *vp++ = tmp[ibase / 32] >> i % 32;
3293 /* It shouldn't matter what's done here, so fill it with
3295 for (; i < elem_bitsize; i += value_bit)
3307 /* Now, pick the right byte to start with. */
3308 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3309 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3310 will already have offset 0. */
3311 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3313 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3315 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3316 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3317 byte = (subword_byte % UNITS_PER_WORD
3318 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3321 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3322 so if it's become negative it will instead be very large.) */
3323 if (byte >= GET_MODE_SIZE (innermode))
3326 /* Convert from bytes to chunks of size value_bit. */
3327 value_start = byte * (BITS_PER_UNIT / value_bit);
3329 /* Re-pack the value. */
/* Mirror of the unpack step: a vector result is rebuilt element by
   element, a scalar result as a single element of OUTERMODE.  */
3331 if (VECTOR_MODE_P (outermode))
3333 num_elem = GET_MODE_NUNITS (outermode);
3334 result_v = rtvec_alloc (num_elem);
3335 elems = &RTVEC_ELT (result_v, 0);
3336 outer_submode = GET_MODE_INNER (outermode);
3342 outer_submode = outermode;
3345 outer_class = GET_MODE_CLASS (outer_submode);
3346 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3348 if (elem_bitsize % value_bit != 0)
3350 if (elem_bitsize + value_start * value_bit > max_bitsize)
3353 for (elem = 0; elem < num_elem; elem++)
3357 /* Vectors are stored in target memory order. (This is probably
3360 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3361 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3363 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3364 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3365 unsigned bytele = (subword_byte % UNITS_PER_WORD
3366 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3367 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3370 switch (outer_class)
3373 case MODE_PARTIAL_INT:
/* Integer result: accumulate the chunks into a (low, high) pair
   and emit a CONST_INT or double-word constant as appropriate.  */
3375 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3378 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3380 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3381 for (; i < elem_bitsize; i += value_bit)
3382 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3383 << (i - HOST_BITS_PER_WIDE_INT));
3385 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3387 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3388 elems[elem] = gen_int_mode (lo, outer_submode);
3390 elems[elem] = immed_double_const (lo, hi, outer_submode);
/* Float result: rebuild the target representation and convert it
   back to a REAL_VALUE_TYPE.  */
3397 long tmp[max_bitsize / 32];
3399 /* real_from_target wants its input in words affected by
3400 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3401 and use WORDS_BIG_ENDIAN instead; see the documentation
3402 of SUBREG in rtl.texi. */
3403 for (i = 0; i < max_bitsize / 32; i++)
3405 for (i = 0; i < elem_bitsize; i += value_bit)
3408 if (WORDS_BIG_ENDIAN)
3409 ibase = elem_bitsize - 1 - i;
3412 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3415 real_from_target (&r, tmp, outer_submode);
3416 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3424 if (VECTOR_MODE_P (outermode))
3425 return gen_rtx_CONST_VECTOR (outermode, result_v);
3430 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3431 Return 0 if no simplifications are possible. */
/* NOTE(review): this excerpt is elided -- some returns and braces are
   missing; comments below describe only the visible code.  */
3433 simplify_subreg (enum machine_mode outermode, rtx op,
3434 enum machine_mode innermode, unsigned int byte)
3436 /* Little bit of sanity checking. */
3437 if (innermode == VOIDmode || outermode == VOIDmode
3438 || innermode == BLKmode || outermode == BLKmode)
3441 if (GET_MODE (op) != innermode
3442 && GET_MODE (op) != VOIDmode)
/* BYTE must be aligned to the outer mode and lie inside OP.  */
3445 if (byte % GET_MODE_SIZE (outermode)
3446 || byte >= GET_MODE_SIZE (innermode))
3449 if (outermode == innermode && !byte)
/* Constants are folded by byte-level unpack/repack.  */
3452 if (GET_CODE (op) == CONST_INT
3453 || GET_CODE (op) == CONST_DOUBLE
3454 || GET_CODE (op) == CONST_VECTOR)
3455 return simplify_immed_subreg (outermode, op, innermode, byte);
3457 /* Changing mode twice with SUBREG => just change it once,
3458 or not at all if changing back op starting mode. */
3459 if (GET_CODE (op) == SUBREG)
3461 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3462 int final_offset = byte + SUBREG_BYTE (op);
3465 if (outermode == innermostmode
3466 && byte == 0 && SUBREG_BYTE (op) == 0)
3467 return SUBREG_REG (op);
3469 /* The SUBREG_BYTE represents offset, as if the value were stored
3470 in memory. Irritating exception is paradoxical subreg, where
3471 we define SUBREG_BYTE to be 0. On big endian machines, this
3472 value should be negative. For a moment, undo this exception. */
3473 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3475 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3476 if (WORDS_BIG_ENDIAN)
3477 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3478 if (BYTES_BIG_ENDIAN)
3479 final_offset += difference % UNITS_PER_WORD;
3481 if (SUBREG_BYTE (op) == 0
3482 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3484 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3485 if (WORDS_BIG_ENDIAN)
3486 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3487 if (BYTES_BIG_ENDIAN)
3488 final_offset += difference % UNITS_PER_WORD;
3491 /* See whether resulting subreg will be paradoxical. */
3492 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3494 /* In nonparadoxical subregs we can't handle negative offsets. */
3495 if (final_offset < 0)
3497 /* Bail out in case resulting subreg would be incorrect. */
3498 if (final_offset % GET_MODE_SIZE (outermode)
3499 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3505 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3507 /* In paradoxical subreg, see if we are still looking on lower part.
3508 If so, our SUBREG_BYTE will be 0. */
3509 if (WORDS_BIG_ENDIAN)
3510 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3511 if (BYTES_BIG_ENDIAN)
3512 offset += difference % UNITS_PER_WORD;
3513 if (offset == final_offset)
3519 /* Recurse for further possible simplifications. */
3520 new = simplify_subreg (outermode, SUBREG_REG (op),
3521 GET_MODE (SUBREG_REG (op)),
3525 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3528 /* SUBREG of a hard register => just change the register number
3529 and/or mode. If the hard register is not valid in that mode,
3530 suppress this simplification. If the hard register is the stack,
3531 frame, or argument pointer, leave this as a SUBREG. */
3534 && (! REG_FUNCTION_VALUE_P (op)
3535 || ! rtx_equal_function_value_matters)
3536 && REGNO (op) < FIRST_PSEUDO_REGISTER
3537 #ifdef CANNOT_CHANGE_MODE_CLASS
3538 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3539 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3540 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3542 && ((reload_completed && !frame_pointer_needed)
3543 || (REGNO (op) != FRAME_POINTER_REGNUM
3544 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3545 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3548 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3549 && REGNO (op) != ARG_POINTER_REGNUM
3551 && REGNO (op) != STACK_POINTER_REGNUM
3552 && subreg_offset_representable_p (REGNO (op), innermode,
3555 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3556 int final_regno = subreg_hard_regno (tem, 0);
3558 /* ??? We do allow it if the current REG is not valid for
3559 its mode. This is a kludge to work around how float/complex
3560 arguments are passed on 32-bit SPARC and should be fixed. */
3561 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3562 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3564 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3566 /* Propagate original regno. We don't have any way to specify
3567 the offset inside original regno, so do so only for lowpart.
3568 The information is used only by alias analysis that can not
3569 grog partial register anyway. */
3571 if (subreg_lowpart_offset (outermode, innermode) == byte)
3572 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3577 /* If we have a SUBREG of a register that we are replacing and we are
3578 replacing it with a MEM, make a new MEM and try replacing the
3579 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3580 or if we would be widening it. */
3582 if (GET_CODE (op) == MEM
3583 && ! mode_dependent_address_p (XEXP (op, 0))
3584 /* Allow splitting of volatile memory references in case we don't
3585 have instruction to move the whole thing. */
3586 && (! MEM_VOLATILE_P (op)
3587 || ! have_insn_for (SET, innermode))
3588 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3589 return adjust_address_nv (op, outermode, byte);
3591 /* Handle complex values represented as CONCAT
3592 of real and imaginary part. */
3593 if (GET_CODE (op) == CONCAT)
/* Select real or imaginary half by whether BYTE falls in the first
   unit, then recurse into that half with the remaining offset.  */
3595 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3596 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3597 unsigned int final_offset;
3600 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3601 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3604 /* We can at least simplify it by referring directly to the
3606 return gen_rtx_SUBREG (outermode, part, final_offset);
3609 /* Optimize SUBREG truncations of zero and sign extended values. */
3610 if ((GET_CODE (op) == ZERO_EXTEND
3611 || GET_CODE (op) == SIGN_EXTEND)
3612 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
3614 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
3616 /* If we're requesting the lowpart of a zero or sign extension,
3617 there are three possibilities. If the outermode is the same
3618 as the origmode, we can omit both the extension and the subreg.
3619 If the outermode is not larger than the origmode, we can apply
3620 the truncation without the extension. Finally, if the outermode
3621 is larger than the origmode, but both are integer modes, we
3622 can just extend to the appropriate mode. */
3625 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
3626 if (outermode == origmode)
3627 return XEXP (op, 0);
3628 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
3629 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
3630 subreg_lowpart_offset (outermode,
3632 if (SCALAR_INT_MODE_P (outermode))
3633 return simplify_gen_unary (GET_CODE (op), outermode,
3634 XEXP (op, 0), origmode);
3637 /* A SUBREG resulting from a zero extension may fold to zero if
3638 it extracts higher bits that the ZERO_EXTEND's source bits. */
3639 if (GET_CODE (op) == ZERO_EXTEND
3640 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
3641 return CONST0_RTX (outermode);
3647 /* Make a SUBREG operation or equivalent if it folds. */
/* Validates the request, tries simplify_subreg, and otherwise (for
   the visible cases) emits an explicit SUBREG rtx.  (Some returns and
   braces are elided in this excerpt.)  */
3650 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3651 enum machine_mode innermode, unsigned int byte)
3654 /* Little bit of sanity checking. */
3655 if (innermode == VOIDmode || outermode == VOIDmode
3656 || innermode == BLKmode || outermode == BLKmode)
3659 if (GET_MODE (op) != innermode
3660 && GET_MODE (op) != VOIDmode)
/* Same alignment/containment requirements as simplify_subreg.  */
3663 if (byte % GET_MODE_SIZE (outermode)
3664 || byte >= GET_MODE_SIZE (innermode))
3667 if (GET_CODE (op) == QUEUED)
3670 new = simplify_subreg (outermode, op, innermode, byte);
3674 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3677 return gen_rtx_SUBREG (outermode, op, byte);
3679 /* Simplify X, an rtx expression.
3681 Return the simplified expression or NULL if no simplifications
3684 This is the preferred entry point into the simplification routines;
3685 however, we still allow passes to call the more specific routines.
3687 Right now GCC has three (yes, three) major bodies of RTL simplification
3688 code that need to be unified.
3690 1. fold_rtx in cse.c. This code uses various CSE specific
3691 information to aid in RTL simplification.
3693 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3694 it uses combine specific information to aid in RTL
3697 3. The routines in this file.
3700 Long term we want to only have one body of simplification code; to
3701 get to that state I recommend the following steps:
3703 1. Pore over fold_rtx & simplify_rtx and move any simplifications
3704 which are not pass dependent state into these routines.
3706 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3707 use this routine whenever possible.
3709 3. Allow for pass dependent state to be provided to these
3710 routines and add simplifications based on the pass dependent
3711 state. Remove code from cse.c & combine.c that becomes
3714 It will take time, but ultimately the compiler will be easier to
3715 maintain and improve. It's totally silly that when we add a
3716 simplification that it needs to be added to 4 places (3 for RTL
3717 simplification and 1 for tree simplification). */
3720 simplify_rtx (rtx x)
3722 enum rtx_code code = GET_CODE (x);
3723 enum machine_mode mode = GET_MODE (x);
3726 switch (GET_RTX_CLASS (code))
3729 return simplify_unary_operation (code, mode,
3730 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3731 case RTX_COMM_ARITH:
3732 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3733 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3735 /* Fall through.... */
3738 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3741 case RTX_BITFIELD_OPS:
3742 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3743 XEXP (x, 0), XEXP (x, 1),
3747 case RTX_COMM_COMPARE:
3748 temp = simplify_relational_operation (code, mode,
3749 ((GET_MODE (XEXP (x, 0))
3751 ? GET_MODE (XEXP (x, 0))
3752 : GET_MODE (XEXP (x, 1))),
3753 XEXP (x, 0), XEXP (x, 1));
3758 return simplify_gen_subreg (mode, SUBREG_REG (x),
3759 GET_MODE (SUBREG_REG (x)),
3761 if (code == CONSTANT_P_RTX)
3763 if (CONSTANT_P (XEXP (x, 0)))
3771 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3772 if (GET_CODE (XEXP (x, 0)) == HIGH
3773 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))