/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
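
/* For example, if LOW has its top bit set (so it looks negative as a
   signed HOST_WIDE_INT), HWI_SIGN_EXTEND (low) is -1, i.e. all ones;
   otherwise it is 0.  Pairing LOW with that value reproduces the
   two's complement double-word encoding of the signed value in LOW.  */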
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
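
/* For example, in 32-bit SImode the most negative value negates to
   itself: negating (const_int -2147483648) yields +2147483648, which
   gen_int_mode truncates back to -2147483648; this is why the
   truncating gen_int_mode is used here rather than GEN_INT.  */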
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
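
/* For example, for 32-bit SImode, mode_signbit_p returns true exactly
   for (const_int -2147483648): after masking to the mode width the
   value is 1 << 31, the most significant bit of SImode.  */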
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}

/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));
      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }
      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }
      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         canonicalized.  */
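      /* For example, (not (and X Y)) becomes (ior (not X) (not Y)) and
         (not (ior X Y)) becomes (and (not X) (not Y)).  */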
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);
      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }
      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);
      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx)
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);
      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);
      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), GET_MODE (XEXP (op, 0)));

      /* (float_truncate (float x)) is (float x) */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
      */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
    case PARITY:
      /* (pop* (zero_extend <X>)) = (pop* <X>) */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (code, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
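      /* For example, with 32-bit SImode, folding (fix:SI x) for an
         out-of-range x such as 1.0e10 saturates to the nearest bound,
         here (const_int 2147483647), and a NaN operand folds to zero,
         matching the middle-end's constant-folding semantics.  */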
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */
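
/* For example, (plus (plus X (const_int 1)) (const_int 2)) is handled
   below by the "(a op b) op c" -> "a op (b op c)" attempt: the inner
   constants fold, giving (plus X (const_int 3)).  */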
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;
      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */
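      /* For example, (plus (mult X (const_int 3)) X) distributes to
         (mult X (const_int 4)), and (plus (ashift X (const_int 2)) X)
         to (mult X (const_int 5)), provided the result is no more
         expensive than the original.  */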
      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);
      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case COMPARE:
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */
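      /* For example, (minus (mult X (const_int 3)) X) distributes to
         (mult X (const_int 2)), again subject to the rtx_cost check.  */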
      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;
      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;
      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
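      /* For example, (mult X (const_int 8)) becomes
         (ashift X (const_int 3)).  */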
      /* Likewise for multipliers wider than a word.  */
      else if (GET_CODE (trueop1) == CONST_DOUBLE
               && (GET_MODE (trueop1) == VOIDmode
                   || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
               && GET_MODE (op0) == mode
               && CONST_DOUBLE_LOW (trueop1) == 0
               && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (GET_CODE (op1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */
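      /* For example, in 32-bit SImode, (ior (ashift A (const_int 24))
         (lshiftrt A (const_int 8))) becomes (rotate A (const_int 24)).  */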
      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && GET_CODE (XEXP (opleft, 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
              + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
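      /* For example, (ior (and X (const_int 0xff)) (const_int 0x0f))
         becomes (ior (and X (const_int 0xf0)) (const_int 0x0f)).  */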
      if (GET_CODE (op1) == CONST_INT
          && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                          (AND, mode, XEXP (op0, 0),
                                           GEN_INT (INTVAL (XEXP (op0, 1))
                                                    & ~INTVAL (op1))),
                                    op1);
      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));
      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */
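      /* For example, if nonzero_bits shows OP0 can only set the high
         byte and OP1 only the low byte, (xor OP0 OP1) is rewritten as
         (ior OP0 OP1), which the rotate detection above can then match.  */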
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }
2116 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2117 correspond to a machine insn or result in further simplifications
2118 if B is a constant. */
2120 if (GET_CODE (op0) == AND
2121 && rtx_equal_p (XEXP (op0, 1), op1)
2122 && ! side_effects_p (op1))
2123 return simplify_gen_binary (AND, mode,
2124 simplify_gen_unary (NOT, mode,
2125 XEXP (op0, 0), mode), op1);
2128 else if (GET_CODE (op0) == AND
2129 && rtx_equal_p (XEXP (op0, 0), op1)
2130 && ! side_effects_p (op1))
2131 return simplify_gen_binary (AND, mode,
2132 simplify_gen_unary (NOT, mode,
2133 XEXP (op0, 1), mode), op1);
2136 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2137 comparison if STORE_FLAG_VALUE is 1. */
2138 if (STORE_FLAG_VALUE == 1
2139 && trueop1 == const1_rtx
2140 && COMPARISON_P (op0)
2141 && (reversed = reversed_comparison (op0, mode)))
2144 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2145 is (lt foo (const_int 0)), so we can perform the above
2146 simplification if STORE_FLAG_VALUE is 1. */
2148 if (STORE_FLAG_VALUE == 1
2149 && trueop1 == const1_rtx
2150 && GET_CODE (op0) == LSHIFTRT
2151 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2152 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2153 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
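/* (lshiftrt foo (width - 1)) isolates the sign bit, i.e. it is
   (lt foo 0) as a 0/1 value, so XORing it with 1 yields the reversed
   test (ge foo 0).  */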
2155 /* (xor (comparison foo bar) (const_int sign-bit))
2156 when STORE_FLAG_VALUE is the sign bit. */
2157 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2158 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2159 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2160 && trueop1 == const_true_rtx
2161 && COMPARISON_P (op0)
2162 && (reversed = reversed_comparison (op0, mode)))
2167 tem = simplify_associative_operation (code, mode, op0, op1);
2173 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2175 /* If we are turning off bits already known off in OP0, we need not do an AND. */
2177 if (GET_CODE (trueop1) == CONST_INT
2178 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2179 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2181 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2182 && GET_MODE_CLASS (mode) != MODE_CC)
2185 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2186 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2187 && ! side_effects_p (op0)
2188 && GET_MODE_CLASS (mode) != MODE_CC)
2189 return CONST0_RTX (mode);
2191 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2192 there are no nonzero bits of C outside of X's mode. */
2193 if ((GET_CODE (op0) == SIGN_EXTEND
2194 || GET_CODE (op0) == ZERO_EXTEND)
2195 && GET_CODE (trueop1) == CONST_INT
2196 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2197 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2198 & INTVAL (trueop1)) == 0)
2200 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2201 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2202 gen_int_mode (INTVAL (trueop1), imode));
2204 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
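/* For example, (and (sign_extend:SI (x:QI)) 0x7f) becomes
   (zero_extend:SI (and:QI x 0x7f)), since the constant 0x7f has no
   bits set outside QImode.  */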
2207 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2208 insn (and may simplify more). */
2209 if (GET_CODE (op0) == XOR
2210 && rtx_equal_p (XEXP (op0, 0), op1)
2211 && ! side_effects_p (op1))
2212 return simplify_gen_binary (AND, mode,
2213 simplify_gen_unary (NOT, mode,
2214 XEXP (op0, 1), mode), op1);
2217 if (GET_CODE (op0) == XOR
2218 && rtx_equal_p (XEXP (op0, 1), op1)
2219 && ! side_effects_p (op1))
2220 return simplify_gen_binary (AND, mode,
2221 simplify_gen_unary (NOT, mode,
2222 XEXP (op0, 0), mode), op1);
2225 /* Similarly for (~(A ^ B)) & A. */
2226 if (GET_CODE (op0) == NOT
2227 && GET_CODE (XEXP (op0, 0)) == XOR
2228 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2229 && ! side_effects_p (op1))
2230 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2232 if (GET_CODE (op0) == NOT
2233 && GET_CODE (XEXP (op0, 0)) == XOR
2234 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2235 && ! side_effects_p (op1))
2236 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
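/* (not (xor A B)) is 1 exactly where A and B agree, so ANDing it with
   A keeps the bits where both A and B are 1; the result is A & B,
   expressed here in terms of the remaining XOR operand.  */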
2238 /* Convert (A | B) & A to A. */
2239 if (GET_CODE (op0) == IOR
2240 && (rtx_equal_p (XEXP (op0, 0), op1)
2241 || rtx_equal_p (XEXP (op0, 1), op1))
2242 && ! side_effects_p (XEXP (op0, 0))
2243 && ! side_effects_p (XEXP (op0, 1)))
2246 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2247 ((A & N) + B) & M -> (A + B) & M
2248 Similarly if (N & M) == 0,
2249 ((A | N) + B) & M -> (A + B) & M
2250 and for - instead of + and/or ^ instead of |. */
2251 if (GET_CODE (trueop1) == CONST_INT
2252 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2253 && ~INTVAL (trueop1)
2254 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2255 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2260 pmop[0] = XEXP (op0, 0);
2261 pmop[1] = XEXP (op0, 1);
2263 for (which = 0; which < 2; which++)
2266 switch (GET_CODE (tem))
2269 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2270 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2271 == INTVAL (trueop1))
2272 pmop[which] = XEXP (tem, 0);
2276 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2277 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2278 pmop[which] = XEXP (tem, 0);
2285 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2287 tem = simplify_gen_binary (GET_CODE (op0), mode, pmop[0], pmop[1]);
2289 return simplify_gen_binary (code, mode, tem, op1);
2292 tem = simplify_associative_operation (code, mode, op0, op1);
2298 /* 0/x is 0 (or x&0 if x has side-effects). */
2299 if (trueop0 == CONST0_RTX (mode))
2301 if (side_effects_p (op1))
2302 return simplify_gen_binary (AND, mode, op1, trueop0);
2306 if (trueop1 == CONST1_RTX (mode))
2307 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2308 /* Convert divide by power of two into shift. */
2309 if (GET_CODE (trueop1) == CONST_INT
2310 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2311 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
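/* E.g. (udiv x 8) becomes (lshiftrt x 3).  The plain shift is only
   valid for this unsigned division; signed division by a power of two
   must round towards zero.  */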
2315 /* Handle floating point and integers separately. */
2316 if (SCALAR_FLOAT_MODE_P (mode))
2318 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2319 safe for modes with NaNs, since 0.0 / 0.0 will then be
2320 NaN rather than 0.0. Nor is it safe for modes with signed
2321 zeros, since dividing 0 by a negative number gives -0.0. */
2322 if (trueop0 == CONST0_RTX (mode)
2323 && !HONOR_NANS (mode)
2324 && !HONOR_SIGNED_ZEROS (mode)
2325 && ! side_effects_p (op1))
2328 if (trueop1 == CONST1_RTX (mode)
2329 && !HONOR_SNANS (mode))
2332 if (GET_CODE (trueop1) == CONST_DOUBLE
2333 && trueop1 != CONST0_RTX (mode))
2336 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2339 if (REAL_VALUES_EQUAL (d, dconstm1)
2340 && !HONOR_SNANS (mode))
2341 return simplify_gen_unary (NEG, mode, op0, mode);
2343 /* Change FP division by a constant into multiplication.
2344 Only do this with -funsafe-math-optimizations. */
2345 if (flag_unsafe_math_optimizations
2346 && !REAL_VALUES_EQUAL (d, dconst0))
2348 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2349 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2350 return simplify_gen_binary (MULT, mode, op0, tem);
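/* E.g. x / 2.0 becomes x * 0.5.  The reciprocal is exact for powers
   of two but rounded for most other constants, which is why the
   transformation is guarded by flag_unsafe_math_optimizations.  */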
2356 /* 0/x is 0 (or x&0 if x has side-effects). */
2357 if (trueop0 == CONST0_RTX (mode))
2359 if (side_effects_p (op1))
2360 return simplify_gen_binary (AND, mode, op1, trueop0);
2364 if (trueop1 == CONST1_RTX (mode))
2365 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2367 if (trueop1 == constm1_rtx)
2369 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2370 return simplify_gen_unary (NEG, mode, x, mode);
2376 /* 0%x is 0 (or x&0 if x has side-effects). */
2377 if (trueop0 == CONST0_RTX (mode))
2379 if (side_effects_p (op1))
2380 return simplify_gen_binary (AND, mode, op1, trueop0);
2383 /* x%1 is 0 (or x&0 if x has side-effects). */
2384 if (trueop1 == CONST1_RTX (mode))
2386 if (side_effects_p (op0))
2387 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2388 return CONST0_RTX (mode);
2390 /* Implement modulus by power of two as AND. */
2391 if (GET_CODE (trueop1) == CONST_INT
2392 && exact_log2 (INTVAL (trueop1)) > 0)
2393 return simplify_gen_binary (AND, mode, op0,
2394 GEN_INT (INTVAL (op1) - 1));
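/* E.g. (umod x 16) becomes (and x 15): for a power of two, the
   remainder of the unsigned division is just the low-order bits.  */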
2398 /* 0%x is 0 (or x&0 if x has side-effects). */
2399 if (trueop0 == CONST0_RTX (mode))
2401 if (side_effects_p (op1))
2402 return simplify_gen_binary (AND, mode, op1, trueop0);
2405 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2406 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2408 if (side_effects_p (op0))
2409 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2410 return CONST0_RTX (mode);
2417 /* Rotating ~0 always results in ~0. */
2418 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2419 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2420 && ! side_effects_p (op1))
2423 /* Fall through.... */
2428 if (trueop1 == CONST0_RTX (mode))
2430 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2435 if (width <= HOST_BITS_PER_WIDE_INT
2436 && GET_CODE (trueop1) == CONST_INT
2437 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2438 && ! side_effects_p (op0))
2440 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2442 tem = simplify_associative_operation (code, mode, op0, op1);
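/* In this SMIN case a second operand equal to the most negative value
   of the mode compares below everything else, so smin (x, INT_MIN)
   is INT_MIN for every x.  */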
2448 if (width <= HOST_BITS_PER_WIDE_INT
2449 && GET_CODE (trueop1) == CONST_INT
2450 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2451 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2452 && ! side_effects_p (op0))
2454 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2456 tem = simplify_associative_operation (code, mode, op0, op1);
2462 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2464 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2466 tem = simplify_associative_operation (code, mode, op0, op1);
2472 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2474 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2476 tem = simplify_associative_operation (code, mode, op0, op1);
2485 /* ??? There are simplifications that can be done. */
2489 if (!VECTOR_MODE_P (mode))
2491 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2492 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2493 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2494 gcc_assert (XVECLEN (trueop1, 0) == 1);
2495 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2497 if (GET_CODE (trueop0) == CONST_VECTOR)
2498 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
2503 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2504 gcc_assert (GET_MODE_INNER (mode)
2505 == GET_MODE_INNER (GET_MODE (trueop0)));
2506 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2508 if (GET_CODE (trueop0) == CONST_VECTOR)
2510 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2511 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2512 rtvec v = rtvec_alloc (n_elts);
2515 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2516 for (i = 0; i < n_elts; i++)
2518 rtx x = XVECEXP (trueop1, 0, i);
2520 gcc_assert (GET_CODE (x) == CONST_INT);
2521 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
2525 return gen_rtx_CONST_VECTOR (mode, v);
2529 if (XVECLEN (trueop1, 0) == 1
2530 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2531 && GET_CODE (trueop0) == VEC_CONCAT)
2534 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2536 /* Try to find the element in the VEC_CONCAT. */
2537 while (GET_MODE (vec) != mode
2538 && GET_CODE (vec) == VEC_CONCAT)
2540 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2541 if (offset < vec_size)
2542 vec = XEXP (vec, 0);
2546 vec = XEXP (vec, 1);
2548 vec = avoid_constant_pool_reference (vec);
2551 if (GET_MODE (vec) == mode)
2558 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2559 ? GET_MODE (trueop0)
2560 : GET_MODE_INNER (mode));
2561 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2562 ? GET_MODE (trueop1)
2563 : GET_MODE_INNER (mode));
2565 gcc_assert (VECTOR_MODE_P (mode));
2566 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2567 == GET_MODE_SIZE (mode));
2569 if (VECTOR_MODE_P (op0_mode))
2570 gcc_assert (GET_MODE_INNER (mode)
2571 == GET_MODE_INNER (op0_mode));
2573 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2575 if (VECTOR_MODE_P (op1_mode))
2576 gcc_assert (GET_MODE_INNER (mode)
2577 == GET_MODE_INNER (op1_mode));
2579 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2581 if ((GET_CODE (trueop0) == CONST_VECTOR
2582 || GET_CODE (trueop0) == CONST_INT
2583 || GET_CODE (trueop0) == CONST_DOUBLE)
2584 && (GET_CODE (trueop1) == CONST_VECTOR
2585 || GET_CODE (trueop1) == CONST_INT
2586 || GET_CODE (trueop1) == CONST_DOUBLE))
2588 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2589 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2590 rtvec v = rtvec_alloc (n_elts);
2592 unsigned in_n_elts = 1;
2594 if (VECTOR_MODE_P (op0_mode))
2595 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2596 for (i = 0; i < n_elts; i++)
2600 if (!VECTOR_MODE_P (op0_mode))
2601 RTVEC_ELT (v, i) = trueop0;
2603 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2607 if (!VECTOR_MODE_P (op1_mode))
2608 RTVEC_ELT (v, i) = trueop1;
2610 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1, i - in_n_elts);
2615 return gen_rtx_CONST_VECTOR (mode, v);
2628 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1)
2631 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2633 unsigned int width = GET_MODE_BITSIZE (mode);
2635 if (VECTOR_MODE_P (mode)
2636 && code != VEC_CONCAT
2637 && GET_CODE (op0) == CONST_VECTOR
2638 && GET_CODE (op1) == CONST_VECTOR)
2640 unsigned n_elts = GET_MODE_NUNITS (mode);
2641 enum machine_mode op0mode = GET_MODE (op0);
2642 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2643 enum machine_mode op1mode = GET_MODE (op1);
2644 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2645 rtvec v = rtvec_alloc (n_elts);
2648 gcc_assert (op0_n_elts == n_elts);
2649 gcc_assert (op1_n_elts == n_elts);
2650 for (i = 0; i < n_elts; i++)
2652 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2653 CONST_VECTOR_ELT (op0, i),
2654 CONST_VECTOR_ELT (op1, i));
2657 RTVEC_ELT (v, i) = x;
2660 return gen_rtx_CONST_VECTOR (mode, v);
2663 if (VECTOR_MODE_P (mode)
2664 && code == VEC_CONCAT
2665 && CONSTANT_P (op0) && CONSTANT_P (op1))
2667 unsigned n_elts = GET_MODE_NUNITS (mode);
2668 rtvec v = rtvec_alloc (n_elts);
2670 gcc_assert (n_elts >= 2);
2673 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2674 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2676 RTVEC_ELT (v, 0) = op0;
2677 RTVEC_ELT (v, 1) = op1;
2681 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2682 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2685 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2686 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2687 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2689 for (i = 0; i < op0_n_elts; ++i)
2690 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2691 for (i = 0; i < op1_n_elts; ++i)
2692 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2695 return gen_rtx_CONST_VECTOR (mode, v);
2698 if (SCALAR_FLOAT_MODE_P (mode)
2699 && GET_CODE (op0) == CONST_DOUBLE
2700 && GET_CODE (op1) == CONST_DOUBLE
2701 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2712 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0), GET_MODE (op0));
2714 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1), GET_MODE (op1));
2716 for (i = 0; i < 4; i++)
2733 real_from_target (&r, tmp0, mode);
2734 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
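/* This path folds bitwise operations (AND, IOR, XOR) on floating-point
   constants, e.g. the sign-bit masks used for fabs and copysign, by
   combining the raw target words and converting the bit image back
   into a REAL_VALUE_TYPE.  */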
2738 REAL_VALUE_TYPE f0, f1, value, result;
2741 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2742 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2743 real_convert (&f0, mode, &f0);
2744 real_convert (&f1, mode, &f1);
2746 if (HONOR_SNANS (mode)
2747 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2751 && REAL_VALUES_EQUAL (f1, dconst0)
2752 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2755 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2756 && flag_trapping_math
2757 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2759 int s0 = REAL_VALUE_NEGATIVE (f0);
2760 int s1 = REAL_VALUE_NEGATIVE (f1);
2765 /* Inf + -Inf = NaN plus exception. */
2770 /* Inf - Inf = NaN plus exception. */
2775 /* Inf / Inf = NaN plus exception. */
2782 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2783 && flag_trapping_math
2784 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2785 || (REAL_VALUE_ISINF (f1)
2786 && REAL_VALUES_EQUAL (f0, dconst0))))
2787 /* Inf * 0 = NaN plus exception. */
2790 inexact = real_arithmetic (&value, rtx_to_tree_code (code), &f0, &f1);
2792 real_convert (&result, mode, &value);
2794 /* Don't constant fold this floating point operation if
2795 the result has overflowed and flag_trapping_math. */
2797 if (flag_trapping_math
2798 && MODE_HAS_INFINITIES (mode)
2799 && REAL_VALUE_ISINF (result)
2800 && !REAL_VALUE_ISINF (f0)
2801 && !REAL_VALUE_ISINF (f1))
2802 /* Overflow plus exception. */
2805 /* Don't constant fold this floating point operation if the
2806 result may depend upon the run-time rounding mode and
2807 flag_rounding_math is set, or if GCC's software emulation
2808 is unable to accurately represent the result. */
2810 if ((flag_rounding_math
2811 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2812 && !flag_unsafe_math_optimizations))
2813 && (inexact || !real_identical (&result, &value)))
2816 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2820 /* We can fold some multi-word operations. */
2821 if (GET_MODE_CLASS (mode) == MODE_INT
2822 && width == HOST_BITS_PER_WIDE_INT * 2
2823 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2824 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2826 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2827 HOST_WIDE_INT h1, h2, hv, ht;
2829 if (GET_CODE (op0) == CONST_DOUBLE)
2830 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2832 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2834 if (GET_CODE (op1) == CONST_DOUBLE)
2835 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2837 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2842 /* A - B == A + (-B). */
2843 neg_double (l2, h2, &lv, &hv);
2846 /* Fall through.... */
2849 add_double (l1, h1, l2, h2, &lv, &hv);
2853 mul_double (l1, h1, l2, h2, &lv, &hv);
2857 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2858 &lv, &hv, <, &ht))
2863 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2864 <, &ht, &lv, &hv))
2869 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2870 &lv, &hv, <, &ht))
2875 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2876 <, &ht, &lv, &hv))
2881 lv = l1 & l2, hv = h1 & h2;
2885 lv = l1 | l2, hv = h1 | h2;
2889 lv = l1 ^ l2, hv = h1 ^ h2;
2895 && ((unsigned HOST_WIDE_INT) l1
2896 < (unsigned HOST_WIDE_INT) l2)))
2905 && ((unsigned HOST_WIDE_INT) l1
2906 > (unsigned HOST_WIDE_INT) l2)))
2913 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2915 && ((unsigned HOST_WIDE_INT) l1
2916 < (unsigned HOST_WIDE_INT) l2)))
2923 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2925 && ((unsigned HOST_WIDE_INT) l1
2926 > (unsigned HOST_WIDE_INT) l2)))
2932 case LSHIFTRT: case ASHIFTRT:
2934 case ROTATE: case ROTATERT:
2935 if (SHIFT_COUNT_TRUNCATED)
2936 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2938 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2941 if (code == LSHIFTRT || code == ASHIFTRT)
2942 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, code == ASHIFTRT);
2944 else if (code == ASHIFT)
2945 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2946 else if (code == ROTATE)
2947 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2948 else /* code == ROTATERT */
2949 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2956 return immed_double_const (lv, hv, mode);
2959 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2960 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2962 /* Get the integer argument values in two forms:
2963 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2965 arg0 = INTVAL (op0);
2966 arg1 = INTVAL (op1);
2968 if (width < HOST_BITS_PER_WIDE_INT)
2970 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2971 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2974 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2975 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2978 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2979 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2987 /* Compute the value of the arithmetic. */
2992 val = arg0s + arg1s;
2996 val = arg0s - arg1s;
3000 val = arg0s * arg1s;
3005 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3008 val = arg0s / arg1s;
3013 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3016 val = arg0s % arg1s;
3021 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3024 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3029 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3032 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3050 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3051 the value is in range. We can't return any old value for
3052 out-of-range arguments because either the middle-end (via
3053 shift_truncation_mask) or the back-end might be relying on
3054 target-specific knowledge. Nor can we rely on
3055 shift_truncation_mask, since the shift might not be part of an
3056 ashlM3, lshrM3 or ashrM3 instruction. */
3057 if (SHIFT_COUNT_TRUNCATED)
3058 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3059 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3062 val = (code == ASHIFT
3063 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3064 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3066 /* Sign-extend the result for arithmetic right shifts. */
3067 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3068 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
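/* The fix-up is needed because the C >> operator above acts on an
   unsigned value and shifts in zeros: e.g. (ashiftrt:QI -128 1) first
   yields 0x40, and OR-ing in the sign copies corrects it to 0xc0,
   i.e. -64.  */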
3076 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3077 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3085 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3086 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3090 /* Do nothing here. */
3094 val = arg0s <= arg1s ? arg0s : arg1s;
3098 val = ((unsigned HOST_WIDE_INT) arg0
3099 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3103 val = arg0s > arg1s ? arg0s : arg1s;
3107 val = ((unsigned HOST_WIDE_INT) arg0
3108 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3116 /* ??? There are simplifications that can be done. */
3123 return gen_int_mode (val, mode);
3131 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3132 PLUS or MINUS.
3134 Rather than test for specific cases, we do this by a brute-force method
3135 and do all possible simplifications until no more changes occur. Then
3136 we rebuild the operation. */
3138 struct simplify_plus_minus_op_data
3146 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3148 const struct simplify_plus_minus_op_data *d1 = p1;
3149 const struct simplify_plus_minus_op_data *d2 = p2;
3152 result = (commutative_operand_precedence (d2->op)
3153 - commutative_operand_precedence (d1->op));
3156 return d1->ix - d2->ix;
3160 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1)
3163 struct simplify_plus_minus_op_data ops[8];
3165 int n_ops = 2, input_ops = 2;
3166 int first, changed, canonicalized = 0;
3169 memset (ops, 0, sizeof ops);
3171 /* Set up the two operands and then expand them until nothing has been
3172 changed. If we run out of room in our array, give up; this should
3173 almost never happen. */
3178 ops[1].neg = (code == MINUS);
3184 for (i = 0; i < n_ops; i++)
3186 rtx this_op = ops[i].op;
3187 int this_neg = ops[i].neg;
3188 enum rtx_code this_code = GET_CODE (this_op);
3197 ops[n_ops].op = XEXP (this_op, 1);
3198 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3201 ops[i].op = XEXP (this_op, 0);
3204 canonicalized |= this_neg;
3208 ops[i].op = XEXP (this_op, 0);
3209 ops[i].neg = ! this_neg;
3216 && GET_CODE (XEXP (this_op, 0)) == PLUS
3217 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3218 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3220 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3221 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3222 ops[n_ops].neg = this_neg;
3230 /* ~a -> (-a - 1) */
3233 ops[n_ops].op = constm1_rtx;
3234 ops[n_ops++].neg = this_neg;
3235 ops[i].op = XEXP (this_op, 0);
3236 ops[i].neg = !this_neg;
3245 ops[i].op = neg_const_int (mode, this_op);
3259 gcc_assert (n_ops >= 2);
3262 int n_constants = 0;
3264 for (i = 0; i < n_ops; i++)
3265 if (GET_CODE (ops[i].op) == CONST_INT)
3268 if (n_constants <= 1)
3272 /* If we only have two operands, we can avoid the loops. */
3275 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3278 /* Get the two operands. Be careful with the order, especially for
3279 the cases where code == MINUS. */
3280 if (ops[0].neg && ops[1].neg)
3282 lhs = gen_rtx_NEG (mode, ops[0].op);
3285 else if (ops[0].neg)
3296 return simplify_const_binary_operation (code, mode, lhs, rhs);
3299 /* Now simplify each pair of operands until nothing changes. The first
3300 time through just simplify constants against each other. */
3307 for (i = 0; i < n_ops - 1; i++)
3308 for (j = i + 1; j < n_ops; j++)
3310 rtx lhs = ops[i].op, rhs = ops[j].op;
3311 int lneg = ops[i].neg, rneg = ops[j].neg;
3313 if (lhs != 0 && rhs != 0
3314 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
3316 enum rtx_code ncode = PLUS;
3322 tem = lhs, lhs = rhs, rhs = tem;
3324 else if (swap_commutative_operands_p (lhs, rhs))
3325 tem = lhs, lhs = rhs, rhs = tem;
3327 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3328 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3330 rtx tem_lhs, tem_rhs;
3332 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3333 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3334 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3336 if (tem && !CONSTANT_P (tem))
3337 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3340 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3342 /* Reject "simplifications" that just wrap the two
3343 arguments in a CONST. Failure to do so can result
3344 in infinite recursion with simplify_binary_operation
3345 when it calls us to simplify CONST operations. */
3347 && ! (GET_CODE (tem) == CONST
3348 && GET_CODE (XEXP (tem, 0)) == ncode
3349 && XEXP (XEXP (tem, 0), 0) == lhs
3350 && XEXP (XEXP (tem, 0), 1) == rhs)
3351 /* Don't allow -x + -1 -> ~x simplifications in the
3352 first pass. This allows us the chance to combine
3353 the -1 with other constants. */
3355 && GET_CODE (tem) == NOT
3356 && XEXP (tem, 0) == rhs))
3359 if (GET_CODE (tem) == NEG)
3360 tem = XEXP (tem, 0), lneg = !lneg;
3361 if (GET_CODE (tem) == CONST_INT && lneg)
3362 tem = neg_const_int (mode, tem), lneg = 0;
3366 ops[j].op = NULL_RTX;
3376 /* Pack all the operands to the lower-numbered entries. */
3377 for (i = 0, j = 0; j < n_ops; j++)
3381 /* Stabilize sort. */
3387 /* Sort the operations based on swap_commutative_operands_p. */
3388 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
3390 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3392 && GET_CODE (ops[1].op) == CONST_INT
3393 && CONSTANT_P (ops[0].op)
3395 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3397 /* We suppressed creation of trivial CONST expressions in the
3398 combination loop to avoid recursion. Create one manually now.
3399 The combination loop should have ensured that there is exactly
3400 one CONST_INT, and the sort will have ensured that it is last
3401 in the array and that any other constant will be next-to-last. */
3404 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3405 && CONSTANT_P (ops[n_ops - 2].op))
3407 rtx value = ops[n_ops - 1].op;
3408 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3409 value = neg_const_int (mode, value);
3410 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3414 /* Put a non-negated operand first, if possible. */
3416 for (i = 0; i < n_ops && ops[i].neg; i++)
3419 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3428 /* Now make the result by performing the requested operations. */
3430 for (i = 1; i < n_ops; i++)
3431 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3432 mode, result, ops[i].op);
3437 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3439 plus_minus_operand_p (rtx x)
3441 return GET_CODE (x) == PLUS
3442 || GET_CODE (x) == MINUS
3443 || (GET_CODE (x) == CONST
3444 && GET_CODE (XEXP (x, 0)) == PLUS
3445 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3446 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3449 /* Like simplify_binary_operation except used for relational operators.
3450 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3451 not both be VOIDmode.
3453 CMP_MODE is the mode in which the comparison is done, so it is
3454 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3455 the operands or, if both are VOIDmode, the operands are compared in
3456 "infinite precision". */
3458 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3459 enum machine_mode cmp_mode, rtx op0, rtx op1)
3461 rtx tem, trueop0, trueop1;
3463 if (cmp_mode == VOIDmode)
3464 cmp_mode = GET_MODE (op0);
3465 if (cmp_mode == VOIDmode)
3466 cmp_mode = GET_MODE (op1);
3468 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3471 if (SCALAR_FLOAT_MODE_P (mode))
3473 if (tem == const0_rtx)
3474 return CONST0_RTX (mode);
3475 #ifdef FLOAT_STORE_FLAG_VALUE
3477 REAL_VALUE_TYPE val;
3478 val = FLOAT_STORE_FLAG_VALUE (mode);
3479 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3485 if (VECTOR_MODE_P (mode))
3487 if (tem == const0_rtx)
3488 return CONST0_RTX (mode);
3489 #ifdef VECTOR_STORE_FLAG_VALUE
3494 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3495 if (val == NULL_RTX)
3497 if (val == const1_rtx)
3498 return CONST1_RTX (mode);
3500 units = GET_MODE_NUNITS (mode);
3501 v = rtvec_alloc (units);
3502 for (i = 0; i < units; i++)
3503 RTVEC_ELT (v, i) = val;
3504 return gen_rtx_raw_CONST_VECTOR (mode, v);
3514 /* For the following tests, ensure const0_rtx is op1. */
3515 if (swap_commutative_operands_p (op0, op1)
3516 || (op0 == const0_rtx && op1 != const0_rtx))
3517 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3519 /* If op0 is a compare, extract the comparison arguments from it. */
3520 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3521 return simplify_relational_operation (code, mode, VOIDmode,
3522 XEXP (op0, 0), XEXP (op0, 1));
3524 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3528 trueop0 = avoid_constant_pool_reference (op0);
3529 trueop1 = avoid_constant_pool_reference (op1);
3530 return simplify_relational_operation_1 (code, mode, cmp_mode,
3534 /* This part of simplify_relational_operation is only used when CMP_MODE
3535 is not in class MODE_CC (i.e. it is a real comparison).
3537 MODE is the mode of the result, while CMP_MODE is the mode in which
3538 the comparison is done, so it is the mode of the operands. */
3541 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3542 enum machine_mode cmp_mode, rtx op0, rtx op1)
3544 enum rtx_code op0code = GET_CODE (op0);
3546 if (GET_CODE (op1) == CONST_INT)
3548 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3550 /* If op0 is a comparison, extract the comparison arguments from it. */
3554 if (GET_MODE (op0) == mode)
3555 return simplify_rtx (op0);
3557 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3558 XEXP (op0, 0), XEXP (op0, 1));
3560 else if (code == EQ)
3562 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3563 if (new_code != UNKNOWN)
3564 return simplify_gen_relational (new_code, mode, VOIDmode,
3565 XEXP (op0, 0), XEXP (op0, 1));
3570 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3571 if ((code == EQ || code == NE)
3572 && (op0code == PLUS || op0code == MINUS)
3574 && CONSTANT_P (XEXP (op0, 1))
3575 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3577 rtx x = XEXP (op0, 0);
3578 rtx c = XEXP (op0, 1);
3580 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS, cmp_mode, op1, c);
3582 return simplify_gen_relational (code, mode, cmp_mode, x, c);
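/* E.g. (eq (plus x 3) 7) becomes (eq x 4); for a MINUS the constant
   is added back instead of subtracted.  */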
3585 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3586 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3588 && op1 == const0_rtx
3589 && GET_MODE_CLASS (mode) == MODE_INT
3590 && cmp_mode != VOIDmode
3591 /* ??? Work-around BImode bugs in the ia64 backend. */
3593 && cmp_mode != BImode
3594 && nonzero_bits (op0, cmp_mode) == 1
3595 && STORE_FLAG_VALUE == 1)
3596 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3597 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3598 : lowpart_subreg (mode, op0, cmp_mode);
3600 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3601 if ((code == EQ || code == NE)
3602 && op1 == const0_rtx
3604 return simplify_gen_relational (code, mode, cmp_mode,
3605 XEXP (op0, 0), XEXP (op0, 1));
3607 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3608 if ((code == EQ || code == NE)
3610 && rtx_equal_p (XEXP (op0, 0), op1)
3611 && !side_effects_p (XEXP (op0, 0)))
3612 return simplify_gen_relational (code, mode, cmp_mode,
3613 XEXP (op0, 1), const0_rtx);
3615 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3616 if ((code == EQ || code == NE)
3618 && rtx_equal_p (XEXP (op0, 1), op1)
3619 && !side_effects_p (XEXP (op0, 1)))
3620 return simplify_gen_relational (code, mode, cmp_mode,
3621 XEXP (op0, 0), const0_rtx);
3623 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3624 if ((code == EQ || code == NE)
3626 && (GET_CODE (op1) == CONST_INT
3627 || GET_CODE (op1) == CONST_DOUBLE)
3628 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3629 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3630 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3631 simplify_gen_binary (XOR, cmp_mode,
3632 XEXP (op0, 1), op1));
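/* E.g. (eq (xor x 5) 3) becomes (eq x 6), since XORing both sides
   with C1 cancels it on the left-hand side.  */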
3637 /* Check if the given comparison (done in the given MODE) is actually a
3638 tautology or a contradiction.
3639 If no simplification is possible, this function returns zero.
3640 Otherwise, it returns either const_true_rtx or const0_rtx. */
3643 simplify_const_relational_operation (enum rtx_code code,
3644 enum machine_mode mode, rtx op0, rtx op1)
3647 int equal, op0lt, op0ltu, op1lt, op1ltu;
3652 gcc_assert (mode != VOIDmode
3653 || (GET_MODE (op0) == VOIDmode
3654 && GET_MODE (op1) == VOIDmode));
3656 /* If op0 is a compare, extract the comparison arguments from it. */
3657 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3659 op1 = XEXP (op0, 1);
3660 op0 = XEXP (op0, 0);
3662 if (GET_MODE (op0) != VOIDmode)
3663 mode = GET_MODE (op0);
3664 else if (GET_MODE (op1) != VOIDmode)
3665 mode = GET_MODE (op1);
3670 /* We can't simplify MODE_CC values since we don't know what the
3671 actual comparison is. */
3672 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3675 /* Make sure the constant is second. */
3676 if (swap_commutative_operands_p (op0, op1))
3678 tem = op0, op0 = op1, op1 = tem;
3679 code = swap_condition (code);
3682 trueop0 = avoid_constant_pool_reference (op0);
3683 trueop1 = avoid_constant_pool_reference (op1);
3685 /* For integer comparisons of A and B maybe we can simplify A - B and can
3686 then simplify a comparison of that with zero. If A and B are both either
3687 a register or a CONST_INT, this can't help; testing for these cases will
3688 prevent infinite recursion here and speed things up.
3690 If CODE is an unsigned comparison, then we can never do this optimization,
3691 because it gives an incorrect result if the subtraction wraps around zero.
3692 ANSI C defines unsigned operations such that they never overflow, and
3693 thus such cases cannot be ignored; but we cannot do it even for
3694 signed comparisons for languages such as Java, so test flag_wrapv. */
3696 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3697 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3698 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3699 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3700 /* We cannot do this for == or != if tem is a nonzero address. */
3701 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3702 && code != GTU && code != GEU && code != LTU && code != LEU)
3703 return simplify_const_relational_operation (signed_condition (code),
3704 mode, tem, const0_rtx);
3706 if (flag_unsafe_math_optimizations && code == ORDERED)
3707 return const_true_rtx;
3709 if (flag_unsafe_math_optimizations && code == UNORDERED)
3712 /* For modes without NaNs, if the two operands are equal, we know the
3713 result except if they have side-effects. */
3714 if (! HONOR_NANS (GET_MODE (trueop0))
3715 && rtx_equal_p (trueop0, trueop1)
3716 && ! side_effects_p (trueop0))
3717 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3719 /* If the operands are floating-point constants, see if we can fold the result. */
3721 else if (GET_CODE (trueop0) == CONST_DOUBLE
3722 && GET_CODE (trueop1) == CONST_DOUBLE
3723 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3725 REAL_VALUE_TYPE d0, d1;
3727 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3728 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3730 /* Comparisons are unordered iff at least one of the values is NaN. */
3731 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3741 return const_true_rtx;
3754 equal = REAL_VALUES_EQUAL (d0, d1);
3755 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3756 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3759 /* Otherwise, see if the operands are both integers. */
3760 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3761 && (GET_CODE (trueop0) == CONST_DOUBLE
3762 || GET_CODE (trueop0) == CONST_INT)
3763 && (GET_CODE (trueop1) == CONST_DOUBLE
3764 || GET_CODE (trueop1) == CONST_INT))
3766 int width = GET_MODE_BITSIZE (mode);
3767 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3768 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3770 /* Get the two words comprising each integer constant. */
3771 if (GET_CODE (trueop0) == CONST_DOUBLE)
3773 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3774 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3778 l0u = l0s = INTVAL (trueop0);
3779 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3782 if (GET_CODE (trueop1) == CONST_DOUBLE)
3784 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3785 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3789 l1u = l1s = INTVAL (trueop1);
3790 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3793 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3794 we have to sign or zero-extend the values. */
3795 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3797 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3798 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3800 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3801 l0s |= ((HOST_WIDE_INT) (-1) << width);
3803 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3804 l1s |= ((HOST_WIDE_INT) (-1) << width);
3806 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3807 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3809 equal = (h0u == h1u && l0u == l1u);
3810 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3811 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3812 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3813 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3816 /* Otherwise, there are some code-specific tests we can make. */
3819 /* Optimize comparisons with upper and lower bounds. */
3820 if (SCALAR_INT_MODE_P (mode)
3821 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3834 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3841 /* x >= min is always true. */
3842 if (rtx_equal_p (trueop1, mmin))
3843 tem = const_true_rtx;
3849 /* x <= max is always true. */
3850 if (rtx_equal_p (trueop1, mmax))
3851 tem = const_true_rtx;
3856 /* x > max is always false. */
3857 if (rtx_equal_p (trueop1, mmax))
3863 /* x < min is always false. */
3864 if (rtx_equal_p (trueop1, mmin))
3871 if (tem == const0_rtx
3872 || tem == const_true_rtx)
3879 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3884 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3885 return const_true_rtx;
3889 /* Optimize abs(x) < 0.0. */
3890 if (trueop1 == CONST0_RTX (mode)
3891 && !HONOR_SNANS (mode)
3892 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3894 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0) : trueop0;
3896 if (GET_CODE (tem) == ABS)
3902 /* Optimize abs(x) >= 0.0. */
3903 if (trueop1 == CONST0_RTX (mode)
3904 && !HONOR_NANS (mode)
3905 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3907 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0) : trueop0;
3909 if (GET_CODE (tem) == ABS)
3910 return const_true_rtx;
3915 /* Optimize ! (abs(x) < 0.0). */
3916 if (trueop1 == CONST0_RTX (mode))
3918 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0) : trueop0;
3920 if (GET_CODE (tem) == ABS)
3921 return const_true_rtx;
3932 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set as appropriate. */
3938 return equal ? const_true_rtx : const0_rtx;
3941 return ! equal ? const_true_rtx : const0_rtx;
3944 return op0lt ? const_true_rtx : const0_rtx;
3947 return op1lt ? const_true_rtx : const0_rtx;
3949 return op0ltu ? const_true_rtx : const0_rtx;
3951 return op1ltu ? const_true_rtx : const0_rtx;
3954 return equal || op0lt ? const_true_rtx : const0_rtx;
3957 return equal || op1lt ? const_true_rtx : const0_rtx;
3959 return equal || op0ltu ? const_true_rtx : const0_rtx;
3961 return equal || op1ltu ? const_true_rtx : const0_rtx;
3963 return const_true_rtx;
3971 /* Simplify CODE, an operation with result mode MODE and three operands,
3972 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3973 a constant. Return 0 if no simplification is possible. */
3976 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3977 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
3980 unsigned int width = GET_MODE_BITSIZE (mode);
3982 /* VOIDmode means "infinite" precision. */
3984 width = HOST_BITS_PER_WIDE_INT;
3990 if (GET_CODE (op0) == CONST_INT
3991 && GET_CODE (op1) == CONST_INT
3992 && GET_CODE (op2) == CONST_INT
3993 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3994 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3996 /* Extracting a bit-field from a constant */
3997 HOST_WIDE_INT val = INTVAL (op0);
3999 if (BITS_BIG_ENDIAN)
4000 val >>= (GET_MODE_BITSIZE (op0_mode)
4001 - INTVAL (op2) - INTVAL (op1));
4003 val >>= INTVAL (op2);
4005 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4007 /* First zero-extend. */
4008 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4009 /* If desired, propagate sign bit. */
4010 if (code == SIGN_EXTRACT
4011 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4012 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4015 /* Clear the bits that don't belong in our mode,
4016 unless they and our sign bit are all one.
4017 So we get either a reasonable negative value or a reasonable
4018 unsigned value for this mode. */
4019 if (width < HOST_BITS_PER_WIDE_INT
4020 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4021 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4022 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4024 return gen_int_mode (val, mode);
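/* For example, extracting the 4-bit field at bit position 4 of the
   constant 0xab (with BITS_BIG_ENDIAN == 0) yields 0xa for
   ZERO_EXTRACT, while SIGN_EXTRACT propagates the field's top bit
   and yields -6.  */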
4029 if (GET_CODE (op0) == CONST_INT)
4030 return op0 != const0_rtx ? op1 : op2;
4032 /* Convert c ? a : a into "a". */
4033 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4036 /* Convert a != b ? a : b into "a". */
4037 if (GET_CODE (op0) == NE
4038 && ! side_effects_p (op0)
4039 && ! HONOR_NANS (mode)
4040 && ! HONOR_SIGNED_ZEROS (mode)
4041 && ((rtx_equal_p (XEXP (op0, 0), op1)
4042 && rtx_equal_p (XEXP (op0, 1), op2))
4043 || (rtx_equal_p (XEXP (op0, 0), op2)
4044 && rtx_equal_p (XEXP (op0, 1), op1))))
4047 /* Convert a == b ? a : b into "b". */
4048 if (GET_CODE (op0) == EQ
4049 && ! side_effects_p (op0)
4050 && ! HONOR_NANS (mode)
4051 && ! HONOR_SIGNED_ZEROS (mode)
4052 && ((rtx_equal_p (XEXP (op0, 0), op1)
4053 && rtx_equal_p (XEXP (op0, 1), op2))
4054 || (rtx_equal_p (XEXP (op0, 0), op2)
4055 && rtx_equal_p (XEXP (op0, 1), op1))))
4058 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4060 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4061 ? GET_MODE (XEXP (op0, 1))
4062 : GET_MODE (XEXP (op0, 0)));
4065 /* Look for happy constants in op1 and op2. */
4066 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4068 HOST_WIDE_INT t = INTVAL (op1);
4069 HOST_WIDE_INT f = INTVAL (op2);
4071 if (t == STORE_FLAG_VALUE && f == 0)
4072 code = GET_CODE (op0);
4073 else if (t == 0 && f == STORE_FLAG_VALUE)
4076 tmp = reversed_comparison_code (op0, NULL_RTX);
4084 return simplify_gen_relational (code, mode, cmp_mode,
4085 XEXP (op0, 0), XEXP (op0, 1));
4088 if (cmp_mode == VOIDmode)
4089 cmp_mode = op0_mode;
4090 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4091 cmp_mode, XEXP (op0, 0), XEXP (op0, 1));
4094 /* See if any simplifications were possible. */
4097 if (GET_CODE (temp) == CONST_INT)
4098 return temp == const0_rtx ? op2 : op1;
4100 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4106 gcc_assert (GET_MODE (op0) == mode);
4107 gcc_assert (GET_MODE (op1) == mode);
4108 gcc_assert (VECTOR_MODE_P (mode));
4109 op2 = avoid_constant_pool_reference (op2);
4110 if (GET_CODE (op2) == CONST_INT)
4112 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4113 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4114 int mask = (1 << n_elts) - 1;
4116 if (!(INTVAL (op2) & mask))
4118 if ((INTVAL (op2) & mask) == mask)
4121 op0 = avoid_constant_pool_reference (op0);
4122 op1 = avoid_constant_pool_reference (op1);
4123 if (GET_CODE (op0) == CONST_VECTOR
4124 && GET_CODE (op1) == CONST_VECTOR)
4126 rtvec v = rtvec_alloc (n_elts);
4129 for (i = 0; i < n_elts; i++)
4130 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4131 ? CONST_VECTOR_ELT (op0, i)
4132 : CONST_VECTOR_ELT (op1, i));
4133 return gen_rtx_CONST_VECTOR (mode, v);
4145 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4146 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4148 Works by unpacking OP into a collection of 8-bit values
4149 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4150 and then repacking them again for OUTERMODE. */
4153 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4154 enum machine_mode innermode, unsigned int byte)
4156 /* We support up to 512-bit values (for V8DFmode). */
4160 value_mask = (1 << value_bit) - 1
4162 unsigned char value[max_bitsize / value_bit];
4171 rtvec result_v = NULL;
4172 enum mode_class outer_class;
4173 enum machine_mode outer_submode;
4175 /* Some ports misuse CCmode. */
4176 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4179 /* We have no way to represent a complex constant at the rtl level. */
4180 if (COMPLEX_MODE_P (outermode))
4183 /* Unpack the value. */
4185 if (GET_CODE (op) == CONST_VECTOR)
4187 num_elem = CONST_VECTOR_NUNITS (op);
4188 elems = &CONST_VECTOR_ELT (op, 0);
4189 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4195 elem_bitsize = max_bitsize;
4197 /* If this asserts, it is too complicated; reducing value_bit may help. */
4198 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4199 /* I don't know how to handle endianness of sub-units. */
4200 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4202 for (elem = 0; elem < num_elem; elem++)
4205 rtx el = elems[elem];
4207 /* Vectors are kept in target memory order. (This is probably wrong.) */
4210 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4211 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4213 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4214 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4215 unsigned bytele = (subword_byte % UNITS_PER_WORD
4216 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4217 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4220 switch (GET_CODE (el))
4224 for (i = 0; i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize; i += value_bit)
4226 *vp++ = INTVAL (el) >> i;
4227 /* CONST_INTs are always logically sign-extended. */
4228 for (; i < elem_bitsize; i += value_bit)
4229 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4233 if (GET_MODE (el) == VOIDmode)
4235 /* If this triggers, someone should have generated a
4236 CONST_INT instead. */
4237 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4239 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4240 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4241 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4244 *vp++ = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4247 /* It shouldn't matter what's done here, so fill it with zeros. */
4249 for (; i < elem_bitsize; i += value_bit)
4254 long tmp[max_bitsize / 32];
4255 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4257 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4258 gcc_assert (bitsize <= elem_bitsize);
4259 gcc_assert (bitsize % value_bit == 0);
4261 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el), GET_MODE (el));
4264 /* real_to_target produces its result in words affected by
4265 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4266 and use WORDS_BIG_ENDIAN instead; see the documentation
4267 of SUBREG in rtl.texi. */
4268 for (i = 0; i < bitsize; i += value_bit)
4271 if (WORDS_BIG_ENDIAN)
4272 ibase = bitsize - 1 - i;
4275 *vp++ = tmp[ibase / 32] >> i % 32;
4278 /* It shouldn't matter what's done here, so fill it with zeros. */
4280 for (; i < elem_bitsize; i += value_bit)
4290 /* Now, pick the right byte to start with. */
4291 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4292 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4293 will already have offset 0. */
4294 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4296 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4298 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4299 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4300 byte = (subword_byte % UNITS_PER_WORD
4301 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
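/* E.g. for the SImode lowpart of a DImode value on a big-endian
   64-bit target, BYTE comes in as 4 and is renumbered to 0 here,
   because that subword holds the least significant bytes.  */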
4304 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4305 so if it's become negative it will instead be very large.) */
4306 gcc_assert (byte < GET_MODE_SIZE (innermode));
4308 /* Convert from bytes to chunks of size value_bit. */
4309 value_start = byte * (BITS_PER_UNIT / value_bit);
4311 /* Re-pack the value. */
4313 if (VECTOR_MODE_P (outermode))
4315 num_elem = GET_MODE_NUNITS (outermode);
4316 result_v = rtvec_alloc (num_elem);
4317 elems = &RTVEC_ELT (result_v, 0);
4318 outer_submode = GET_MODE_INNER (outermode);
4324 outer_submode = outermode;
4327 outer_class = GET_MODE_CLASS (outer_submode);
4328 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4330 gcc_assert (elem_bitsize % value_bit == 0);
4331 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4333 for (elem = 0; elem < num_elem; elem++)
4337 /* Vectors are stored in target memory order. (This is probably wrong.) */
4340 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4341 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4343 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4344 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4345 unsigned bytele = (subword_byte % UNITS_PER_WORD
4346 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4347 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4350 switch (outer_class)
4353 case MODE_PARTIAL_INT:
4355 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4358 for (i = 0; i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize; i += value_bit)
4360 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4361 for (; i < elem_bitsize; i += value_bit)
4362 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4363 << (i - HOST_BITS_PER_WIDE_INT));
4365 /* immed_double_const doesn't call trunc_int_for_mode. I don't know why. */
4367 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4368 elems[elem] = gen_int_mode (lo, outer_submode);
4369 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4370 elems[elem] = immed_double_const (lo, hi, outer_submode);
4377 case MODE_DECIMAL_FLOAT:
4380 long tmp[max_bitsize / 32];
4382 /* real_from_target wants its input in words affected by
4383 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4384 and use WORDS_BIG_ENDIAN instead; see the documentation
4385 of SUBREG in rtl.texi. */
4386 for (i = 0; i < max_bitsize / 32; i++)
4388 for (i = 0; i < elem_bitsize; i += value_bit)
4391 if (WORDS_BIG_ENDIAN)
4392 ibase = elem_bitsize - 1 - i;
4395 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4398 real_from_target (&r, tmp, outer_submode);
4399 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4407 if (VECTOR_MODE_P (outermode))
4408 return gen_rtx_CONST_VECTOR (outermode, result_v);
4413 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4414 Return 0 if no simplifications are possible. */
4416 simplify_subreg (enum machine_mode outermode, rtx op,
4417 enum machine_mode innermode, unsigned int byte)
4419 /* Little bit of sanity checking. */
4420 gcc_assert (innermode != VOIDmode);
4421 gcc_assert (outermode != VOIDmode);
4422 gcc_assert (innermode != BLKmode);
4423 gcc_assert (outermode != BLKmode);
4425 gcc_assert (GET_MODE (op) == innermode
4426 || GET_MODE (op) == VOIDmode);
4428 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4429 gcc_assert (byte < GET_MODE_SIZE (innermode));
4431 if (outermode == innermode && !byte)
4434 if (GET_CODE (op) == CONST_INT
4435 || GET_CODE (op) == CONST_DOUBLE
4436 || GET_CODE (op) == CONST_VECTOR)
4437 return simplify_immed_subreg (outermode, op, innermode, byte);
4439 /* Changing mode twice with SUBREG => just change it once,
4440 or not at all if changing back to the starting mode. */
4441 if (GET_CODE (op) == SUBREG)
4443 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4444 int final_offset = byte + SUBREG_BYTE (op);
4447 if (outermode == innermostmode
4448 && byte == 0 && SUBREG_BYTE (op) == 0)
4449 return SUBREG_REG (op);
4451 /* The SUBREG_BYTE represents the offset, as if the value were stored
4452 in memory. An irritating exception is a paradoxical subreg, where
4453 we define SUBREG_BYTE to be 0. On big endian machines, this
4454 value should be negative. For a moment, undo this exception. */
4455 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4457 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4458 if (WORDS_BIG_ENDIAN)
4459 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4460 if (BYTES_BIG_ENDIAN)
4461 final_offset += difference % UNITS_PER_WORD;
4463 if (SUBREG_BYTE (op) == 0
4464 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4466 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4467 if (WORDS_BIG_ENDIAN)
4468 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4469 if (BYTES_BIG_ENDIAN)
4470 final_offset += difference % UNITS_PER_WORD;
4473 /* See whether resulting subreg will be paradoxical. */
4474 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4476 /* In nonparadoxical subregs we can't handle negative offsets. */
4477 if (final_offset < 0)
4479 /* Bail out in case resulting subreg would be incorrect. */
4480 if (final_offset % GET_MODE_SIZE (outermode)
4481 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4487 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4489 /* In a paradoxical subreg, see if we are still looking at the lower part.
4490 If so, our SUBREG_BYTE will be 0. */
4491 if (WORDS_BIG_ENDIAN)
4492 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4493 if (BYTES_BIG_ENDIAN)
4494 offset += difference % UNITS_PER_WORD;
4495 if (offset == final_offset)
4501 /* Recurse for further possible simplifications. */
4502 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode, final_offset);
4506 if (validate_subreg (outermode, innermostmode,
4507 SUBREG_REG (op), final_offset))
4508 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4512 /* Merge implicit and explicit truncations. */
4514 if (GET_CODE (op) == TRUNCATE
4515 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4516 && subreg_lowpart_offset (outermode, innermode) == byte)
4517 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4518 GET_MODE (XEXP (op, 0)));
4520 /* SUBREG of a hard register => just change the register number
4521 and/or mode. If the hard register is not valid in that mode,
4522 suppress this simplification. If the hard register is the stack,
4523 frame, or argument pointer, leave this as a SUBREG. */
4526 && REGNO (op) < FIRST_PSEUDO_REGISTER
4527 #ifdef CANNOT_CHANGE_MODE_CLASS
4528 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4529 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4530 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4532 && ((reload_completed && !frame_pointer_needed)
4533 || (REGNO (op) != FRAME_POINTER_REGNUM
4534 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4535 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4538 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4539 && REGNO (op) != ARG_POINTER_REGNUM
4541 && REGNO (op) != STACK_POINTER_REGNUM
4542 && subreg_offset_representable_p (REGNO (op), innermode,
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
        = regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (regno, innermode))
        {
          rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

          /* Propagate the original regno.  We don't have any way to
             specify the offset inside the original regno, so do so only
             for the lowpart.  The information is used only by alias
             analysis, which cannot grok partial registers anyway.  */
          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
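
  /* As an illustration (with hypothetical register numbering): on a
     32-bit little-endian target where hard registers 0 and 1 together
     hold a DImode value, (subreg:SI (reg:DI 0) 4) simplifies directly
     to (reg:SI 1), with no SUBREG left behind.  */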

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
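
  /* For example, narrowing a memory reference this way turns
     (subreg:QI (mem:SI (reg:SI A)) 3) into
     (mem:QI (plus:SI (reg:SI A) (const_int 3))), since SUBREG_BYTE is
     already a memory byte offset.  */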

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int inner_size, final_offset;
      rtx part, res;

      inner_size = GET_MODE_UNIT_SIZE (innermode);
      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
      final_offset = byte % inner_size;
      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
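
  /* For instance, with SCmode built from two SFmode parts,
     (subreg:SF (concat:SC R I) 4) has byte 4 falling past the 4-byte
     unit size, so it selects the second operand and folds to I.  */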

  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }
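
      /* Concretely (an illustrative little-endian sketch of the three
         cases above):
           (subreg:HI (zero_extend:SI (reg:HI x)) 0) -> (reg:HI x)
           (subreg:HI (zero_extend:DI (reg:SI x)) 0)
             -> (subreg:HI (reg:SI x) 0)
           (subreg:HI (zero_extend:SI (reg:QI x)) 0)
             -> (zero_extend:HI (reg:QI x))  */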

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
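
  /* E.g. on a little-endian target,
     (subreg:HI (zero_extend:SI (reg:QI x)) 2) reads bits 16..31 of the
     extension, all of which are known to be zero, so it folds to
     (const_int 0).  */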

  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
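
/* For illustration: a hypothetical caller that wants the low SImode
   part of an arbitrary value X might wrap simplify_gen_subreg like
   this.  This is only a sketch; the function name is invented, and the
   callees are the routines defined above plus subreg_lowpart_offset
   from emit-rtl.c.  */

static rtx
get_simode_lowpart (rtx x)
{
  enum machine_mode mode = GET_MODE (x);

  /* subreg_lowpart_offset computes the byte offset of the lowpart,
     which is nonzero on big-endian targets.  The call below either
     folds the subreg away or builds a fresh SUBREG rtx, and returns
     NULL if no valid SImode view of X can be expressed.  */
  return simplify_gen_subreg (SImode, x, mode,
                              subreg_lowpart_offset (SImode, mode));
}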

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   can be performed.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which do not depend on pass-specific state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass-dependent state to be provided to these
           routines and add simplifications based on that state.
           Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It is silly that when we add a simplification
   it needs to be added in four places (three for RTL simplification
   and one for tree simplification).  */

rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))