/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
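
/* A worked example of the convention (this note assumes a 64-bit
   HOST_WIDE_INT; the macro itself does not care): the double-word
   value -5 is the pair (low, high) with low = 0xfffffffffffffffb and
   high = -1, and the high word can be recovered from the low one:

     HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) == (HOST_WIDE_INT) -1
     HWI_SIGN_EXTEND ((HOST_WIDE_INT) 5)  == (HOST_WIDE_INT) 0   */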

static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
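
/* For example, for 32-bit SImode this accepts exactly the CONST_INT
   with bit pattern 0x80000000 (stored in sign-extended form as
   -2147483648) and rejects every other immediate.  */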

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
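
/* For instance, simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   folds to X itself, while an unsimplifiable combination such as two
   distinct registers yields a fresh (plus:SI reg1 reg2), with any
   constant operand canonicalized into the second position.  */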

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}

/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
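
/* A typical use is propagating a known value: replacing (reg R) with
   (const_int 4) in (plus:SI (reg R) (const_int 3)) does not merely
   rebuild the PLUS but folds the whole expression to (const_int 7).  */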

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
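
      /* A worked instance of the De Morgan rewrite above:
         (not (ior A B)) becomes (and (not A) (not B)); with A = 0b1100
         and B = 0b1010 in a 4-bit view, both forms yield 0b0001.  */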

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx)
        {
          if (STORE_FLAG_VALUE == 1)
            return simplify_gen_binary (ASHIFTRT, mode, XEXP (op, 0),
                                        GEN_INT (GET_MODE_BITSIZE (mode) - 1));
          else if (STORE_FLAG_VALUE == -1)
            return simplify_gen_binary (LSHIFTRT, mode, XEXP (op, 0),
                                        GEN_INT (GET_MODE_BITSIZE (mode) - 1));
        }
      break;
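
      /* Worked example for the LT rules above, in 32-bit SImode with
         STORE_FLAG_VALUE == 1: (lt x 0) is 1 exactly when the sign bit
         of x is set, so (neg (lt x 0)) is -1 or 0, which is precisely
         (ashiftrt x 31), the sign bit smeared across the word.  */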

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              >= (unsigned int) (GET_MODE_BITSIZE (mode) + 1))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode, XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);
      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
    case PARITY:
      /* (pop* (zero_extend <X>)) = (pop* <X>) */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (code, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;
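
          /* The two loops above use Kernighan's trick: arg0 &= arg0 - 1
             clears the lowest set bit on each iteration, e.g.
             0b1011 -> 0b1010 -> 0b1000 -> 0, so POPCOUNT of 0b1011 is 3
             and its PARITY is 1.  */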

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1, hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th, xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th, xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th, xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
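
/* For example, (plus (plus x 3) (plus y 7)) is linearized to
   (plus (plus (plus x 3) y) 7); the canonicalizations above then float
   the constants outward so that repeated folding can combine them,
   giving (plus (plus x y) 10).  */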

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }
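
      /* Example for the coefficient-combining case above: with
         op0 = (mult x 3) and op1 = (ashift x 2), both operands strip
         down to x with coefficients 3 and 4, so the sum is rewritten
         as (mult x 7) unless rtx_cost says that is dearer than the
         original plus.  */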

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */
#ifdef HAVE_cc0
      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;
#endif

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
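
      /* Bitwise check of the rule above: x & y is the set of bits x
         shares with y, and subtracting it from x cannot borrow, so it
         equals masking with ~y.  E.g. x = 0b1101, y = 0b0110:
         x - (x & y) = 0b1101 - 0b0100 = 0b1001 = x & ~y.  */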

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      else if (GET_CODE (trueop1) == CONST_DOUBLE
               && (GET_MODE (trueop1) == VOIDmode
                   || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
               && GET_MODE (op0) == mode
               && CONST_DOUBLE_LOW (trueop1) == 0
               && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (GET_CODE (op1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && GET_CODE (XEXP (opleft, 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
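
      /* E.g. in 32-bit SImode, (ior (ashift a 8) (lshiftrt a 24))
         satisfies 8 + 24 == 32 and so is recognized as (rotate a 8).  */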

      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and X C1) C2), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
          && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                      (AND, mode, XEXP (op0, 0),
                                       GEN_INT (INTVAL (XEXP (op0, 1))
                                                & ~INTVAL (op1))),
                                    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
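
      /* The two agree because flipping the sign bit and adding the
         sign bit coincide in two's complement arithmetic: in 8-bit
         QImode, (xor x 0x80) and (plus x 0x80) both send 0x00 to 0x80
         and 0x90 to 0x10, the carry out of bit 7 being discarded.  */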

      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));

      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);
2118 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2119 comparison if STORE_FLAG_VALUE is 1. */
2120 if (STORE_FLAG_VALUE == 1
2121 && trueop1 == const1_rtx
2122 && COMPARISON_P (op0)
2123 && (reversed = reversed_comparison (op0, mode)))
2126 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2127 is (lt foo (const_int 0)), so we can perform the above
2128 simplification if STORE_FLAG_VALUE is 1. */
2130 if (STORE_FLAG_VALUE == 1
2131 && trueop1 == const1_rtx
2132 && GET_CODE (op0) == LSHIFTRT
2133 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2134 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2135 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
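/* (Editorial example: in SImode, (lshiftrt FOO 31) is 1 exactly when FOO
   is negative, i.e. (lt FOO 0); XORing that flag with 1 reverses the
   test, which is why (ge FOO 0) is generated here.)  */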
2137 /* (xor (comparison foo bar) (const_int sign-bit))
2138 when STORE_FLAG_VALUE is the sign bit. */
2139 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2140 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2141 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2142 && trueop1 == const_true_rtx
2143 && COMPARISON_P (op0)
2144 && (reversed = reversed_comparison (op0, mode)))
2149 tem = simplify_associative_operation (code, mode, op0, op1);
2155 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2157 /* If we are turning off bits already known off in OP0, we need
2158 not do an AND. */
2159 if (GET_CODE (trueop1) == CONST_INT
2160 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2161 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2163 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2164 && GET_MODE_CLASS (mode) != MODE_CC)
2167 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2168 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2169 && ! side_effects_p (op0)
2170 && GET_MODE_CLASS (mode) != MODE_CC)
2171 return CONST0_RTX (mode);
2173 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2174 there are no nonzero bits of C outside of X's mode. */
2175 if ((GET_CODE (op0) == SIGN_EXTEND
2176 || GET_CODE (op0) == ZERO_EXTEND)
2177 && GET_CODE (trueop1) == CONST_INT
2178 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2179 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2180 & INTVAL (trueop1)) == 0)
2182 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2183 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2184 gen_int_mode (INTVAL (trueop1),
2186 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
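/* (Editorial example: for X:QI inside an SImode AND,
   (and (sign_extend:SI X) (const_int 0x7f)) keeps no bit outside QImode,
   so it becomes (zero_extend:SI (and:QI X (const_int 0x7f))).)  */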
2189 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2190 insn (and may simplify more). */
2191 if (GET_CODE (op0) == XOR
2192 && rtx_equal_p (XEXP (op0, 0), op1)
2193 && ! side_effects_p (op1))
2194 return simplify_gen_binary (AND, mode,
2195 simplify_gen_unary (NOT, mode,
2196 XEXP (op0, 1), mode),
2199 if (GET_CODE (op0) == XOR
2200 && rtx_equal_p (XEXP (op0, 1), op1)
2201 && ! side_effects_p (op1))
2202 return simplify_gen_binary (AND, mode,
2203 simplify_gen_unary (NOT, mode,
2204 XEXP (op0, 0), mode),
2207 /* Similarly for (~(A ^ B)) & A. */
2208 if (GET_CODE (op0) == NOT
2209 && GET_CODE (XEXP (op0, 0)) == XOR
2210 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2211 && ! side_effects_p (op1))
2212 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2214 if (GET_CODE (op0) == NOT
2215 && GET_CODE (XEXP (op0, 0)) == XOR
2216 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2217 && ! side_effects_p (op1))
2218 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2220 /* Convert (A | B) & A to A. */
2221 if (GET_CODE (op0) == IOR
2222 && (rtx_equal_p (XEXP (op0, 0), op1)
2223 || rtx_equal_p (XEXP (op0, 1), op1))
2224 && ! side_effects_p (XEXP (op0, 0))
2225 && ! side_effects_p (XEXP (op0, 1)))
2228 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2229 ((A & N) + B) & M -> (A + B) & M
2230 Similarly if (N & M) == 0,
2231 ((A | N) + B) & M -> (A + B) & M
2232 and for - instead of + and/or ^ instead of |. */
2233 if (GET_CODE (trueop1) == CONST_INT
2234 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2235 && ~INTVAL (trueop1)
2236 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2237 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2242 pmop[0] = XEXP (op0, 0);
2243 pmop[1] = XEXP (op0, 1);
2245 for (which = 0; which < 2; which++)
2248 switch (GET_CODE (tem))
2251 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2252 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2253 == INTVAL (trueop1))
2254 pmop[which] = XEXP (tem, 0);
2258 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2259 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2260 pmop[which] = XEXP (tem, 0);
2267 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2269 tem = simplify_gen_binary (GET_CODE (op0), mode,
2271 return simplify_gen_binary (code, mode, tem, op1);
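/* (Editorial example: with M == 0xff and N == 0xfff,
   (and (plus (and A (const_int 0xfff)) B) (const_int 0xff)) folds to
   (and (plus A B) (const_int 0xff)): the bits the inner AND clears sit
   above bit 7, and carries only propagate upward, so the low byte of
   the sum is unchanged.)  */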
2274 tem = simplify_associative_operation (code, mode, op0, op1);
2280 /* 0/x is 0 (or x&0 if x has side-effects). */
2281 if (trueop0 == CONST0_RTX (mode))
2283 if (side_effects_p (op1))
2284 return simplify_gen_binary (AND, mode, op1, trueop0);
2288 if (trueop1 == CONST1_RTX (mode))
2289 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2290 /* Convert divide by power of two into shift. */
2291 if (GET_CODE (trueop1) == CONST_INT
2292 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2293 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
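/* (Editorial example: (udiv X (const_int 8)) becomes
   (lshiftrt X (const_int 3)), since exact_log2 (8) == 3 and the
   division is unsigned.)  */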
2297 /* Handle floating point and integers separately. */
2298 if (SCALAR_FLOAT_MODE_P (mode))
2300 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2301 safe for modes with NaNs, since 0.0 / 0.0 will then be
2302 NaN rather than 0.0. Nor is it safe for modes with signed
2303 zeros, since dividing 0 by a negative number gives -0.0. */
2304 if (trueop0 == CONST0_RTX (mode)
2305 && !HONOR_NANS (mode)
2306 && !HONOR_SIGNED_ZEROS (mode)
2307 && ! side_effects_p (op1))
2310 if (trueop1 == CONST1_RTX (mode)
2311 && !HONOR_SNANS (mode))
2314 if (GET_CODE (trueop1) == CONST_DOUBLE
2315 && trueop1 != CONST0_RTX (mode))
2318 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2321 if (REAL_VALUES_EQUAL (d, dconstm1)
2322 && !HONOR_SNANS (mode))
2323 return simplify_gen_unary (NEG, mode, op0, mode);
2325 /* Change FP division by a constant into multiplication.
2326 Only do this with -funsafe-math-optimizations. */
2327 if (flag_unsafe_math_optimizations
2328 && !REAL_VALUES_EQUAL (d, dconst0))
2330 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2331 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2332 return simplify_gen_binary (MULT, mode, op0, tem);
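/* (Editorial example: X / 2.0 becomes X * 0.5. The reciprocal 1/C is
   itself rounded to MODE, which can change results for constants whose
   reciprocal is inexact; hence the flag_unsafe_math_optimizations
   guard.)  */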
2338 /* 0/x is 0 (or x&0 if x has side-effects). */
2339 if (trueop0 == CONST0_RTX (mode))
2341 if (side_effects_p (op1))
2342 return simplify_gen_binary (AND, mode, op1, trueop0);
2346 if (trueop1 == CONST1_RTX (mode))
2347 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2349 if (trueop1 == constm1_rtx)
2351 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2352 return simplify_gen_unary (NEG, mode, x, mode);
2358 /* 0%x is 0 (or x&0 if x has side-effects). */
2359 if (trueop0 == CONST0_RTX (mode))
2361 if (side_effects_p (op1))
2362 return simplify_gen_binary (AND, mode, op1, trueop0);
2365 /* x%1 is 0 (or x&0 if x has side-effects). */
2366 if (trueop1 == CONST1_RTX (mode))
2368 if (side_effects_p (op0))
2369 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2370 return CONST0_RTX (mode);
2372 /* Implement modulus by power of two as AND. */
2373 if (GET_CODE (trueop1) == CONST_INT
2374 && exact_log2 (INTVAL (trueop1)) > 0)
2375 return simplify_gen_binary (AND, mode, op0,
2376 GEN_INT (INTVAL (trueop1) - 1));
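/* (Editorial example: (umod X (const_int 8)) becomes
   (and X (const_int 7)), since 8 == 1 << 3 and 8 - 1 == 7.)  */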
2380 /* 0%x is 0 (or x&0 if x has side-effects). */
2381 if (trueop0 == CONST0_RTX (mode))
2383 if (side_effects_p (op1))
2384 return simplify_gen_binary (AND, mode, op1, trueop0);
2387 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2388 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2390 if (side_effects_p (op0))
2391 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2392 return CONST0_RTX (mode);
2399 /* Rotating ~0 always results in ~0. */
2400 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2401 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2402 && ! side_effects_p (op1))
2405 /* Fall through.... */
2409 if (trueop1 == CONST0_RTX (mode))
2411 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2416 if (width <= HOST_BITS_PER_WIDE_INT
2417 && GET_CODE (trueop1) == CONST_INT
2418 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2419 && ! side_effects_p (op0))
2421 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2423 tem = simplify_associative_operation (code, mode, op0, op1);
2429 if (width <= HOST_BITS_PER_WIDE_INT
2430 && GET_CODE (trueop1) == CONST_INT
2431 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2432 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2433 && ! side_effects_p (op0))
2435 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2437 tem = simplify_associative_operation (code, mode, op0, op1);
2443 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2445 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2447 tem = simplify_associative_operation (code, mode, op0, op1);
2453 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2455 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2457 tem = simplify_associative_operation (code, mode, op0, op1);
2466 /* ??? There are simplifications that can be done. */
2470 if (!VECTOR_MODE_P (mode))
2472 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2473 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2474 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2475 gcc_assert (XVECLEN (trueop1, 0) == 1);
2476 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2478 if (GET_CODE (trueop0) == CONST_VECTOR)
2479 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2484 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2485 gcc_assert (GET_MODE_INNER (mode)
2486 == GET_MODE_INNER (GET_MODE (trueop0)));
2487 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2489 if (GET_CODE (trueop0) == CONST_VECTOR)
2491 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2492 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2493 rtvec v = rtvec_alloc (n_elts);
2496 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2497 for (i = 0; i < n_elts; i++)
2499 rtx x = XVECEXP (trueop1, 0, i);
2501 gcc_assert (GET_CODE (x) == CONST_INT);
2502 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2506 return gen_rtx_CONST_VECTOR (mode, v);
2510 if (XVECLEN (trueop1, 0) == 1
2511 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2512 && GET_CODE (trueop0) == VEC_CONCAT)
2515 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2517 /* Try to find the element in the VEC_CONCAT. */
2518 while (GET_MODE (vec) != mode
2519 && GET_CODE (vec) == VEC_CONCAT)
2521 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2522 if (offset < vec_size)
2523 vec = XEXP (vec, 0);
2527 vec = XEXP (vec, 1);
2529 vec = avoid_constant_pool_reference (vec);
2532 if (GET_MODE (vec) == mode)
2539 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2540 ? GET_MODE (trueop0)
2541 : GET_MODE_INNER (mode));
2542 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2543 ? GET_MODE (trueop1)
2544 : GET_MODE_INNER (mode));
2546 gcc_assert (VECTOR_MODE_P (mode));
2547 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2548 == GET_MODE_SIZE (mode));
2550 if (VECTOR_MODE_P (op0_mode))
2551 gcc_assert (GET_MODE_INNER (mode)
2552 == GET_MODE_INNER (op0_mode));
2554 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2556 if (VECTOR_MODE_P (op1_mode))
2557 gcc_assert (GET_MODE_INNER (mode)
2558 == GET_MODE_INNER (op1_mode));
2560 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2562 if ((GET_CODE (trueop0) == CONST_VECTOR
2563 || GET_CODE (trueop0) == CONST_INT
2564 || GET_CODE (trueop0) == CONST_DOUBLE)
2565 && (GET_CODE (trueop1) == CONST_VECTOR
2566 || GET_CODE (trueop1) == CONST_INT
2567 || GET_CODE (trueop1) == CONST_DOUBLE))
2569 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2570 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2571 rtvec v = rtvec_alloc (n_elts);
2573 unsigned in_n_elts = 1;
2575 if (VECTOR_MODE_P (op0_mode))
2576 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2577 for (i = 0; i < n_elts; i++)
2581 if (!VECTOR_MODE_P (op0_mode))
2582 RTVEC_ELT (v, i) = trueop0;
2584 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2588 if (!VECTOR_MODE_P (op1_mode))
2589 RTVEC_ELT (v, i) = trueop1;
2591 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2596 return gen_rtx_CONST_VECTOR (mode, v);
2609 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2612 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2614 unsigned int width = GET_MODE_BITSIZE (mode);
2616 if (VECTOR_MODE_P (mode)
2617 && code != VEC_CONCAT
2618 && GET_CODE (op0) == CONST_VECTOR
2619 && GET_CODE (op1) == CONST_VECTOR)
2621 unsigned n_elts = GET_MODE_NUNITS (mode);
2622 enum machine_mode op0mode = GET_MODE (op0);
2623 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2624 enum machine_mode op1mode = GET_MODE (op1);
2625 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2626 rtvec v = rtvec_alloc (n_elts);
2629 gcc_assert (op0_n_elts == n_elts);
2630 gcc_assert (op1_n_elts == n_elts);
2631 for (i = 0; i < n_elts; i++)
2633 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2634 CONST_VECTOR_ELT (op0, i),
2635 CONST_VECTOR_ELT (op1, i));
2638 RTVEC_ELT (v, i) = x;
2641 return gen_rtx_CONST_VECTOR (mode, v);
2644 if (VECTOR_MODE_P (mode)
2645 && code == VEC_CONCAT
2646 && CONSTANT_P (op0) && CONSTANT_P (op1))
2648 unsigned n_elts = GET_MODE_NUNITS (mode);
2649 rtvec v = rtvec_alloc (n_elts);
2651 gcc_assert (n_elts >= 2);
2654 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2655 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2657 RTVEC_ELT (v, 0) = op0;
2658 RTVEC_ELT (v, 1) = op1;
2662 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2663 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2666 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2667 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2668 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2670 for (i = 0; i < op0_n_elts; ++i)
2671 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2672 for (i = 0; i < op1_n_elts; ++i)
2673 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2676 return gen_rtx_CONST_VECTOR (mode, v);
2679 if (SCALAR_FLOAT_MODE_P (mode)
2680 && GET_CODE (op0) == CONST_DOUBLE
2681 && GET_CODE (op1) == CONST_DOUBLE
2682 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2693 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2695 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2697 for (i = 0; i < 4; i++)
2714 real_from_target (&r, tmp0, mode);
2715 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2719 REAL_VALUE_TYPE f0, f1, value, result;
2722 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2723 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2724 real_convert (&f0, mode, &f0);
2725 real_convert (&f1, mode, &f1);
2727 if (HONOR_SNANS (mode)
2728 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2732 && REAL_VALUES_EQUAL (f1, dconst0)
2733 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2736 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2737 && flag_trapping_math
2738 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2740 int s0 = REAL_VALUE_NEGATIVE (f0);
2741 int s1 = REAL_VALUE_NEGATIVE (f1);
2746 /* Inf + -Inf = NaN plus exception. */
2751 /* Inf - Inf = NaN plus exception. */
2756 /* Inf / Inf = NaN plus exception. */
2763 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2764 && flag_trapping_math
2765 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2766 || (REAL_VALUE_ISINF (f1)
2767 && REAL_VALUES_EQUAL (f0, dconst0))))
2768 /* Inf * 0 = NaN plus exception. */
2771 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2773 real_convert (&result, mode, &value);
2775 /* Don't constant fold this floating point operation if
2776 the result has overflowed and flag_trapping_math is set. */
2778 if (flag_trapping_math
2779 && MODE_HAS_INFINITIES (mode)
2780 && REAL_VALUE_ISINF (result)
2781 && !REAL_VALUE_ISINF (f0)
2782 && !REAL_VALUE_ISINF (f1))
2783 /* Overflow plus exception. */
2786 /* Don't constant fold this floating point operation if the
2787 result may depend upon the run-time rounding mode and
2788 flag_rounding_math is set, or if GCC's software emulation
2789 is unable to accurately represent the result. */
2791 if ((flag_rounding_math
2792 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2793 && !flag_unsafe_math_optimizations))
2794 && (inexact || !real_identical (&result, &value)))
2797 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2801 /* We can fold some multi-word operations. */
2802 if (GET_MODE_CLASS (mode) == MODE_INT
2803 && width == HOST_BITS_PER_WIDE_INT * 2
2804 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2805 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2807 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2808 HOST_WIDE_INT h1, h2, hv, ht;
2810 if (GET_CODE (op0) == CONST_DOUBLE)
2811 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2813 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2815 if (GET_CODE (op1) == CONST_DOUBLE)
2816 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2818 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2823 /* A - B == A + (-B). */
2824 neg_double (l2, h2, &lv, &hv);
2827 /* Fall through.... */
2830 add_double (l1, h1, l2, h2, &lv, &hv);
2834 mul_double (l1, h1, l2, h2, &lv, &hv);
2838 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2839 &lv, &hv, <, &ht))
2844 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2845 <, &ht, &lv, &hv))
2850 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2851 &lv, &hv, <, &ht))
2856 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2857 <, &ht, &lv, &hv))
2862 lv = l1 & l2, hv = h1 & h2;
2866 lv = l1 | l2, hv = h1 | h2;
2870 lv = l1 ^ l2, hv = h1 ^ h2;
2876 && ((unsigned HOST_WIDE_INT) l1
2877 < (unsigned HOST_WIDE_INT) l2)))
2886 && ((unsigned HOST_WIDE_INT) l1
2887 > (unsigned HOST_WIDE_INT) l2)))
2894 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2896 && ((unsigned HOST_WIDE_INT) l1
2897 < (unsigned HOST_WIDE_INT) l2)))
2904 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2906 && ((unsigned HOST_WIDE_INT) l1
2907 > (unsigned HOST_WIDE_INT) l2)))
2913 case LSHIFTRT: case ASHIFTRT:
2915 case ROTATE: case ROTATERT:
2916 if (SHIFT_COUNT_TRUNCATED)
2917 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2919 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2922 if (code == LSHIFTRT || code == ASHIFTRT)
2923 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2925 else if (code == ASHIFT)
2926 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2927 else if (code == ROTATE)
2928 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2929 else /* code == ROTATERT */
2930 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2937 return immed_double_const (lv, hv, mode);
2940 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2941 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2943 /* Get the integer argument values in two forms:
2944 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2946 arg0 = INTVAL (op0);
2947 arg1 = INTVAL (op1);
2949 if (width < HOST_BITS_PER_WIDE_INT)
2951 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2952 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2955 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2956 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2959 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2960 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2968 /* Compute the value of the arithmetic. */
2973 val = arg0s + arg1s;
2977 val = arg0s - arg1s;
2981 val = arg0s * arg1s;
2986 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2989 val = arg0s / arg1s;
2994 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2997 val = arg0s % arg1s;
3002 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3005 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3010 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3013 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3031 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3032 the value is in range. We can't return any old value for
3033 out-of-range arguments because either the middle-end (via
3034 shift_truncation_mask) or the back-end might be relying on
3035 target-specific knowledge. Nor can we rely on
3036 shift_truncation_mask, since the shift might not be part of an
3037 ashlM3, lshrM3 or ashrM3 instruction. */
3038 if (SHIFT_COUNT_TRUNCATED)
3039 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3040 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3043 val = (code == ASHIFT
3044 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3045 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3047 /* Sign-extend the result for arithmetic right shifts. */
3048 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3049 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
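/* (Editorial example: for width 8, arg0 == 0xf0 (arg0s == -16) and
   arg1 == 2, the logical shift gives 0x3c; OR-ing in -1 << 6 restores
   the sign copies, yielding 0xfc, i.e. -4 == -16 >> 2.)  */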
3057 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3058 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3066 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3067 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3071 /* Do nothing here. */
3075 val = arg0s <= arg1s ? arg0s : arg1s;
3079 val = ((unsigned HOST_WIDE_INT) arg0
3080 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3084 val = arg0s > arg1s ? arg0s : arg1s;
3088 val = ((unsigned HOST_WIDE_INT) arg0
3089 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3096 /* ??? There are simplifications that can be done. */
3103 return gen_int_mode (val, mode);
3111 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3114 Rather than test for specific cases, we do this by a brute-force method
3115 and do all possible simplifications until no more changes occur. Then
3116 we rebuild the operation. */
3118 struct simplify_plus_minus_op_data
3126 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3128 const struct simplify_plus_minus_op_data *d1 = p1;
3129 const struct simplify_plus_minus_op_data *d2 = p2;
3132 result = (commutative_operand_precedence (d2->op)
3133 - commutative_operand_precedence (d1->op));
3136 return d1->ix - d2->ix;
3140 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3143 struct simplify_plus_minus_op_data ops[8];
3145 int n_ops = 2, input_ops = 2;
3146 int first, changed, canonicalized = 0;
3149 memset (ops, 0, sizeof ops);
3151 /* Set up the two operands and then expand them until nothing has been
3152 changed. If we run out of room in our array, give up; this should
3153 almost never happen. */
3158 ops[1].neg = (code == MINUS);
3164 for (i = 0; i < n_ops; i++)
3166 rtx this_op = ops[i].op;
3167 int this_neg = ops[i].neg;
3168 enum rtx_code this_code = GET_CODE (this_op);
3177 ops[n_ops].op = XEXP (this_op, 1);
3178 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3181 ops[i].op = XEXP (this_op, 0);
3184 canonicalized |= this_neg;
3188 ops[i].op = XEXP (this_op, 0);
3189 ops[i].neg = ! this_neg;
3196 && GET_CODE (XEXP (this_op, 0)) == PLUS
3197 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3198 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3200 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3201 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3202 ops[n_ops].neg = this_neg;
3210 /* ~a -> (-a - 1) */
3213 ops[n_ops].op = constm1_rtx;
3214 ops[n_ops++].neg = this_neg;
3215 ops[i].op = XEXP (this_op, 0);
3216 ops[i].neg = !this_neg;
3225 ops[i].op = neg_const_int (mode, this_op);
3239 gcc_assert (n_ops >= 2);
3242 int n_constants = 0;
3244 for (i = 0; i < n_ops; i++)
3245 if (GET_CODE (ops[i].op) == CONST_INT)
3248 if (n_constants <= 1)
3252 /* If we only have two operands, we can avoid the loops. */
3255 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3258 /* Get the two operands. Be careful with the order, especially for
3259 the cases where code == MINUS. */
3260 if (ops[0].neg && ops[1].neg)
3262 lhs = gen_rtx_NEG (mode, ops[0].op);
3265 else if (ops[0].neg)
3276 return simplify_const_binary_operation (code, mode, lhs, rhs);
3279 /* Now simplify each pair of operands until nothing changes. The first
3280 time through just simplify constants against each other. */
3287 for (i = 0; i < n_ops - 1; i++)
3288 for (j = i + 1; j < n_ops; j++)
3290 rtx lhs = ops[i].op, rhs = ops[j].op;
3291 int lneg = ops[i].neg, rneg = ops[j].neg;
3293 if (lhs != 0 && rhs != 0
3294 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
3296 enum rtx_code ncode = PLUS;
3302 tem = lhs, lhs = rhs, rhs = tem;
3304 else if (swap_commutative_operands_p (lhs, rhs))
3305 tem = lhs, lhs = rhs, rhs = tem;
3307 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3309 /* Reject "simplifications" that just wrap the two
3310 arguments in a CONST. Failure to do so can result
3311 in infinite recursion with simplify_binary_operation
3312 when it calls us to simplify CONST operations. */
3314 && ! (GET_CODE (tem) == CONST
3315 && GET_CODE (XEXP (tem, 0)) == ncode
3316 && XEXP (XEXP (tem, 0), 0) == lhs
3317 && XEXP (XEXP (tem, 0), 1) == rhs)
3318 /* Don't allow -x + -1 -> ~x simplifications in the
3319 first pass. This allows us the chance to combine
3320 the -1 with other constants. */
3322 && GET_CODE (tem) == NOT
3323 && XEXP (tem, 0) == rhs))
3326 if (GET_CODE (tem) == NEG)
3327 tem = XEXP (tem, 0), lneg = !lneg;
3328 if (GET_CODE (tem) == CONST_INT && lneg)
3329 tem = neg_const_int (mode, tem), lneg = 0;
3333 ops[j].op = NULL_RTX;
3343 /* Pack all the operands to the lower-numbered entries. */
3344 for (i = 0, j = 0; j < n_ops; j++)
3348 /* Stabilize sort. */
3354 /* Sort the operations based on swap_commutative_operands_p. */
3355 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
3357 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3359 && GET_CODE (ops[1].op) == CONST_INT
3360 && CONSTANT_P (ops[0].op)
3362 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3364 /* We suppressed creation of trivial CONST expressions in the
3365 combination loop to avoid recursion. Create one manually now.
3366 The combination loop should have ensured that there is exactly
3367 one CONST_INT, and the sort will have ensured that it is last
3368 in the array and that any other constant will be next-to-last. */
3371 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3372 && CONSTANT_P (ops[n_ops - 2].op))
3374 rtx value = ops[n_ops - 1].op;
3375 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3376 value = neg_const_int (mode, value);
3377 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3381 /* Put a non-negated operand first, if possible. */
3383 for (i = 0; i < n_ops && ops[i].neg; i++)
3386 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3395 /* Now make the result by performing the requested operations. */
3397 for (i = 1; i < n_ops; i++)
3398 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3399 mode, result, ops[i].op);
3404 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3406 plus_minus_operand_p (rtx x)
3408 return GET_CODE (x) == PLUS
3409 || GET_CODE (x) == MINUS
3410 || (GET_CODE (x) == CONST
3411 && GET_CODE (XEXP (x, 0)) == PLUS
3412 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3413 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3416 /* Like simplify_binary_operation except used for relational operators.
3417 MODE is the mode of the result. If MODE is VOIDmode, the operands must
3418 not both be VOIDmode.
3420 CMP_MODE specifies the mode in which the comparison is done, so it is
3421 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3422 the operands or, if both are VOIDmode, the operands are compared in
3423 "infinite precision". */
3425 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3426 enum machine_mode cmp_mode, rtx op0, rtx op1)
3428 rtx tem, trueop0, trueop1;
3430 if (cmp_mode == VOIDmode)
3431 cmp_mode = GET_MODE (op0);
3432 if (cmp_mode == VOIDmode)
3433 cmp_mode = GET_MODE (op1);
3435 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3438 if (SCALAR_FLOAT_MODE_P (mode))
3440 if (tem == const0_rtx)
3441 return CONST0_RTX (mode);
3442 #ifdef FLOAT_STORE_FLAG_VALUE
3444 REAL_VALUE_TYPE val;
3445 val = FLOAT_STORE_FLAG_VALUE (mode);
3446 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3452 if (VECTOR_MODE_P (mode))
3454 if (tem == const0_rtx)
3455 return CONST0_RTX (mode);
3456 #ifdef VECTOR_STORE_FLAG_VALUE
3461 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3462 if (val == NULL_RTX)
3464 if (val == const1_rtx)
3465 return CONST1_RTX (mode);
3467 units = GET_MODE_NUNITS (mode);
3468 v = rtvec_alloc (units);
3469 for (i = 0; i < units; i++)
3470 RTVEC_ELT (v, i) = val;
3471 return gen_rtx_raw_CONST_VECTOR (mode, v);
3481 /* For the following tests, ensure const0_rtx is op1. */
3482 if (swap_commutative_operands_p (op0, op1)
3483 || (op0 == const0_rtx && op1 != const0_rtx))
3484 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3486 /* If op0 is a compare, extract the comparison arguments from it. */
3487 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3488 return simplify_relational_operation (code, mode, VOIDmode,
3489 XEXP (op0, 0), XEXP (op0, 1));
3491 if (mode == VOIDmode
3492 || GET_MODE_CLASS (cmp_mode) == MODE_CC
3496 trueop0 = avoid_constant_pool_reference (op0);
3497 trueop1 = avoid_constant_pool_reference (op1);
3498 return simplify_relational_operation_1 (code, mode, cmp_mode,
3502 /* This part of simplify_relational_operation is only used when CMP_MODE
3503 is not in class MODE_CC (i.e. it is a real comparison).
3505 MODE is the mode of the result, while CMP_MODE specifies the mode
3506 in which the comparison is done, so it is the mode of the operands. */
3509 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3510 enum machine_mode cmp_mode, rtx op0, rtx op1)
3512 enum rtx_code op0code = GET_CODE (op0);
3514 if (GET_CODE (op1) == CONST_INT)
3516 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3518 /* If op0 is a comparison, extract the comparison arguments
3519 from it. */
3522 if (GET_MODE (op0) == mode)
3523 return simplify_rtx (op0);
3525 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3526 XEXP (op0, 0), XEXP (op0, 1));
3528 else if (code == EQ)
3530 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3531 if (new_code != UNKNOWN)
3532 return simplify_gen_relational (new_code, mode, VOIDmode,
3533 XEXP (op0, 0), XEXP (op0, 1));
3538 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3539 if ((code == EQ || code == NE)
3540 && (op0code == PLUS || op0code == MINUS)
3542 && CONSTANT_P (XEXP (op0, 1))
3543 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3545 rtx x = XEXP (op0, 0);
3546 rtx c = XEXP (op0, 1);
3548 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3550 return simplify_gen_relational (code, mode, cmp_mode, x, c);
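/* (Editorial example: (eq (plus X (const_int 4)) (const_int 10))
   becomes (eq X (const_int 6)), moving the constant across the
   comparison.)  */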
3553 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3554 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3556 && op1 == const0_rtx
3557 && GET_MODE_CLASS (mode) == MODE_INT
3558 && cmp_mode != VOIDmode
3559 /* ??? Work-around BImode bugs in the ia64 backend. */
3561 && cmp_mode != BImode
3562 && nonzero_bits (op0, cmp_mode) == 1
3563 && STORE_FLAG_VALUE == 1)
3564 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3565 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3566 : lowpart_subreg (mode, op0, cmp_mode);
3568 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3569 if ((code == EQ || code == NE)
3570 && op1 == const0_rtx
3572 return simplify_gen_relational (code, mode, cmp_mode,
3573 XEXP (op0, 0), XEXP (op0, 1));
3575 /* (eq/ne (xor x y) x) simplifies to (eq/ne x 0). */
3576 if ((code == EQ || code == NE)
3578 && rtx_equal_p (XEXP (op0, 0), op1)
3579 && !side_effects_p (XEXP (op0, 1)))
3580 return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx);
3581 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne y 0). */
3582 if ((code == EQ || code == NE)
3584 && rtx_equal_p (XEXP (op0, 1), op1)
3585 && !side_effects_p (XEXP (op0, 0)))
3586 return simplify_gen_relational (code, mode, cmp_mode, op1, const0_rtx);
3588 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3589 if ((code == EQ || code == NE)
3591 && (GET_CODE (op1) == CONST_INT
3592 || GET_CODE (op1) == CONST_DOUBLE)
3593 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3594 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3595 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3596 simplify_gen_binary (XOR, cmp_mode,
3597 XEXP (op0, 1), op1));
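/* (Editorial example: (eq (xor X (const_int 5)) (const_int 3)) becomes
   (eq X (const_int 6)), since XOR by a constant is invertible and
   5 ^ 3 == 6.)  */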
3602 /* Check if the given comparison (done in the given MODE) is actually a
3603 tautology or a contradiction.
3604 If no simplification is possible, this function returns zero.
3605 Otherwise, it returns either const_true_rtx or const0_rtx. */
3608 simplify_const_relational_operation (enum rtx_code code,
3609 enum machine_mode mode,
3612 int equal, op0lt, op0ltu, op1lt, op1ltu;
3617 gcc_assert (mode != VOIDmode
3618 || (GET_MODE (op0) == VOIDmode
3619 && GET_MODE (op1) == VOIDmode));
3621 /* If op0 is a compare, extract the comparison arguments from it. */
3622 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3624 op1 = XEXP (op0, 1);
3625 op0 = XEXP (op0, 0);
3627 if (GET_MODE (op0) != VOIDmode)
3628 mode = GET_MODE (op0);
3629 else if (GET_MODE (op1) != VOIDmode)
3630 mode = GET_MODE (op1);
3635 /* We can't simplify MODE_CC values since we don't know what the
3636 actual comparison is. */
3637 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3640 /* Make sure the constant is second. */
3641 if (swap_commutative_operands_p (op0, op1))
3643 tem = op0, op0 = op1, op1 = tem;
3644 code = swap_condition (code);
3647 trueop0 = avoid_constant_pool_reference (op0);
3648 trueop1 = avoid_constant_pool_reference (op1);
3650 /* For integer comparisons of A and B maybe we can simplify A - B and can
3651 then simplify a comparison of that with zero. If A and B are both either
3652 a register or a CONST_INT, this can't help; testing for these cases will
3653 prevent infinite recursion here and speed things up.
3655 If CODE is an unsigned comparison, then we can never do this optimization,
3656 because it gives an incorrect result if the subtraction wraps around zero.
3657 ANSI C defines unsigned operations such that they never overflow, and
3658 thus such cases cannot be ignored; but we cannot do it even for
3659 signed comparisons for languages such as Java, so test flag_wrapv. */
3661 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3662 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3663 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3664 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3665 /* We cannot do this for == or != if tem is a nonzero address. */
3666 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3667 && code != GTU && code != GEU && code != LTU && code != LEU)
3668 return simplify_const_relational_operation (signed_condition (code),
3669 mode, tem, const0_rtx);
3671 if (flag_unsafe_math_optimizations && code == ORDERED)
3672 return const_true_rtx;
3674 if (flag_unsafe_math_optimizations && code == UNORDERED)
3677 /* For modes without NaNs, if the two operands are equal, we know the
3678 result except if they have side-effects. */
3679 if (! HONOR_NANS (GET_MODE (trueop0))
3680 && rtx_equal_p (trueop0, trueop1)
3681 && ! side_effects_p (trueop0))
3682 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3684 /* If the operands are floating-point constants, see if we can fold
3686 else if (GET_CODE (trueop0) == CONST_DOUBLE
3687 && GET_CODE (trueop1) == CONST_DOUBLE
3688 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3690 REAL_VALUE_TYPE d0, d1;
3692 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3693 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3695 /* Comparisons are unordered iff at least one of the values is NaN. */
3696 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3706 return const_true_rtx;
3719 equal = REAL_VALUES_EQUAL (d0, d1);
3720 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3721 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3724 /* Otherwise, see if the operands are both integers. */
3725 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3726 && (GET_CODE (trueop0) == CONST_DOUBLE
3727 || GET_CODE (trueop0) == CONST_INT)
3728 && (GET_CODE (trueop1) == CONST_DOUBLE
3729 || GET_CODE (trueop1) == CONST_INT))
3731 int width = GET_MODE_BITSIZE (mode);
3732 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3733 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3735 /* Get the two words comprising each integer constant. */
3736 if (GET_CODE (trueop0) == CONST_DOUBLE)
3738 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3739 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3743 l0u = l0s = INTVAL (trueop0);
3744 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3747 if (GET_CODE (trueop1) == CONST_DOUBLE)
3749 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3750 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3754 l1u = l1s = INTVAL (trueop1);
3755 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3758 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3759 we have to sign or zero-extend the values. */
3760 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3762 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3763 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3765 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3766 l0s |= ((HOST_WIDE_INT) (-1) << width);
3768 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3769 l1s |= ((HOST_WIDE_INT) (-1) << width);
3771 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3772 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3774 equal = (h0u == h1u && l0u == l1u);
3775 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3776 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3777 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3778 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3781 /* Otherwise, there are some code-specific tests we can make. */
3784 /* Optimize comparisons with upper and lower bounds. */
3785 if (SCALAR_INT_MODE_P (mode)
3786 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3799 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3806 /* x >= min is always true. */
3807 if (rtx_equal_p (trueop1, mmin))
3808 tem = const_true_rtx;
3814 /* x <= max is always true. */
3815 if (rtx_equal_p (trueop1, mmax))
3816 tem = const_true_rtx;
3821 /* x > max is always false. */
3822 if (rtx_equal_p (trueop1, mmax))
3828 /* x < min is always false. */
3829 if (rtx_equal_p (trueop1, mmin))
3836 if (tem == const0_rtx
3837 || tem == const_true_rtx)
3844 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3849 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3850 return const_true_rtx;
3854 /* Optimize abs(x) < 0.0. */
3855 if (trueop1 == CONST0_RTX (mode)
3856 && !HONOR_SNANS (mode)
3857 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3859 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3861 if (GET_CODE (tem) == ABS)
3867 /* Optimize abs(x) >= 0.0. */
3868 if (trueop1 == CONST0_RTX (mode)
3869 && !HONOR_NANS (mode)
3870 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3872 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3874 if (GET_CODE (tem) == ABS)
3875 return const_true_rtx;
3880 /* Optimize ! (abs(x) < 0.0). */
3881 if (trueop1 == CONST0_RTX (mode))
3883 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3885 if (GET_CODE (tem) == ABS)
3886 return const_true_rtx;
3897 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3898 as appropriate. */
3903 return equal ? const_true_rtx : const0_rtx;
3906 return ! equal ? const_true_rtx : const0_rtx;
3909 return op0lt ? const_true_rtx : const0_rtx;
3912 return op1lt ? const_true_rtx : const0_rtx;
3914 return op0ltu ? const_true_rtx : const0_rtx;
3916 return op1ltu ? const_true_rtx : const0_rtx;
3919 return equal || op0lt ? const_true_rtx : const0_rtx;
3922 return equal || op1lt ? const_true_rtx : const0_rtx;
3924 return equal || op0ltu ? const_true_rtx : const0_rtx;
3926 return equal || op1ltu ? const_true_rtx : const0_rtx;
3928 return const_true_rtx;
3936 /* Simplify CODE, an operation with result mode MODE and three operands,
3937 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3938 a constant. Return 0 if no simplification is possible. */
3941 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3942 enum machine_mode op0_mode, rtx op0, rtx op1,
3945 unsigned int width = GET_MODE_BITSIZE (mode);
3947 /* VOIDmode means "infinite" precision. */
3949 width = HOST_BITS_PER_WIDE_INT;
3955 if (GET_CODE (op0) == CONST_INT
3956 && GET_CODE (op1) == CONST_INT
3957 && GET_CODE (op2) == CONST_INT
3958 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3959 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3961 /* Extracting a bit-field from a constant. */
3962 HOST_WIDE_INT val = INTVAL (op0);
3964 if (BITS_BIG_ENDIAN)
3965 val >>= (GET_MODE_BITSIZE (op0_mode)
3966 - INTVAL (op2) - INTVAL (op1));
3968 val >>= INTVAL (op2);
3970 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3972 /* First zero-extend. */
3973 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3974 /* If desired, propagate sign bit. */
3975 if (code == SIGN_EXTRACT
3976 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3977 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3980 /* Clear the bits that don't belong in our mode,
3981 unless they and our sign bit are all one.
3982 So we get either a reasonable negative value or a reasonable
3983 unsigned value for this mode. */
3984 if (width < HOST_BITS_PER_WIDE_INT
3985 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3986 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3987 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3989 return gen_int_mode (val, mode);
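/* (Editorial example, assuming BITS_BIG_ENDIAN is false: extracting
   4 bits at offset 0 from the constant 0xab yields 11 (0xb) for
   ZERO_EXTRACT, while SIGN_EXTRACT propagates bit 3 and yields -5.)  */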
3994 if (GET_CODE (op0) == CONST_INT)
3995 return op0 != const0_rtx ? op1 : op2;
3997 /* Convert c ? a : a into "a". */
3998 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4001 /* Convert a != b ? a : b into "a". */
4002 if (GET_CODE (op0) == NE
4003 && ! side_effects_p (op0)
4004 && ! HONOR_NANS (mode)
4005 && ! HONOR_SIGNED_ZEROS (mode)
4006 && ((rtx_equal_p (XEXP (op0, 0), op1)
4007 && rtx_equal_p (XEXP (op0, 1), op2))
4008 || (rtx_equal_p (XEXP (op0, 0), op2)
4009 && rtx_equal_p (XEXP (op0, 1), op1))))
4012 /* Convert a == b ? a : b into "b". */
4013 if (GET_CODE (op0) == EQ
4014 && ! side_effects_p (op0)
4015 && ! HONOR_NANS (mode)
4016 && ! HONOR_SIGNED_ZEROS (mode)
4017 && ((rtx_equal_p (XEXP (op0, 0), op1)
4018 && rtx_equal_p (XEXP (op0, 1), op2))
4019 || (rtx_equal_p (XEXP (op0, 0), op2)
4020 && rtx_equal_p (XEXP (op0, 1), op1))))
4023 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4025 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4026 ? GET_MODE (XEXP (op0, 1))
4027 : GET_MODE (XEXP (op0, 0)));
4030 /* Look for happy constants in op1 and op2. */
4031 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4033 HOST_WIDE_INT t = INTVAL (op1);
4034 HOST_WIDE_INT f = INTVAL (op2);
4036 if (t == STORE_FLAG_VALUE && f == 0)
4037 code = GET_CODE (op0);
4038 else if (t == 0 && f == STORE_FLAG_VALUE)
4041 tmp = reversed_comparison_code (op0, NULL_RTX);
4049 return simplify_gen_relational (code, mode, cmp_mode,
4050 XEXP (op0, 0), XEXP (op0, 1));
4053 if (cmp_mode == VOIDmode)
4054 cmp_mode = op0_mode;
4055 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4056 cmp_mode, XEXP (op0, 0),
4059 /* See if any simplifications were possible. */
4062 if (GET_CODE (temp) == CONST_INT)
4063 return temp == const0_rtx ? op2 : op1;
4065 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4071 gcc_assert (GET_MODE (op0) == mode);
4072 gcc_assert (GET_MODE (op1) == mode);
4073 gcc_assert (VECTOR_MODE_P (mode));
4074 op2 = avoid_constant_pool_reference (op2);
4075 if (GET_CODE (op2) == CONST_INT)
4077 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4078 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4079 int mask = (1 << n_elts) - 1;
4081 if (!(INTVAL (op2) & mask))
4083 if ((INTVAL (op2) & mask) == mask)
4086 op0 = avoid_constant_pool_reference (op0);
4087 op1 = avoid_constant_pool_reference (op1);
4088 if (GET_CODE (op0) == CONST_VECTOR
4089 && GET_CODE (op1) == CONST_VECTOR)
4091 rtvec v = rtvec_alloc (n_elts);
4094 for (i = 0; i < n_elts; i++)
4095 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4096 ? CONST_VECTOR_ELT (op0, i)
4097 : CONST_VECTOR_ELT (op1, i));
4098 return gen_rtx_CONST_VECTOR (mode, v);
4110 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4111 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4113 Works by unpacking OP into a collection of 8-bit values
4114 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4115 and then repacking them again for OUTERMODE. */
4118 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4119 enum machine_mode innermode, unsigned int byte)
4121 /* We support up to 512-bit values (for V8DFmode). */
4125 value_mask = (1 << value_bit) - 1
4127 unsigned char value[max_bitsize / value_bit];
4136 rtvec result_v = NULL;
4137 enum mode_class outer_class;
4138 enum machine_mode outer_submode;
4140 /* Some ports misuse CCmode. */
4141 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4144 /* We have no way to represent a complex constant at the rtl level. */
4145 if (COMPLEX_MODE_P (outermode))
4148 /* Unpack the value. */
4150 if (GET_CODE (op) == CONST_VECTOR)
4152 num_elem = CONST_VECTOR_NUNITS (op);
4153 elems = &CONST_VECTOR_ELT (op, 0);
4154 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4160 elem_bitsize = max_bitsize;
4162 /* If this asserts, it is too complicated; reducing value_bit may help. */
4163 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4164 /* I don't know how to handle endianness of sub-units. */
4165 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4167 for (elem = 0; elem < num_elem; elem++)
4170 rtx el = elems[elem];
4172 /* Vectors are kept in target memory order. (This is probably
4173 a mistake.) */
4175 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4176 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4178 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4179 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4180 unsigned bytele = (subword_byte % UNITS_PER_WORD
4181 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4182 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4185 switch (GET_CODE (el))
4189 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4191 *vp++ = INTVAL (el) >> i;
4192 /* CONST_INTs are always logically sign-extended. */
4193 for (; i < elem_bitsize; i += value_bit)
4194 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4198 if (GET_MODE (el) == VOIDmode)
4200 /* If this triggers, someone should have generated a
4201 CONST_INT instead. */
4202 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4204 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4205 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4206 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4209 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4212 /* It shouldn't matter what's done here, so fill it with
4213 zero. */
4214 for (; i < elem_bitsize; i += value_bit)
4219 long tmp[max_bitsize / 32];
4220 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4222 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4223 gcc_assert (bitsize <= elem_bitsize);
4224 gcc_assert (bitsize % value_bit == 0);
4226 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4229 /* real_to_target produces its result in words affected by
4230 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4231 and use WORDS_BIG_ENDIAN instead; see the documentation
4232 of SUBREG in rtl.texi. */
4233 for (i = 0; i < bitsize; i += value_bit)
4236 if (WORDS_BIG_ENDIAN)
4237 ibase = bitsize - 1 - i;
4240 *vp++ = tmp[ibase / 32] >> i % 32;
4243 /* It shouldn't matter what's done here, so fill it with
4244 zero. */
4245 for (; i < elem_bitsize; i += value_bit)
4255 /* Now, pick the right byte to start with. */
4256 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4257 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4258 will already have offset 0. */
4259 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4261 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4263 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4264 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4265 byte = (subword_byte % UNITS_PER_WORD
4266 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4269 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4270 so if it's become negative it will instead be very large.) */
4271 gcc_assert (byte < GET_MODE_SIZE (innermode));
4273 /* Convert from bytes to chunks of size value_bit. */
4274 value_start = byte * (BITS_PER_UNIT / value_bit);
4276 /* Re-pack the value. */
4278 if (VECTOR_MODE_P (outermode))
4280 num_elem = GET_MODE_NUNITS (outermode);
4281 result_v = rtvec_alloc (num_elem);
4282 elems = &RTVEC_ELT (result_v, 0);
4283 outer_submode = GET_MODE_INNER (outermode);
4289 outer_submode = outermode;
4292 outer_class = GET_MODE_CLASS (outer_submode);
4293 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4295 gcc_assert (elem_bitsize % value_bit == 0);
4296 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4298 for (elem = 0; elem < num_elem; elem++)
4302 /* Vectors are stored in target memory order. (This is probably
4303 a mistake.) */
4305 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4306 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4308 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4309 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4310 unsigned bytele = (subword_byte % UNITS_PER_WORD
4311 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4312 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4315 switch (outer_class)
4318 case MODE_PARTIAL_INT:
4320 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4323 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4325 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4326 for (; i < elem_bitsize; i += value_bit)
4327 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4328 << (i - HOST_BITS_PER_WIDE_INT));
4330 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4331 know why. */
4332 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4333 elems[elem] = gen_int_mode (lo, outer_submode);
4334 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4335 elems[elem] = immed_double_const (lo, hi, outer_submode);
4342 case MODE_DECIMAL_FLOAT:
4345 long tmp[max_bitsize / 32];
4347 /* real_from_target wants its input in words affected by
4348 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4349 and use WORDS_BIG_ENDIAN instead; see the documentation
4350 of SUBREG in rtl.texi. */
4351 for (i = 0; i < max_bitsize / 32; i++)
4353 for (i = 0; i < elem_bitsize; i += value_bit)
4356 if (WORDS_BIG_ENDIAN)
4357 ibase = elem_bitsize - 1 - i;
4360 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4363 real_from_target (&r, tmp, outer_submode);
4364 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4372 if (VECTOR_MODE_P (outermode))
4373 return gen_rtx_CONST_VECTOR (outermode, result_v);
4378 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4379 Return 0 if no simplifications are possible. */
4381 simplify_subreg (enum machine_mode outermode, rtx op,
4382 enum machine_mode innermode, unsigned int byte)
4384 /* Little bit of sanity checking. */
4385 gcc_assert (innermode != VOIDmode);
4386 gcc_assert (outermode != VOIDmode);
4387 gcc_assert (innermode != BLKmode);
4388 gcc_assert (outermode != BLKmode);
4390 gcc_assert (GET_MODE (op) == innermode
4391 || GET_MODE (op) == VOIDmode);
4393 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4394 gcc_assert (byte < GET_MODE_SIZE (innermode));
4396 if (outermode == innermode && !byte)
4399 if (GET_CODE (op) == CONST_INT
4400 || GET_CODE (op) == CONST_DOUBLE
4401 || GET_CODE (op) == CONST_VECTOR)
4402 return simplify_immed_subreg (outermode, op, innermode, byte);
4404 /* Changing mode twice with SUBREG => just change it once,
4405 or not at all if changing back to the starting mode. */
4406 if (GET_CODE (op) == SUBREG)
4408 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4409 int final_offset = byte + SUBREG_BYTE (op);
4412 if (outermode == innermostmode
4413 && byte == 0 && SUBREG_BYTE (op) == 0)
4414 return SUBREG_REG (op);
4416 /* The SUBREG_BYTE represents the offset, as if the value were stored
4417 in memory. An irritating exception is the paradoxical subreg, where
4418 we define SUBREG_BYTE to be 0; on big endian machines, this
4419 value should be negative. For a moment, undo this exception. */
4420 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4422 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4423 if (WORDS_BIG_ENDIAN)
4424 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4425 if (BYTES_BIG_ENDIAN)
4426 final_offset += difference % UNITS_PER_WORD;
4428 if (SUBREG_BYTE (op) == 0
4429 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4431 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4432 if (WORDS_BIG_ENDIAN)
4433 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4434 if (BYTES_BIG_ENDIAN)
4435 final_offset += difference % UNITS_PER_WORD;
4438 /* See whether resulting subreg will be paradoxical. */
4439 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4441 /* In nonparadoxical subregs we can't handle negative offsets. */
4442 if (final_offset < 0)
4444 /* Bail out in case resulting subreg would be incorrect. */
4445 if (final_offset % GET_MODE_SIZE (outermode)
4446 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4452 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4454 /* In a paradoxical subreg, see if we are still looking at the lower part.
4455 If so, our SUBREG_BYTE will be 0. */
4456 if (WORDS_BIG_ENDIAN)
4457 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4458 if (BYTES_BIG_ENDIAN)
4459 offset += difference % UNITS_PER_WORD;
4460 if (offset == final_offset)
4466 /* Recurse for further possible simplifications. */
4467 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4471 if (validate_subreg (outermode, innermostmode,
4472 SUBREG_REG (op), final_offset))
4473 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4477 /* Merge implicit and explicit truncations. */
4479 if (GET_CODE (op) == TRUNCATE
4480 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4481 && subreg_lowpart_offset (outermode, innermode) == byte)
4482 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4483 GET_MODE (XEXP (op, 0)));
4485 /* SUBREG of a hard register => just change the register number
4486 and/or mode. If the hard register is not valid in that mode,
4487 suppress this simplification. If the hard register is the stack,
4488 frame, or argument pointer, leave this as a SUBREG. */
4491 && REGNO (op) < FIRST_PSEUDO_REGISTER
4492 #ifdef CANNOT_CHANGE_MODE_CLASS
4493 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4494 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4495 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4497 && ((reload_completed && !frame_pointer_needed)
4498 || (REGNO (op) != FRAME_POINTER_REGNUM
4499 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4500 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4503 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4504 && REGNO (op) != ARG_POINTER_REGNUM
4506 && REGNO (op) != STACK_POINTER_REGNUM
4507 && subreg_offset_representable_p (REGNO (op), innermode,
4510 unsigned int regno = REGNO (op);
4511 unsigned int final_regno
4512 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4514 /* ??? We do allow it if the current REG is not valid for
4515 its mode. This is a kludge to work around how float/complex
4516 arguments are passed on 32-bit SPARC and should be fixed. */
4517 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4518 || ! HARD_REGNO_MODE_OK (regno, innermode))
4520 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
4522 /* Propagate original regno. We don't have any way to specify
4523 the offset inside original regno, so do so only for lowpart.
4524 The information is used only by alias analysis, which cannot
4525 grok partial registers anyway. */
4527 if (subreg_lowpart_offset (outermode, innermode) == byte)
4528 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
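
  /* For example (a hedged illustration): on a little-endian target,
     (subreg:QI (mem:SI (reg:SI p)) 3) is rewritten by
     adjust_address_nv as (mem:QI (plus:SI (reg:SI p) (const_int 3))),
     a narrower reference to the addressed byte.  */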

  /* Handle complex values represented as CONCAT
     of real and imaginary parts.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int inner_size, final_offset;
      rtx part, res;

      inner_size = GET_MODE_UNIT_SIZE (innermode);
      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
      final_offset = byte % inner_size;
      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
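
  /* For instance (an illustrative sketch): with SCmode built from two
     4-byte SFmode parts, (subreg:SF (concat:SC r i) 4) selects the
     second part and simplifies to I itself, since the recursive call
     sees a zero-offset subreg in the part's own mode.  */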

  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}
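
      /* A hedged illustration of the three cases, assuming a
	 little-endian target and op = (zero_extend:DI (reg:HI x)):
	   (subreg:HI op 0) -> (reg:HI x)
	   (subreg:QI op 0) -> (subreg:QI (reg:HI x) 0)
	   (subreg:SI op 0) -> (zero_extend:SI (reg:HI x))  */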

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
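
  /* For instance (an illustrative sketch): on a little-endian target,
     (subreg:SI (zero_extend:DI (reg:SI x)) 4) reads only the upper
     32 bits, which the zero extension guarantees to be zero, so the
     whole expression folds to (const_int 0).  */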

  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
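
  /* A hedged numeric illustration of why this is safe: for the QImode
     value x = -4 (0xfc), (sign_extend:SI x) is 0xfffffffc, and
     shifting that right logically by 2 gives 0x3fffffff; its low
     eight bits, 0xff, equal (ashiftrt:QI 0xfc 2) = -1.  The width
     check below ensures the zeros the logical shift introduces at the
     top can never reach the low OUTERMODE bits.  */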
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
	 so that the outer LSHIFTRT cannot shift by more than the sign
	 extension's sign_bit_copies and introduce zeros into the high
	 bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
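
  /* A hedged illustration: for x = 0xfc, (zero_extend:SI x) is
     0x000000fc, and a logical right shift by 2 gives 0x0000003f,
     whose low byte equals (lshiftrt:QI 0xfc 2) = 0x3f.  Zeros are
     shifted in either way, so no doubled-width check is needed.  */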
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
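
  /* A hedged illustration: for x = 0x2d, (zero_extend:SI x) shifted
     left by 2 is 0x000000b4, whose low byte equals
     (ashift:QI 0x2d 2) = 0xb4; the bits shifted past bit 7 are
     discarded by the truncating subreg in both forms.  */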
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
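
/* A hedged usage sketch (illustrative, not from the original source):
   to take the low SImode word of a DImode value X, a caller might
   write

     rtx lo = simplify_gen_subreg (SImode, x, DImode,
				   subreg_lowpart_offset (SImode, DImode));

   which folds X when possible, otherwise builds the SUBREG, and
   returns NULL only when no valid subreg can be formed.  */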

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */

rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))