/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
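/* Editorial sketch (not part of the original file): how HWI_SIGN_EXTEND is
   meant to be used when widening a single HOST_WIDE_INT into a (low, high)
   pair.  The name demo_widen is hypothetical; the block is disabled with
   #if 0 so it cannot affect compilation.  */
#if 0
static void
demo_widen (HOST_WIDE_INT low, unsigned HOST_WIDE_INT *plo, HOST_WIDE_INT *phi)
{
  /* The high word is all ones when LOW is negative and all zeros otherwise,
     which is exactly sign extension of LOW into the double-word value.  */
  *plo = (unsigned HOST_WIDE_INT) low;
  *phi = HWI_SIGN_EXTEND (low);
}
#endif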
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
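/* Editorial sketch (not part of the original file): why the result must be
   reduced to MODE.  Negating the most negative value wraps around, e.g. in
   QImode -(-128) is again -128 once truncated to 8 bits.  demo_neg_qimode
   is a hypothetical name; the block is disabled with #if 0.  */
#if 0
static HOST_WIDE_INT
demo_neg_qimode (HOST_WIDE_INT x)
{
  /* Negate, keep the low 8 bits, then sign extend them again, mimicking
     what gen_int_mode does for QImode.  */
  HOST_WIDE_INT v = (-x) & 0xff;
  return (v ^ 0x80) - 0x80;
}
#endif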
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */
bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
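/* Editorial sketch (not part of the original file): for QImode the only
   value that satisfies the test above is 0x80, the single bit at position
   width - 1.  demo_qimode_signbit_p is a hypothetical name; the block is
   disabled with #if 0.  */
#if 0
static bool
demo_qimode_signbit_p (unsigned HOST_WIDE_INT val)
{
  val &= ((unsigned HOST_WIDE_INT) 1 << 8) - 1;	/* mask to the 8-bit mode */
  return val == ((unsigned HOST_WIDE_INT) 1 << 7);
}
#endif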
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */
rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
133 avoid_constant_pool_reference (rtx x)
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
145 /* Handle float extensions of constant pool references. */
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
202 /* Return true if X is a MEM referencing the constant pool. */
205 constant_pool_reference_p (rtx x)
207 return avoid_constant_pool_reference (x) != x;
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */
rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
226 /* Likewise for ternary operations. */
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
242 /* Likewise, for relational operations.
243 CMP_MODE specifies mode comparison is done in. */
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
276 switch (GET_RTX_CLASS (code))
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
284 return simplify_gen_unary (code, mode, op0, op_mode);
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
292 return simplify_gen_binary (code, mode, op0, op1);
295 case RTX_COMM_COMPARE:
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
306 case RTX_BITFIELD_OPS:
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
319 /* The only case we try to handle is a SUBREG. */
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
328 return op0 ? op0 : x;
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
338 return replace_equiv_address_nv (x, op0);
340 else if (code == LO_SUM)
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
351 return gen_rtx_LO_SUM (mode, op0, op1);
353 else if (code == REG)
355 if (rtx_equal_p (x, old_rtx))
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
375 if (GET_CODE (op) == CONST)
378 trueop = avoid_constant_pool_reference (op);
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
384 return simplify_unary_operation_1 (code, mode, op);
387 /* Perform some simplifications we can do even if the operands
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
392 enum rtx_code reversed;
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
402 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
403 comparison is all ones. */
404 if (COMPARISON_P (op)
405 && (mode == BImode || STORE_FLAG_VALUE == -1)
406 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
407 return simplify_gen_relational (reversed, mode, VOIDmode,
408 XEXP (op, 0), XEXP (op, 1));
410 /* (not (plus X -1)) can become (neg X). */
411 if (GET_CODE (op) == PLUS
412 && XEXP (op, 1) == constm1_rtx)
413 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
415 /* Similarly, (not (neg X)) is (plus X -1). */
416 if (GET_CODE (op) == NEG)
417 return plus_constant (XEXP (op, 0), -1);
419 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
420 if (GET_CODE (op) == XOR
421 && GET_CODE (XEXP (op, 1)) == CONST_INT
422 && (temp = simplify_unary_operation (NOT, mode,
423 XEXP (op, 1), mode)) != 0)
424 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
426 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
427 if (GET_CODE (op) == PLUS
428 && GET_CODE (XEXP (op, 1)) == CONST_INT
429 && mode_signbit_p (mode, XEXP (op, 1))
430 && (temp = simplify_unary_operation (NOT, mode,
431 XEXP (op, 1), mode)) != 0)
432 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
435 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
436 operands other than 1, but that is not valid. We could do a
437 similar simplification for (not (lshiftrt C X)) where C is
just the sign bit, but this doesn't seem common enough to bother with.  */
440 if (GET_CODE (op) == ASHIFT
441 && XEXP (op, 0) == const1_rtx)
443 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
444 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
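/* Editorial sketch (not part of the original file): the identity behind the
   transformation above, stated for a 32-bit word.  Rotating ~1 left by N
   moves its single zero bit to position N, which is exactly ~(1 << N).
   Valid for 0 < N < 32; demo_not_ashift1 is a hypothetical name and the
   block is disabled with #if 0.  */
#if 0
static unsigned int
demo_not_ashift1 (unsigned int n)
{
  unsigned int rot = (~1u << n) | (~1u >> (32 - n));	/* rotate ~1 left by n */
  return rot;	/* equals ~(1u << n) */
}
#endif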
447 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
448 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
449 so we can perform the above simplification. */
451 if (STORE_FLAG_VALUE == -1
452 && GET_CODE (op) == ASHIFTRT
453 && GET_CODE (XEXP (op, 1)) == CONST_INT
454 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
455 return simplify_gen_relational (GE, mode, VOIDmode,
456 XEXP (op, 0), const0_rtx);
459 if (GET_CODE (op) == SUBREG
460 && subreg_lowpart_p (op)
461 && (GET_MODE_SIZE (GET_MODE (op))
462 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
463 && GET_CODE (SUBREG_REG (op)) == ASHIFT
464 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
466 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
469 x = gen_rtx_ROTATE (inner_mode,
470 simplify_gen_unary (NOT, inner_mode, const1_rtx,
472 XEXP (SUBREG_REG (op), 1));
473 return rtl_hooks.gen_lowpart_no_emit (mode, x);
476 /* Apply De Morgan's laws to reduce number of patterns for machines
477 with negating logical insns (and-not, nand, etc.). If result has
478 only one NOT, put it first, since that is how the patterns are
481 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
483 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
484 enum machine_mode op_mode;
486 op_mode = GET_MODE (in1);
487 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
489 op_mode = GET_MODE (in2);
490 if (op_mode == VOIDmode)
492 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
494 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
497 in2 = in1; in1 = tem;
500 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
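/* Editorial sketch (not part of the original file): the De Morgan
   identities used above, as a plain integer self-check.  demo_demorgan is a
   hypothetical name; the block is disabled with #if 0.  */
#if 0
static int
demo_demorgan (unsigned int a, unsigned int b)
{
  return (~(a | b) == (~a & ~b)) && (~(a & b) == (~a | ~b));
}
#endif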
506 /* (neg (neg X)) == X. */
507 if (GET_CODE (op) == NEG)
510 /* (neg (plus X 1)) can become (not X). */
511 if (GET_CODE (op) == PLUS
512 && XEXP (op, 1) == const1_rtx)
513 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
515 /* Similarly, (neg (not X)) is (plus X 1). */
516 if (GET_CODE (op) == NOT)
517 return plus_constant (XEXP (op, 0), 1);
519 /* (neg (minus X Y)) can become (minus Y X). This transformation
520 isn't safe for modes with signed zeros, since if X and Y are
521 both +0, (minus Y X) is the same as (minus X Y). If the
522 rounding mode is towards +infinity (or -infinity) then the two
523 expressions will be rounded differently. */
524 if (GET_CODE (op) == MINUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
529 if (GET_CODE (op) == PLUS
530 && !HONOR_SIGNED_ZEROS (mode)
531 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
533 /* (neg (plus A C)) is simplified to (minus -C A). */
534 if (GET_CODE (XEXP (op, 1)) == CONST_INT
535 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
537 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
542 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
543 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
544 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
547 /* (neg (mult A B)) becomes (mult (neg A) B).
548 This works even for floating-point values. */
549 if (GET_CODE (op) == MULT
550 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
552 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
553 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
556 /* NEG commutes with ASHIFT since it is multiplication. Only do
this if we can then eliminate the NEG (e.g., if the operand is a constant).  */
559 if (GET_CODE (op) == ASHIFT)
561 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
563 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
566 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
567 C is equal to the width of MODE minus 1. */
568 if (GET_CODE (op) == ASHIFTRT
569 && GET_CODE (XEXP (op, 1)) == CONST_INT
570 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
571 return simplify_gen_binary (LSHIFTRT, mode,
572 XEXP (op, 0), XEXP (op, 1));
574 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
575 C is equal to the width of MODE minus 1. */
576 if (GET_CODE (op) == LSHIFTRT
577 && GET_CODE (XEXP (op, 1)) == CONST_INT
578 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
579 return simplify_gen_binary (ASHIFTRT, mode,
580 XEXP (op, 0), XEXP (op, 1));
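/* Editorial sketch (not part of the original file): the two shift rules
   above for a 32-bit mode.  An arithmetic shift right by 31 yields 0 or -1;
   negating that gives 0 or 1, the logical shift by 31, and vice versa.
   Assumes the usual arithmetic behavior of >> on negative values, which GCC
   itself provides.  demo_neg_shift31 is a hypothetical name, disabled with
   #if 0.  */
#if 0
static int
demo_neg_shift31 (int x)
{
  unsigned int ux = (unsigned int) x;
  return -(x >> 31) == (int) (ux >> 31)		/* (neg (ashiftrt X 31)) */
	 && -(int) (ux >> 31) == (x >> 31);	/* (neg (lshiftrt X 31)) */
}
#endif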
582 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
583 if (GET_CODE (op) == XOR
584 && XEXP (op, 1) == const1_rtx
585 && nonzero_bits (XEXP (op, 0), mode) == 1)
586 return plus_constant (XEXP (op, 0), -1);
590 /* We can't handle truncation to a partial integer mode here
because we don't know the real bitsize of the partial integer mode.  */
593 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
596 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
597 if ((GET_CODE (op) == SIGN_EXTEND
598 || GET_CODE (op) == ZERO_EXTEND)
599 && GET_MODE (XEXP (op, 0)) == mode)
602 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
603 (OP:SI foo:SI) if OP is NEG or ABS. */
604 if ((GET_CODE (op) == ABS
605 || GET_CODE (op) == NEG)
606 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
607 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
608 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
609 return simplify_gen_unary (GET_CODE (op), mode,
610 XEXP (XEXP (op, 0), 0), mode);
612 /* (truncate:SI (subreg:DI (truncate:SI X) 0)) is
614 if (GET_CODE (op) == SUBREG
615 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
616 && subreg_lowpart_p (op))
617 return SUBREG_REG (op);
619 /* If we know that the value is already truncated, we can
620 replace the TRUNCATE with a SUBREG if TRULY_NOOP_TRUNCATION
621 is nonzero for the corresponding modes. But don't do this
622 for an (LSHIFTRT (MULT ...)) since this will cause problems
623 with the umulXi3_highpart patterns. */
624 if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
625 GET_MODE_BITSIZE (GET_MODE (op)))
626 && num_sign_bit_copies (op, GET_MODE (op))
627 >= (unsigned int) (GET_MODE_BITSIZE (mode) + 1)
628 && ! (GET_CODE (op) == LSHIFTRT
629 && GET_CODE (XEXP (op, 0)) == MULT))
630 return rtl_hooks.gen_lowpart_no_emit (mode, op);
632 /* A truncate of a comparison can be replaced with a subreg if
633 STORE_FLAG_VALUE permits. This is like the previous test,
634 but it works even if the comparison is done in a mode larger
635 than HOST_BITS_PER_WIDE_INT. */
636 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
638 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
639 return rtl_hooks.gen_lowpart_no_emit (mode, op);
643 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
644 if (GET_CODE (op) == FLOAT_EXTEND
645 && GET_MODE (XEXP (op, 0)) == mode)
648 /* (float_truncate:SF (float_truncate:DF foo:XF))
649 = (float_truncate:SF foo:XF).
650 This may eliminate double rounding, so it is unsafe.
652 (float_truncate:SF (float_extend:XF foo:DF))
653 = (float_truncate:SF foo:DF).
655 (float_truncate:DF (float_extend:XF foo:SF))
656 = (float_extend:SF foo:DF). */
657 if ((GET_CODE (op) == FLOAT_TRUNCATE
658 && flag_unsafe_math_optimizations)
659 || GET_CODE (op) == FLOAT_EXTEND)
660 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
662 > GET_MODE_SIZE (mode)
663 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
667 /* (float_truncate (float x)) is (float x) */
668 if (GET_CODE (op) == FLOAT
669 && (flag_unsafe_math_optimizations
670 || ((unsigned)significand_size (GET_MODE (op))
671 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
672 - num_sign_bit_copies (XEXP (op, 0),
673 GET_MODE (XEXP (op, 0)))))))
674 return simplify_gen_unary (FLOAT, mode,
676 GET_MODE (XEXP (op, 0)));
678 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
679 (OP:SF foo:SF) if OP is NEG or ABS. */
680 if ((GET_CODE (op) == ABS
681 || GET_CODE (op) == NEG)
682 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
683 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
684 return simplify_gen_unary (GET_CODE (op), mode,
685 XEXP (XEXP (op, 0), 0), mode);
687 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
688 is (float_truncate:SF x). */
689 if (GET_CODE (op) == SUBREG
690 && subreg_lowpart_p (op)
691 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
692 return SUBREG_REG (op);
696 /* (float_extend (float_extend x)) is (float_extend x)
698 (float_extend (float x)) is (float x) assuming that double
699 rounding can't happen.
701 if (GET_CODE (op) == FLOAT_EXTEND
702 || (GET_CODE (op) == FLOAT
703 && ((unsigned)significand_size (GET_MODE (op))
704 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
705 - num_sign_bit_copies (XEXP (op, 0),
706 GET_MODE (XEXP (op, 0)))))))
707 return simplify_gen_unary (GET_CODE (op), mode,
709 GET_MODE (XEXP (op, 0)));
714 /* (abs (neg <foo>)) -> (abs <foo>) */
715 if (GET_CODE (op) == NEG)
716 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
717 GET_MODE (XEXP (op, 0)));
719 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
721 if (GET_MODE (op) == VOIDmode)
724 /* If operand is something known to be positive, ignore the ABS. */
725 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
726 || ((GET_MODE_BITSIZE (GET_MODE (op))
727 <= HOST_BITS_PER_WIDE_INT)
728 && ((nonzero_bits (op, GET_MODE (op))
730 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
734 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
735 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
736 return gen_rtx_NEG (mode, op);
741 /* (ffs (*_extend <X>)) = (ffs <X>) */
742 if (GET_CODE (op) == SIGN_EXTEND
743 || GET_CODE (op) == ZERO_EXTEND)
744 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
745 GET_MODE (XEXP (op, 0)));
750 /* (pop* (zero_extend <X>)) = (pop* <X>) */
751 if (GET_CODE (op) == ZERO_EXTEND)
752 return simplify_gen_unary (code, mode, XEXP (op, 0),
753 GET_MODE (XEXP (op, 0)));
757 /* (float (sign_extend <X>)) = (float <X>). */
758 if (GET_CODE (op) == SIGN_EXTEND)
759 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
760 GET_MODE (XEXP (op, 0)));
764 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
765 becomes just the MINUS if its mode is MODE. This allows
folding switch statements on machines using casesi (such as the VAX).  */
768 if (GET_CODE (op) == TRUNCATE
769 && GET_MODE (XEXP (op, 0)) == mode
770 && GET_CODE (XEXP (op, 0)) == MINUS
771 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
772 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
775 /* Check for a sign extension of a subreg of a promoted
776 variable, where the promotion is sign-extended, and the
777 target mode is the same as the variable's promotion. */
778 if (GET_CODE (op) == SUBREG
779 && SUBREG_PROMOTED_VAR_P (op)
780 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
781 && GET_MODE (XEXP (op, 0)) == mode)
784 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
785 if (! POINTERS_EXTEND_UNSIGNED
786 && mode == Pmode && GET_MODE (op) == ptr_mode
788 || (GET_CODE (op) == SUBREG
789 && REG_P (SUBREG_REG (op))
790 && REG_POINTER (SUBREG_REG (op))
791 && GET_MODE (SUBREG_REG (op)) == Pmode)))
792 return convert_memory_address (Pmode, op);
797 /* Check for a zero extension of a subreg of a promoted
798 variable, where the promotion is zero-extended, and the
799 target mode is the same as the variable's promotion. */
800 if (GET_CODE (op) == SUBREG
801 && SUBREG_PROMOTED_VAR_P (op)
802 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
803 && GET_MODE (XEXP (op, 0)) == mode)
806 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
807 if (POINTERS_EXTEND_UNSIGNED > 0
808 && mode == Pmode && GET_MODE (op) == ptr_mode
810 || (GET_CODE (op) == SUBREG
811 && REG_P (SUBREG_REG (op))
812 && REG_POINTER (SUBREG_REG (op))
813 && GET_MODE (SUBREG_REG (op)) == Pmode)))
814 return convert_memory_address (Pmode, op);
825 /* Try to compute the value of a unary operation CODE whose output mode is to
826 be MODE with input operand OP whose mode was originally OP_MODE.
827 Return zero if the value cannot be computed. */
829 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
830 rtx op, enum machine_mode op_mode)
832 unsigned int width = GET_MODE_BITSIZE (mode);
834 if (code == VEC_DUPLICATE)
836 gcc_assert (VECTOR_MODE_P (mode));
837 if (GET_MODE (op) != VOIDmode)
839 if (!VECTOR_MODE_P (GET_MODE (op)))
840 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
842 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
845 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
846 || GET_CODE (op) == CONST_VECTOR)
848 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
849 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
850 rtvec v = rtvec_alloc (n_elts);
853 if (GET_CODE (op) != CONST_VECTOR)
854 for (i = 0; i < n_elts; i++)
855 RTVEC_ELT (v, i) = op;
858 enum machine_mode inmode = GET_MODE (op);
859 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
860 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
862 gcc_assert (in_n_elts < n_elts);
863 gcc_assert ((n_elts % in_n_elts) == 0);
864 for (i = 0; i < n_elts; i++)
865 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
867 return gen_rtx_CONST_VECTOR (mode, v);
871 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
873 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
874 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
875 enum machine_mode opmode = GET_MODE (op);
876 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
877 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
878 rtvec v = rtvec_alloc (n_elts);
881 gcc_assert (op_n_elts == n_elts);
882 for (i = 0; i < n_elts; i++)
884 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
885 CONST_VECTOR_ELT (op, i),
886 GET_MODE_INNER (opmode));
889 RTVEC_ELT (v, i) = x;
891 return gen_rtx_CONST_VECTOR (mode, v);
894 /* The order of these tests is critical so that, for example, we don't
895 check the wrong mode (input vs. output) for a conversion operation,
896 such as FIX. At some point, this should be simplified. */
898 if (code == FLOAT && GET_MODE (op) == VOIDmode
899 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
901 HOST_WIDE_INT hv, lv;
904 if (GET_CODE (op) == CONST_INT)
905 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
907 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
909 REAL_VALUE_FROM_INT (d, lv, hv, mode);
910 d = real_value_truncate (mode, d);
911 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
913 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
914 && (GET_CODE (op) == CONST_DOUBLE
915 || GET_CODE (op) == CONST_INT))
917 HOST_WIDE_INT hv, lv;
920 if (GET_CODE (op) == CONST_INT)
921 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
923 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
925 if (op_mode == VOIDmode)
927 /* We don't know how to interpret negative-looking numbers in
928 this case, so don't try to fold those. */
932 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
935 hv = 0, lv &= GET_MODE_MASK (op_mode);
937 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
938 d = real_value_truncate (mode, d);
939 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
942 if (GET_CODE (op) == CONST_INT
943 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
945 HOST_WIDE_INT arg0 = INTVAL (op);
959 val = (arg0 >= 0 ? arg0 : - arg0);
963 /* Don't use ffs here. Instead, get low order bit and then its
964 number. If arg0 is zero, this will return 0, as desired. */
965 arg0 &= GET_MODE_MASK (mode);
966 val = exact_log2 (arg0 & (- arg0)) + 1;
970 arg0 &= GET_MODE_MASK (mode);
971 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
974 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
978 arg0 &= GET_MODE_MASK (mode);
981 /* Even if the value at zero is undefined, we have to come
982 up with some replacement. Seems good enough. */
983 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
984 val = GET_MODE_BITSIZE (mode);
987 val = exact_log2 (arg0 & -arg0);
991 arg0 &= GET_MODE_MASK (mode);
994 val++, arg0 &= arg0 - 1;
998 arg0 &= GET_MODE_MASK (mode);
1001 val++, arg0 &= arg0 - 1;
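/* Editorial sketch (not part of the original file): the bit trick behind
   the FFS/CTZ folding in the switch above.  ARG0 & -ARG0 isolates the
   lowest set bit, and exact_log2 of that power of two gives its position;
   FFS is that position plus one, or 0 when the argument is 0.  demo_ffs32
   is a hypothetical 32-bit restatement, disabled with #if 0.  */
#if 0
static int
demo_ffs32 (unsigned int x)
{
  unsigned int low = x & (~x + 1u);	/* lowest set bit, or 0 */
  int pos = -1, bit;
  for (bit = 0; bit < 32; bit++)
    if (low == 1u << bit)
      pos = bit;
  return pos + 1;	/* 0 when X == 0, otherwise the 1-based bit index */
}
#endif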
/* When zero-extending a CONST_INT, we need to know its original mode.  */
1012 gcc_assert (op_mode != VOIDmode);
1013 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1015 /* If we were really extending the mode,
1016 we would have to distinguish between zero-extension
1017 and sign-extension. */
1018 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1021 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1022 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1028 if (op_mode == VOIDmode)
1030 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1032 /* If we were really extending the mode,
1033 we would have to distinguish between zero-extension
1034 and sign-extension. */
1035 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1038 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1041 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1043 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1044 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
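/* Editorial sketch (not part of the original file): the sign-extension
   arithmetic used just above, restated as a helper.  Mask the constant to
   the operand width, then subtract 1 << width when the top bit of the
   narrow value is set; e.g. a QImode 0xff becomes -1.  demo_sign_extend is
   a hypothetical name, disabled with #if 0.  */
#if 0
static HOST_WIDE_INT
demo_sign_extend (HOST_WIDE_INT arg0, int bits)
{
  HOST_WIDE_INT val = arg0 & ~((HOST_WIDE_INT) (-1) << bits);
  if (val & ((HOST_WIDE_INT) 1 << (bits - 1)))
    val -= (HOST_WIDE_INT) 1 << bits;
  return val;
}
#endif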
1052 case FLOAT_TRUNCATE:
1061 return gen_int_mode (val, mode);
1064 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1065 for a DImode operation on a CONST_INT. */
1066 else if (GET_MODE (op) == VOIDmode
1067 && width <= HOST_BITS_PER_WIDE_INT * 2
1068 && (GET_CODE (op) == CONST_DOUBLE
1069 || GET_CODE (op) == CONST_INT))
1071 unsigned HOST_WIDE_INT l1, lv;
1072 HOST_WIDE_INT h1, hv;
1074 if (GET_CODE (op) == CONST_DOUBLE)
1075 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1077 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1087 neg_double (l1, h1, &lv, &hv);
1092 neg_double (l1, h1, &lv, &hv);
1104 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1107 lv = exact_log2 (l1 & -l1) + 1;
1113 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1114 - HOST_BITS_PER_WIDE_INT;
1116 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1117 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1118 lv = GET_MODE_BITSIZE (mode);
1124 lv = exact_log2 (l1 & -l1);
1126 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1127 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1128 lv = GET_MODE_BITSIZE (mode);
1151 /* This is just a change-of-mode, so do nothing. */
1156 gcc_assert (op_mode != VOIDmode);
1158 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1162 lv = l1 & GET_MODE_MASK (op_mode);
1166 if (op_mode == VOIDmode
1167 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1171 lv = l1 & GET_MODE_MASK (op_mode);
1172 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1173 && (lv & ((HOST_WIDE_INT) 1
1174 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1175 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1177 hv = HWI_SIGN_EXTEND (lv);
1188 return immed_double_const (lv, hv, mode);
1191 else if (GET_CODE (op) == CONST_DOUBLE
1192 && SCALAR_FLOAT_MODE_P (mode))
1194 REAL_VALUE_TYPE d, t;
1195 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1200 if (HONOR_SNANS (mode) && real_isnan (&d))
1202 real_sqrt (&t, mode, &d);
1206 d = REAL_VALUE_ABS (d);
1209 d = REAL_VALUE_NEGATE (d);
1211 case FLOAT_TRUNCATE:
1212 d = real_value_truncate (mode, d);
1215 /* All this does is change the mode. */
1218 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1225 real_to_target (tmp, &d, GET_MODE (op));
1226 for (i = 0; i < 4; i++)
1228 real_from_target (&d, tmp, mode);
1234 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1237 else if (GET_CODE (op) == CONST_DOUBLE
1238 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1239 && GET_MODE_CLASS (mode) == MODE_INT
1240 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1242 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1243 operators are intentionally left unspecified (to ease implementation
1244 by target backends), for consistency, this routine implements the
1245 same semantics for constant folding as used by the middle-end. */
1247 /* This was formerly used only for non-IEEE float.
1248 eggert@twinsun.com says it is safe for IEEE also. */
1249 HOST_WIDE_INT xh, xl, th, tl;
1250 REAL_VALUE_TYPE x, t;
1251 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1255 if (REAL_VALUE_ISNAN (x))
1258 /* Test against the signed upper bound. */
1259 if (width > HOST_BITS_PER_WIDE_INT)
1261 th = ((unsigned HOST_WIDE_INT) 1
1262 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1268 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1270 real_from_integer (&t, VOIDmode, tl, th, 0);
1271 if (REAL_VALUES_LESS (t, x))
1278 /* Test against the signed lower bound. */
1279 if (width > HOST_BITS_PER_WIDE_INT)
1281 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1287 tl = (HOST_WIDE_INT) -1 << (width - 1);
1289 real_from_integer (&t, VOIDmode, tl, th, 0);
1290 if (REAL_VALUES_LESS (x, t))
1296 REAL_VALUE_TO_INT (&xl, &xh, x);
1300 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1303 /* Test against the unsigned upper bound. */
1304 if (width == 2*HOST_BITS_PER_WIDE_INT)
1309 else if (width >= HOST_BITS_PER_WIDE_INT)
1311 th = ((unsigned HOST_WIDE_INT) 1
1312 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1318 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1320 real_from_integer (&t, VOIDmode, tl, th, 1);
1321 if (REAL_VALUES_LESS (t, x))
1328 REAL_VALUE_TO_INT (&xl, &xh, x);
1334 return immed_double_const (xl, xh, mode);
1340 /* Subroutine of simplify_binary_operation to simplify a commutative,
1341 associative binary operation CODE with result mode MODE, operating
1342 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1343 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1344 canonicalization is possible. */
1347 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1352 /* Linearize the operator to the left. */
1353 if (GET_CODE (op1) == code)
1355 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1356 if (GET_CODE (op0) == code)
1358 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1359 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1362 /* "a op (b op c)" becomes "(b op c) op a". */
1363 if (! swap_commutative_operands_p (op1, op0))
1364 return simplify_gen_binary (code, mode, op1, op0);
1371 if (GET_CODE (op0) == code)
1373 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1374 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1376 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1377 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1380 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1381 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1382 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1383 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1385 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1387 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1388 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1389 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1390 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1392 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
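/* Editorial sketch (not part of the original file): the associativity that
   the canonicalizations above rely on, for the integer codes handled here
   (e.g. PLUS and XOR).  demo_reassociate is a hypothetical name, disabled
   with #if 0.  */
#if 0
static int
demo_reassociate (unsigned int a, unsigned int b, unsigned int c)
{
  return ((a + b) + c) == (a + (b + c))
	 && ((a ^ b) ^ c) == (a ^ (b ^ c));
}
#endif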
1399 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1400 and OP1. Return 0 if no simplification is possible.
1402 Don't use this for relational operations such as EQ or LT.
1403 Use simplify_relational_operation instead. */
1405 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1408 rtx trueop0, trueop1;
1411 /* Relational operations don't work here. We must know the mode
1412 of the operands in order to do the comparison correctly.
1413 Assuming a full word can give incorrect results.
1414 Consider comparing 128 with -128 in QImode. */
1415 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1416 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1418 /* Make sure the constant is second. */
1419 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1420 && swap_commutative_operands_p (op0, op1))
1422 tem = op0, op0 = op1, op1 = tem;
1425 trueop0 = avoid_constant_pool_reference (op0);
1426 trueop1 = avoid_constant_pool_reference (op1);
1428 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1431 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1435 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1436 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1438 rtx tem, reversed, opleft, opright;
1440 unsigned int width = GET_MODE_BITSIZE (mode);
1442 /* Even if we can't compute a constant result,
1443 there are some cases worth simplifying. */
1448 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1449 when x is NaN, infinite, or finite and nonzero. They aren't
1450 when x is -0 and the rounding mode is not towards -infinity,
1451 since (-0) + 0 is then 0. */
1452 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1455 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1456 transformations are safe even for IEEE. */
1457 if (GET_CODE (op0) == NEG)
1458 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1459 else if (GET_CODE (op1) == NEG)
1460 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1462 /* (~a) + 1 -> -a */
1463 if (INTEGRAL_MODE_P (mode)
1464 && GET_CODE (op0) == NOT
1465 && trueop1 == const1_rtx)
1466 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1468 /* Handle both-operands-constant cases. We can only add
1469 CONST_INTs to constants since the sum of relocatable symbols
1470 can't be handled by most assemblers. Don't add CONST_INT
1471 to CONST_INT since overflow won't be computed properly if wider
1472 than HOST_BITS_PER_WIDE_INT. */
1474 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1475 && GET_CODE (op1) == CONST_INT)
1476 return plus_constant (op0, INTVAL (op1));
1477 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1478 && GET_CODE (op0) == CONST_INT)
1479 return plus_constant (op1, INTVAL (op0));
1481 /* See if this is something like X * C - X or vice versa or
1482 if the multiplication is written as a shift. If so, we can
1483 distribute and make a new multiply, shift, or maybe just
1484 have X (if C is 2 in the example above). But don't make
1485 something more expensive than we had before. */
1487 if (SCALAR_INT_MODE_P (mode))
1489 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1490 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1491 rtx lhs = op0, rhs = op1;
1493 if (GET_CODE (lhs) == NEG)
1497 lhs = XEXP (lhs, 0);
1499 else if (GET_CODE (lhs) == MULT
1500 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1502 coeff0l = INTVAL (XEXP (lhs, 1));
1503 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1504 lhs = XEXP (lhs, 0);
1506 else if (GET_CODE (lhs) == ASHIFT
1507 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1508 && INTVAL (XEXP (lhs, 1)) >= 0
1509 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1511 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1513 lhs = XEXP (lhs, 0);
1516 if (GET_CODE (rhs) == NEG)
1520 rhs = XEXP (rhs, 0);
1522 else if (GET_CODE (rhs) == MULT
1523 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1525 coeff1l = INTVAL (XEXP (rhs, 1));
1526 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1527 rhs = XEXP (rhs, 0);
1529 else if (GET_CODE (rhs) == ASHIFT
1530 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1531 && INTVAL (XEXP (rhs, 1)) >= 0
1532 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1534 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1536 rhs = XEXP (rhs, 0);
1539 if (rtx_equal_p (lhs, rhs))
1541 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1543 unsigned HOST_WIDE_INT l;
1546 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1547 coeff = immed_double_const (l, h, mode);
1549 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1550 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
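/* Editorial sketch (not part of the original file): the effect of the
   coefficient combination above.  X*4 + X and (X << 2) + X both become X*5
   once the shift is treated as a multiplication and the two coefficients
   are added with add_double.  demo_combine_coeff is a hypothetical name,
   disabled with #if 0.  */
#if 0
static int
demo_combine_coeff (unsigned int x)
{
  return (x * 4 + x) == x * 5 && ((x << 2) + x) == x * 5;
}
#endif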
1555 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1556 if ((GET_CODE (op1) == CONST_INT
1557 || GET_CODE (op1) == CONST_DOUBLE)
1558 && GET_CODE (op0) == XOR
1559 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1560 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1561 && mode_signbit_p (mode, op1))
1562 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1563 simplify_gen_binary (XOR, mode, op1,
1566 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1567 if (GET_CODE (op0) == MULT
1568 && GET_CODE (XEXP (op0, 0)) == NEG)
1572 in1 = XEXP (XEXP (op0, 0), 0);
1573 in2 = XEXP (op0, 1);
1574 return simplify_gen_binary (MINUS, mode, op1,
1575 simplify_gen_binary (MULT, mode,
1579 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1580 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1582 if (COMPARISON_P (op0)
1583 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1584 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1585 && (reversed = reversed_comparison (op0, mode)))
return simplify_gen_unary (NEG, mode, reversed, mode);
1589 /* If one of the operands is a PLUS or a MINUS, see if we can
1590 simplify this by the associative law.
1591 Don't use the associative law for floating point.
1592 The inaccuracy makes it nonassociative,
1593 and subtle programs can break if operations are associated. */
1595 if (INTEGRAL_MODE_P (mode)
1596 && (plus_minus_operand_p (op0)
1597 || plus_minus_operand_p (op1))
1598 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1601 /* Reassociate floating point addition only when the user
1602 specifies unsafe math optimizations. */
1603 if (FLOAT_MODE_P (mode)
1604 && flag_unsafe_math_optimizations)
1606 tem = simplify_associative_operation (code, mode, op0, op1);
1614 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1615 using cc0, in which case we want to leave it as a COMPARE
1616 so we can distinguish it from a register-register-copy.
1618 In IEEE floating point, x-0 is not the same as x. */
1620 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1621 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1622 && trueop1 == CONST0_RTX (mode))
1626 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1627 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1628 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1629 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1631 rtx xop00 = XEXP (op0, 0);
1632 rtx xop10 = XEXP (op1, 0);
1635 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1637 if (REG_P (xop00) && REG_P (xop10)
1638 && GET_MODE (xop00) == GET_MODE (xop10)
1639 && REGNO (xop00) == REGNO (xop10)
1640 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1641 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1648 /* We can't assume x-x is 0 even with non-IEEE floating point,
1649 but since it is zero except in very strange circumstances, we
1650 will treat it as zero with -funsafe-math-optimizations. */
1651 if (rtx_equal_p (trueop0, trueop1)
1652 && ! side_effects_p (op0)
1653 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1654 return CONST0_RTX (mode);
1656 /* Change subtraction from zero into negation. (0 - x) is the
1657 same as -x when x is NaN, infinite, or finite and nonzero.
1658 But if the mode has signed zeros, and does not round towards
1659 -infinity, then 0 - 0 is 0, not -0. */
1660 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1661 return simplify_gen_unary (NEG, mode, op1, mode);
1663 /* (-1 - a) is ~a. */
1664 if (trueop0 == constm1_rtx)
1665 return simplify_gen_unary (NOT, mode, op1, mode);
1667 /* Subtracting 0 has no effect unless the mode has signed zeros
and supports rounding towards -infinity.  In such a case, 0 - 0 is -0.  */
1670 if (!(HONOR_SIGNED_ZEROS (mode)
1671 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1672 && trueop1 == CONST0_RTX (mode))
1675 /* See if this is something like X * C - X or vice versa or
1676 if the multiplication is written as a shift. If so, we can
1677 distribute and make a new multiply, shift, or maybe just
1678 have X (if C is 2 in the example above). But don't make
1679 something more expensive than we had before. */
1681 if (SCALAR_INT_MODE_P (mode))
1683 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1684 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1685 rtx lhs = op0, rhs = op1;
1687 if (GET_CODE (lhs) == NEG)
1691 lhs = XEXP (lhs, 0);
1693 else if (GET_CODE (lhs) == MULT
1694 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1696 coeff0l = INTVAL (XEXP (lhs, 1));
1697 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1698 lhs = XEXP (lhs, 0);
1700 else if (GET_CODE (lhs) == ASHIFT
1701 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1702 && INTVAL (XEXP (lhs, 1)) >= 0
1703 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1705 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1707 lhs = XEXP (lhs, 0);
1710 if (GET_CODE (rhs) == NEG)
1714 rhs = XEXP (rhs, 0);
1716 else if (GET_CODE (rhs) == MULT
1717 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1719 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1720 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1721 rhs = XEXP (rhs, 0);
1723 else if (GET_CODE (rhs) == ASHIFT
1724 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1725 && INTVAL (XEXP (rhs, 1)) >= 0
1726 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1728 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1730 rhs = XEXP (rhs, 0);
1733 if (rtx_equal_p (lhs, rhs))
1735 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1737 unsigned HOST_WIDE_INT l;
1740 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1741 coeff = immed_double_const (l, h, mode);
1743 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1744 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1749 /* (a - (-b)) -> (a + b). True even for IEEE. */
1750 if (GET_CODE (op1) == NEG)
1751 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1753 /* (-x - c) may be simplified as (-c - x). */
1754 if (GET_CODE (op0) == NEG
1755 && (GET_CODE (op1) == CONST_INT
1756 || GET_CODE (op1) == CONST_DOUBLE))
1758 tem = simplify_unary_operation (NEG, mode, op1, mode);
1760 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1763 /* Don't let a relocatable value get a negative coeff. */
1764 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1765 return simplify_gen_binary (PLUS, mode,
1767 neg_const_int (mode, op1));
1769 /* (x - (x & y)) -> (x & ~y) */
1770 if (GET_CODE (op1) == AND)
1772 if (rtx_equal_p (op0, XEXP (op1, 0)))
1774 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1775 GET_MODE (XEXP (op1, 1)));
1776 return simplify_gen_binary (AND, mode, op0, tem);
1778 if (rtx_equal_p (op0, XEXP (op1, 1)))
1780 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1781 GET_MODE (XEXP (op1, 0)));
1782 return simplify_gen_binary (AND, mode, op0, tem);
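/* Editorial sketch (not part of the original file): the identity behind the
   (minus X (and X Y)) rewrite above.  X & Y only contains bits that are
   also set in X, so subtracting it clears exactly those bits, which is
   X & ~Y.  demo_sub_and is a hypothetical name, disabled with #if 0.  */
#if 0
static int
demo_sub_and (unsigned int x, unsigned int y)
{
  return (x - (x & y)) == (x & ~y);
}
#endif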
1786 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1787 by reversing the comparison code if valid. */
1788 if (STORE_FLAG_VALUE == 1
1789 && trueop0 == const1_rtx
1790 && COMPARISON_P (op1)
1791 && (reversed = reversed_comparison (op1, mode)))
1794 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1795 if (GET_CODE (op1) == MULT
1796 && GET_CODE (XEXP (op1, 0)) == NEG)
1800 in1 = XEXP (XEXP (op1, 0), 0);
1801 in2 = XEXP (op1, 1);
1802 return simplify_gen_binary (PLUS, mode,
1803 simplify_gen_binary (MULT, mode,
1808 /* Canonicalize (minus (neg A) (mult B C)) to
1809 (minus (mult (neg B) C) A). */
1810 if (GET_CODE (op1) == MULT
1811 && GET_CODE (op0) == NEG)
1815 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1816 in2 = XEXP (op1, 1);
1817 return simplify_gen_binary (MINUS, mode,
1818 simplify_gen_binary (MULT, mode,
1823 /* If one of the operands is a PLUS or a MINUS, see if we can
1824 simplify this by the associative law. This will, for example,
1825 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1826 Don't use the associative law for floating point.
1827 The inaccuracy makes it nonassociative,
1828 and subtle programs can break if operations are associated. */
1830 if (INTEGRAL_MODE_P (mode)
1831 && (plus_minus_operand_p (op0)
1832 || plus_minus_operand_p (op1))
1833 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1838 if (trueop1 == constm1_rtx)
1839 return simplify_gen_unary (NEG, mode, op0, mode);
1841 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1842 x is NaN, since x * 0 is then also NaN. Nor is it valid
1843 when the mode has signed zeros, since multiplying a negative
1844 number by 0 will give -0, not 0. */
1845 if (!HONOR_NANS (mode)
1846 && !HONOR_SIGNED_ZEROS (mode)
1847 && trueop1 == CONST0_RTX (mode)
1848 && ! side_effects_p (op0))
/* In IEEE floating point, x*1 is not equivalent to x for signaling NaNs.  */
1853 if (!HONOR_SNANS (mode)
1854 && trueop1 == CONST1_RTX (mode))
1857 /* Convert multiply by constant power of two into shift unless
1858 we are still generating RTL. This test is a kludge. */
1859 if (GET_CODE (trueop1) == CONST_INT
1860 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1861 /* If the mode is larger than the host word size, and the
1862 uppermost bit is set, then this isn't a power of two due
1863 to implicit sign extension. */
1864 && (width <= HOST_BITS_PER_WIDE_INT
1865 || val != HOST_BITS_PER_WIDE_INT - 1))
1866 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1868 /* Likewise for multipliers wider than a word. */
1869 else if (GET_CODE (trueop1) == CONST_DOUBLE
1870 && (GET_MODE (trueop1) == VOIDmode
1871 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1872 && GET_MODE (op0) == mode
1873 && CONST_DOUBLE_LOW (trueop1) == 0
1874 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1875 return simplify_gen_binary (ASHIFT, mode, op0,
1876 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
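/* Editorial sketch (not part of the original file): the power-of-two case
   handled above; exact_log2 (8) is 3, so a multiply by 8 becomes a left
   shift by 3.  demo_mult_pow2 is a hypothetical name, disabled with
   #if 0.  */
#if 0
static int
demo_mult_pow2 (unsigned int x)
{
  return x * 8 == x << 3;
}
#endif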
1878 /* x*2 is x+x and x*(-1) is -x */
1879 if (GET_CODE (trueop1) == CONST_DOUBLE
1880 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1881 && GET_MODE (op0) == mode)
1884 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1886 if (REAL_VALUES_EQUAL (d, dconst2))
1887 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1889 if (REAL_VALUES_EQUAL (d, dconstm1))
1890 return simplify_gen_unary (NEG, mode, op0, mode);
1893 /* Reassociate multiplication, but for floating point MULTs
1894 only when the user specifies unsafe math optimizations. */
1895 if (! FLOAT_MODE_P (mode)
1896 || flag_unsafe_math_optimizations)
1898 tem = simplify_associative_operation (code, mode, op0, op1);
1905 if (trueop1 == const0_rtx)
1907 if (GET_CODE (trueop1) == CONST_INT
1908 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1909 == GET_MODE_MASK (mode)))
1911 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1913 /* A | (~A) -> -1 */
1914 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1915 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1916 && ! side_effects_p (op0)
1917 && SCALAR_INT_MODE_P (mode))
1920 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1921 if (GET_CODE (op1) == CONST_INT
1922 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1923 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1926 /* Convert (A & B) | A to A. */
1927 if (GET_CODE (op0) == AND
1928 && (rtx_equal_p (XEXP (op0, 0), op1)
1929 || rtx_equal_p (XEXP (op0, 1), op1))
1930 && ! side_effects_p (XEXP (op0, 0))
1931 && ! side_effects_p (XEXP (op0, 1)))
1934 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1935 mode size to (rotate A CX). */
1937 if (GET_CODE (op1) == ASHIFT
1938 || GET_CODE (op1) == SUBREG)
1949 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
1950 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
1951 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
1952 && GET_CODE (XEXP (opright, 1)) == CONST_INT
1953 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
1954 == GET_MODE_BITSIZE (mode)))
1955 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
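/* Editorial sketch (not part of the original file): the pattern recognized
   above is the usual open-coded rotate; for a 32-bit mode and 0 < C < 32
   the IOR of the two complementary shifts is a left rotate by C.
   demo_open_coded_rotate is a hypothetical name, disabled with #if 0.  */
#if 0
static unsigned int
demo_open_coded_rotate (unsigned int x, int c)
{
  return (x << c) | (x >> (32 - c));	/* equals a left rotate of X by C */
}
#endif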
1957 /* Same, but for ashift that has been "simplified" to a wider mode
1958 by simplify_shift_const. */
1960 if (GET_CODE (opleft) == SUBREG
1961 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
1962 && GET_CODE (opright) == LSHIFTRT
1963 && GET_CODE (XEXP (opright, 0)) == SUBREG
1964 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
1965 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
1966 && (GET_MODE_SIZE (GET_MODE (opleft))
1967 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
1968 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
1969 SUBREG_REG (XEXP (opright, 0)))
1970 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
1971 && GET_CODE (XEXP (opright, 1)) == CONST_INT
1972 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
1973 == GET_MODE_BITSIZE (mode)))
1974 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
1975 XEXP (SUBREG_REG (opleft), 1));
1977 /* If we have (ior (and (X C1) C2)), simplify this by making
1978 C1 as small as possible if C1 actually changes. */
1979 if (GET_CODE (op1) == CONST_INT
1980 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1981 || INTVAL (op1) > 0)
1982 && GET_CODE (op0) == AND
1983 && GET_CODE (XEXP (op0, 1)) == CONST_INT
1984 && GET_CODE (op1) == CONST_INT
1985 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
1986 return simplify_gen_binary (IOR, mode,
1988 (AND, mode, XEXP (op0, 0),
1989 GEN_INT (INTVAL (XEXP (op0, 1))
1993 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
1994 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
1995 the PLUS does not affect any of the bits in OP1: then we can do
1996 the IOR as a PLUS and we can associate. This is valid if OP1
1997 can be safely shifted left C bits. */
1998 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
1999 && GET_CODE (XEXP (op0, 0)) == PLUS
2000 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2001 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2002 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2004 int count = INTVAL (XEXP (op0, 1));
2005 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2007 if (mask >> count == INTVAL (trueop1)
2008 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2009 return simplify_gen_binary (ASHIFTRT, mode,
2010 plus_constant (XEXP (op0, 0), mask),
2014 tem = simplify_associative_operation (code, mode, op0, op1);
2020 if (trueop1 == const0_rtx)
2022 if (GET_CODE (trueop1) == CONST_INT
2023 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2024 == GET_MODE_MASK (mode)))
2025 return simplify_gen_unary (NOT, mode, op0, mode);
2026 if (rtx_equal_p (trueop0, trueop1)
2027 && ! side_effects_p (op0)
2028 && GET_MODE_CLASS (mode) != MODE_CC)
2029 return CONST0_RTX (mode);
2031 /* Canonicalize XOR of the most significant bit to PLUS. */
2032 if ((GET_CODE (op1) == CONST_INT
2033 || GET_CODE (op1) == CONST_DOUBLE)
2034 && mode_signbit_p (mode, op1))
2035 return simplify_gen_binary (PLUS, mode, op0, op1);
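/* Editorial sketch (not part of the original file): why XOR with the sign
   bit can be canonicalized to PLUS.  Adding the sign bit and XORing it
   agree modulo the mode width, because the carry out of the top bit is
   discarded; 8-bit demo.  demo_signbit_xor_plus is a hypothetical name,
   disabled with #if 0.  */
#if 0
static int
demo_signbit_xor_plus (unsigned int x)
{
  return ((x + 0x80u) & 0xffu) == ((x ^ 0x80u) & 0xffu);
}
#endif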
2036 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2037 if ((GET_CODE (op1) == CONST_INT
2038 || GET_CODE (op1) == CONST_DOUBLE)
2039 && GET_CODE (op0) == PLUS
2040 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2041 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2042 && mode_signbit_p (mode, XEXP (op0, 1)))
2043 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2044 simplify_gen_binary (XOR, mode, op1,
2047 /* If we are XORing two things that have no bits in common,
2048 convert them into an IOR. This helps to detect rotation encoded
2049 using those methods and possibly other simplifications. */
2051 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2052 && (nonzero_bits (op0, mode)
2053 & nonzero_bits (op1, mode)) == 0)
2054 return (simplify_gen_binary (IOR, mode, op0, op1));
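/* Editorial sketch (not part of the original file): when the two operands
   have no nonzero bits in common, XOR and IOR compute the same value, which
   is what the conversion above exploits.  demo_disjoint_xor is a
   hypothetical name, disabled with #if 0.  */
#if 0
static int
demo_disjoint_xor (unsigned int a, unsigned int b)
{
  return (a & b) != 0 || (a ^ b) == (a | b);
}
#endif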
2056 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2057 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2060 int num_negated = 0;
2062 if (GET_CODE (op0) == NOT)
2063 num_negated++, op0 = XEXP (op0, 0);
2064 if (GET_CODE (op1) == NOT)
2065 num_negated++, op1 = XEXP (op1, 0);
2067 if (num_negated == 2)
2068 return simplify_gen_binary (XOR, mode, op0, op1);
2069 else if (num_negated == 1)
2070 return simplify_gen_unary (NOT, mode,
2071 simplify_gen_binary (XOR, mode, op0, op1),
2075 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2076 correspond to a machine insn or result in further simplifications
2077 if B is a constant. */
2079 if (GET_CODE (op0) == AND
2080 && rtx_equal_p (XEXP (op0, 1), op1)
2081 && ! side_effects_p (op1))
2082 return simplify_gen_binary (AND, mode,
2083 simplify_gen_unary (NOT, mode,
2084 XEXP (op0, 0), mode),
2087 else if (GET_CODE (op0) == AND
2088 && rtx_equal_p (XEXP (op0, 0), op1)
2089 && ! side_effects_p (op1))
2090 return simplify_gen_binary (AND, mode,
2091 simplify_gen_unary (NOT, mode,
2092 XEXP (op0, 1), mode),
2095 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2096 comparison if STORE_FLAG_VALUE is 1. */
2097 if (STORE_FLAG_VALUE == 1
2098 && trueop1 == const1_rtx
2099 && COMPARISON_P (op0)
2100 && (reversed = reversed_comparison (op0, mode)))
2103 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2104 is (lt foo (const_int 0)), so we can perform the above
2105 simplification if STORE_FLAG_VALUE is 1. */
2107 if (STORE_FLAG_VALUE == 1
2108 && trueop1 == const1_rtx
2109 && GET_CODE (op0) == LSHIFTRT
2110 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2111 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2112 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
              == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);

      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      /* If we are turning off bits already known off in OP0, we need
         not do an AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
        return op0;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }
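      /* Example of the transformation above, assuming an SImode X widened to
         DImode: (and:DI (sign_extend:DI x) (const_int 255)) keeps only bits
         that already lie inside SImode, so it can be rewritten as
         (zero_extend:DI (and:SI x (const_int 255))).  */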
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.  */
      if (GET_CODE (trueop1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ~INTVAL (trueop1)
          && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                         == INTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
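      /* A small worked case of the rule above: with M == 7 and N == 0xff we
         have (N & M) == M, and ((A & 0xff) + B) & 7 == (A + B) & 7, because
         masking A only changes bits above bit 2 and carries only propagate
         upward, so the low three bits of the sum are unchanged.  Likewise
         for N == 8, where (N & M) == 0: (A | 8) only disturbs bit 3, so
         ((A | 8) + B) & 7 == (A + B) & 7.  */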
      tem = simplify_associative_operation (code, mode, op0, op1);

      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        if (side_effects_p (op1))
          return simplify_gen_binary (AND, mode, op1, trueop0);

      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
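      /* For example, with trueop1 == (const_int 8), exact_log2 returns 3 and
         (udiv x (const_int 8)) becomes (lshiftrt x (const_int 3)).  This is
         only valid for the unsigned division handled here; signed division
         by a power of two rounds toward zero and is not a plain arithmetic
         shift.  */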
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))

          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;

          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))

              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -funsafe-math-optimizations.  */
              if (flag_unsafe_math_optimizations
                  && !REAL_VALUES_EQUAL (d, dconst0))

                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
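              /* E.g. x / 2.0 becomes x * 0.5.  The rewrite is gated on
                 -funsafe-math-optimizations because the reciprocal of a
                 constant is generally not exactly representable (1.0 / 3.0,
                 say), so the rounded multiply can differ from the original
                 division in the last bit.  */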
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        if (side_effects_p (op1))
          return simplify_gen_binary (AND, mode, op1, trueop0);

      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);

      if (trueop1 == constm1_rtx)
        {
          rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
          return simplify_gen_unary (NEG, mode, x, mode);
        }

      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        if (side_effects_p (op1))
          return simplify_gen_binary (AND, mode, op1, trueop0);
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (GET_CODE (trueop1) == CONST_INT
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
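      /* For instance (umod x (const_int 8)) becomes (and x (const_int 7)):
         for unsigned values x % 8 and x & 7 are identical.  The same is not
         true of signed modulus once x can be negative.  */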
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        if (side_effects_p (op1))
          return simplify_gen_binary (AND, mode, op1, trueop0);
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }

      /* Rotating ~0 always results in ~0.  */
      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
          && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

      /* Fall through....  */

      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;

      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);

      if (width <= HOST_BITS_PER_WIDE_INT
          && GET_CODE (trueop1) == CONST_INT
          && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
              == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);

      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);

      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      /* ??? There are simplifications that can be done.  */

      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));
        }

      gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
      gcc_assert (GET_MODE_INNER (mode)
                  == GET_MODE_INNER (GET_MODE (trueop0)));
      gcc_assert (GET_CODE (trueop1) == PARALLEL);

      if (GET_CODE (trueop0) == CONST_VECTOR)

          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);

          gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
          for (i = 0; i < n_elts; i++)

              rtx x = XVECEXP (trueop1, 0, i);

              gcc_assert (GET_CODE (x) == CONST_INT);
              RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                   INTVAL (x));

          return gen_rtx_CONST_VECTOR (mode, v);

      if (XVECLEN (trueop1, 0) == 1
          && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
          && GET_CODE (trueop0) == VEC_CONCAT)

          int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

          /* Try to find the element in the VEC_CONCAT.  */
          while (GET_MODE (vec) != mode
                 && GET_CODE (vec) == VEC_CONCAT)

              HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
              if (offset < vec_size)
                vec = XEXP (vec, 0);

                vec = XEXP (vec, 1);

              vec = avoid_constant_pool_reference (vec);

          if (GET_MODE (vec) == mode)
        enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                      ? GET_MODE (trueop0)
                                      : GET_MODE_INNER (mode));
        enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                      ? GET_MODE (trueop1)
                                      : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || GET_CODE (trueop0) == CONST_INT
             || GET_CODE (trueop0) == CONST_DOUBLE)
            && (GET_CODE (trueop1) == CONST_VECTOR
                || GET_CODE (trueop1) == CONST_INT
                || GET_CODE (trueop1) == CONST_DOUBLE))

            int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);

            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)

                if (!VECTOR_MODE_P (op0_mode))
                  RTVEC_ELT (v, i) = trueop0;
                else
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);

                if (!VECTOR_MODE_P (op1_mode))
                  RTVEC_ELT (v, i) = trueop1;
                else
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                       i - in_n_elts);

            return gen_rtx_CONST_VECTOR (mode, v);
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
                                 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;

  unsigned int width = GET_MODE_BITSIZE (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)

          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);

          REAL_VALUE_TYPE f0, f1, value, result;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
          real_convert (&f0, mode, &f0);
          real_convert (&f1, mode, &f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))

              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              /* Inf + -Inf = NaN plus exception.  */

              /* Inf - Inf = NaN plus exception.  */

              /* Inf / Inf = NaN plus exception.  */

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);
          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math.  */

          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */

          if ((flag_rounding_math
               || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
                   && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return 0;

          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))

      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (op0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
        l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
        l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
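      /* Worked example of the widening above, assuming a 64-bit
         HOST_WIDE_INT: the CONST_INT -5 becomes the pair
         l = 0xfffffffffffffffb, h = -1, i.e. the two-word (128-bit)
         representation of -5.  */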
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);

          /* Fall through....  */

          add_double (l1, h1, l2, h2, &lv, &hv);

          mul_double (l1, h1, l2, h2, &lv, &hv);

          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))

          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))

          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))

          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))

          lv = l1 & l2, hv = h1 & h2;

          lv = l1 | l2, hv = h1 | h2;

          lv = l1 ^ l2, hv = h1 ^ h2;

              && ((unsigned HOST_WIDE_INT) l1
                  < (unsigned HOST_WIDE_INT) l2)))

              && ((unsigned HOST_WIDE_INT) l1
                  > (unsigned HOST_WIDE_INT) l2)))

          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              && ((unsigned HOST_WIDE_INT) l1
                  < (unsigned HOST_WIDE_INT) l2)))

          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              && ((unsigned HOST_WIDE_INT) l1
                  > (unsigned HOST_WIDE_INT) l2)))

        case LSHIFTRT:   case ASHIFTRT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);

      return immed_double_const (lv, hv, mode);
  if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)

      /* Get the integer argument values in two forms:
         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
        {
          arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
          arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

          arg0s = arg0;
          if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            arg0s |= ((HOST_WIDE_INT) (-1) << width);

          arg1s = arg1;
          if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            arg1s |= ((HOST_WIDE_INT) (-1) << width);
        }
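      /* For instance, with width == 8 and all eight bits of op0 set, the
         zero-extended form arg0 is 255 while the sign-extended form arg0s
         is -1; the signed cases below operate on arg0s/arg1s and the
         unsigned ones on arg0/arg1.  */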
      /* Compute the value of the arithmetic.  */

          val = arg0s + arg1s;

          val = arg0s - arg1s;

          val = arg0s * arg1s;

              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
          val = arg0s / arg1s;

              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
          val = arg0s % arg1s;

              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
          val = (unsigned HOST_WIDE_INT) arg0 / arg1;

              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
          val = (unsigned HOST_WIDE_INT) arg0 % arg1;

          /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
             the value is in range.  We can't return any old value for
             out-of-range arguments because either the middle-end (via
             shift_truncation_mask) or the back-end might be relying on
             target-specific knowledge.  Nor can we rely on
             shift_truncation_mask, since the shift might not be part of an
             ashlM3, lshrM3 or ashrM3 instruction.  */
          if (SHIFT_COUNT_TRUNCATED)
            arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
          else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
            return 0;

          val = (code == ASHIFT
                 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
                 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

          /* Sign-extend the result for arithmetic right shifts.  */
          if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
            val |= ((HOST_WIDE_INT) -1) << (width - arg1);

          val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
                 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));

          val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
                 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
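          /* As a concrete check of the rotate-left formula, take width == 8,
             arg0 == 0x81, arg1 == 1: (0x81 << 1) | (0x81 >> 7) == 0x103, and
             gen_int_mode below truncates that to the QImode value 0x03,
             i.e. 0x81 rotated left by one bit.  */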
          /* Do nothing here.  */

          val = arg0s <= arg1s ? arg0s : arg1s;

          val = ((unsigned HOST_WIDE_INT) arg0
                 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);

          val = arg0s > arg1s ? arg0s : arg1s;

          val = ((unsigned HOST_WIDE_INT) arg0
                 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);

          /* ??? There are simplifications that can be done.  */

      return gen_int_mode (val, mode);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;
  int result;

  result = (commutative_operand_precedence (d2->op)
            - commutative_operand_precedence (d1->op));
  if (result)
    return result;
  return d1->ix - d2->ix;
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx tem;
  int n_ops = 2, input_ops = 2;
  int first, changed, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;

              ops[i].op = XEXP (this_op, 0);

              canonicalized |= this_neg;

              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;

              && GET_CODE (XEXP (this_op, 0)) == PLUS
              && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
              && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))

                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;

              /* ~a -> (-a - 1) */

                  ops[n_ops].op = constm1_rtx;
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
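              /* E.g. with a == 5: ~5 == -6 == (-5) - 1, so a NOT operand is
                 folded into the running sum as the negated operand together
                 with an extra constant term of -1.  */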
              ops[i].op = neg_const_int (mode, this_op);

  gcc_assert (n_ops >= 2);

      int n_constants = 0;

      for (i = 0; i < n_ops; i++)
        if (GET_CODE (ops[i].op) == CONST_INT)
          n_constants++;

      if (n_constants <= 1)
        return NULL_RTX;

  /* If we only have two operands, we can avoid the loops.  */

      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
         the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
        {
          lhs = gen_rtx_NEG (mode, ops[0].op);
          rhs = ops[1].op;
        }
      else if (ops[0].neg)

      return simplify_const_binary_operation (code, mode, lhs, rhs);

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)

            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))

                enum rtx_code ncode = PLUS;

                  tem = lhs, lhs = rhs, rhs = tem;