1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
/* NOTE(review): this listing is truncated — the embedded original line
   numbers jump (47 -> 49, 56 -> 58), so intervening lines are missing.
   Verify against the upstream simplify-rtx.c before relying on it.  */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
/* Yield all-ones (-1) when LOW's sign bit is set, zero otherwise —
   the high half of LOW sign-extended to a (low, high) pair.  */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
/* Forward declarations for the file-local simplification helpers.
   Several prototypes appear cut mid-declaration by the truncation.  */
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static bool plus_minus_operand_p (rtx);
54 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
55 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
61 enum machine_mode, rtx, rtx);
62 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
63 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
66 /* Negate a CONST_INT rtx, truncating (because a conversion from a
67 maximally negative number can overflow). */
/* NOTE(review): the return-type line and braces are missing from this
   truncated listing; upstream this is `static rtx`.  The body negates
   INTVAL (i) and re-wraps it in MODE via gen_int_mode, which truncates
   to the mode's width — hence the comment about maximal negatives.  */
69 neg_const_int (enum machine_mode mode, rtx i)
71 return gen_int_mode (- INTVAL (i), mode);
74 /* Test whether expression, X, is an immediate constant that represents
75 the most significant bit of machine mode MODE. */
/* NOTE(review): truncated listing — the early `return false` bodies and
   the CONST_INT value extraction (presumably `val = INTVAL (x)`) fall in
   the missing lines; confirm against upstream.  */
78 mode_signbit_p (enum machine_mode mode, rtx x)
80 unsigned HOST_WIDE_INT val;
/* Only integer modes have a sign bit to test.  */
83 if (GET_MODE_CLASS (mode) != MODE_INT)
86 width = GET_MODE_BITSIZE (mode);
/* Narrow constants live in a CONST_INT ...  */
90 if (width <= HOST_BITS_PER_WIDE_INT
91 && GET_CODE (x) == CONST_INT)
/* ... wider ones in a CONST_DOUBLE whose low word must be zero for a
   pure sign-bit value; the test then moves to the high word.  */
93 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
94 && GET_CODE (x) == CONST_DOUBLE
95 && CONST_DOUBLE_LOW (x) == 0)
97 val = CONST_DOUBLE_HIGH (x);
98 width -= HOST_BITS_PER_WIDE_INT;
/* Mask to the mode's width, then check that exactly the top bit is set.  */
103 if (width < HOST_BITS_PER_WIDE_INT)
104 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
105 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
108 /* Make a binary operation by properly ordering the operands and
109 seeing if the expression folds. */
/* NOTE(review): truncated listing — the return-type line, the OP1
   parameter, `rtx tem;`, and the `if (tem) return tem;` after the fold
   attempt are among the missing lines.  */
112 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
117 /* Put complex operands first and constants second if commutative. */
118 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
119 && swap_commutative_operands_p (op0, op1))
120 tem = op0, op0 = op1, op1 = tem;
122 /* If this simplifies, do it. */
123 tem = simplify_binary_operation (code, mode, op0, op1);
/* Otherwise build the raw rtx for CODE.  */
127 return gen_rtx_fmt_ee (code, mode, op0, op1);
130 /* If X is a MEM referencing the constant pool, return the real value.
131 Otherwise return X. */
/* NOTE(review): truncated listing — switch case labels (apparently
   FLOAT_EXTEND and MEM), local declarations for `addr`, `c`, `tmp`, `d`,
   and several returns are missing; verify against upstream.  */
133 avoid_constant_pool_reference (rtx x)
136 enum machine_mode cmode;
137 HOST_WIDE_INT offset = 0;
139 switch (GET_CODE (x))
145 /* Handle float extensions of constant pool references. */
147 c = avoid_constant_pool_reference (tmp);
148 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
/* Re-extend the pool constant's real value into X's (wider) mode.  */
152 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
153 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
163 /* Call target hook to avoid the effects of -fpic etc.... */
164 addr = targetm.delegitimize_address (addr);
166 /* Split the address into a base and integer offset. */
167 if (GET_CODE (addr) == CONST
168 && GET_CODE (XEXP (addr, 0)) == PLUS
169 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
171 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
172 addr = XEXP (XEXP (addr, 0), 0);
/* A LO_SUM's second operand carries the full symbolic address.  */
175 if (GET_CODE (addr) == LO_SUM)
176 addr = XEXP (addr, 1);
178 /* If this is a constant pool reference, we can turn it into its
179 constant and hope that simplifications happen. */
180 if (GET_CODE (addr) == SYMBOL_REF
181 && CONSTANT_POOL_ADDRESS_P (addr))
183 c = get_pool_constant (addr);
184 cmode = get_pool_mode (addr);
186 /* If we're accessing the constant in a different mode than it was
187 originally stored, attempt to fix that up via subreg simplifications.
188 If that fails we have no choice but to return the original memory. */
189 if (offset != 0 || cmode != GET_MODE (x))
191 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
192 if (tem && CONSTANT_P (tem))
202 /* Return true if X is a MEM referencing the constant pool. */
/* NOTE(review): return type (upstream: bool) and braces are lost to the
   truncation.  X references the pool iff avoid_constant_pool_reference
   was able to replace it with something else.  */
205 constant_pool_reference_p (rtx x)
207 return avoid_constant_pool_reference (x) != x;
210 /* Make a unary operation by first seeing if it folds and otherwise making
211 the specified operation. */
/* NOTE(review): truncated listing — return type, `rtx tem;`, braces and
   the `return tem;` inside the if are among the missing lines.  */
214 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
215 enum machine_mode op_mode)
219 /* If this simplifies, use it. */
220 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
/* Otherwise build the raw unary rtx.  */
223 return gen_rtx_fmt_e (code, mode, op)
226 /* Likewise for ternary operations. */
/* NOTE(review): truncated — the remainder of the simplify_ternary_operation
   argument list and the `return tem;` are missing from this listing.  */
229 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
230 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
234 /* If this simplifies, use it. */
235 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
/* Otherwise build the raw three-operand rtx.  */
239 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
242 /* Likewise, for relational operations.
243 CMP_MODE specifies mode comparison is done in. */
/* NOTE(review): truncated — the tail of the call and the early `return
   tem;` fall in the missing lines.  */
246 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
247 enum machine_mode cmp_mode, rtx op0, rtx op1)
251 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
/* Otherwise build the raw comparison rtx.  */
255 return gen_rtx_fmt_ee (code, mode, op0, op1);
258 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
259 resulting RTX. Return a new RTX which is as simplified as possible. */
/* NOTE(review): truncated listing — the `case` labels of the GET_RTX_CLASS
   switch, several `return x;` early exits, and operand declarations are
   missing.  Structure visible: one recursive-substitution arm per rtx
   class (unary, binary, compare, ternary, subreg, and MEM/LO_SUM/REG
   objects), each returning X unchanged when no operand changed.  */
262 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
264 enum rtx_code code = GET_CODE (x);
265 enum machine_mode mode = GET_MODE (x);
266 enum machine_mode op_mode;
269 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
270 to build a new expression substituting recursively. If we can't do
271 anything, return our input. */
276 switch (GET_RTX_CLASS (code))
/* Unary class: recurse on the single operand.  */
280 op_mode = GET_MODE (op0);
281 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
282 if (op0 == XEXP (x, 0))
284 return simplify_gen_unary (code, mode, op0, op_mode);
/* Binary/commutative-arith class: recurse on both operands.  */
288 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
289 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
290 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
292 return simplify_gen_binary (code, mode, op0, op1);
295 case RTX_COMM_COMPARE:
/* Comparison mode comes from whichever operand has a non-void mode.  */
298 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
299 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
300 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
301 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
303 return simplify_gen_relational (code, mode, op_mode, op0, op1);
306 case RTX_BITFIELD_OPS:
308 op_mode = GET_MODE (op0);
309 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
310 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
311 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
312 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
314 if (op_mode == VOIDmode)
315 op_mode = GET_MODE (op0);
316 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
319 /* The only case we try to handle is a SUBREG. */
322 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
323 if (op0 == SUBREG_REG (x))
325 op0 = simplify_gen_subreg (GET_MODE (x), op0,
326 GET_MODE (SUBREG_REG (x)),
328 return op0 ? op0 : x;
/* Object class: MEM rewrites its address in place (non-validating).  */
335 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
336 if (op0 == XEXP (x, 0))
338 return replace_equiv_address_nv (x, op0);
340 else if (code == LO_SUM)
342 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
343 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
345 /* (lo_sum (high x) x) -> x */
346 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
349 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
351 return gen_rtx_LO_SUM (mode, op0, op1);
353 else if (code == REG)
/* A register is replaced only on exact match with OLD_RTX.  */
355 if (rtx_equal_p (x, old_rtx))
366 /* Try to simplify a unary operation CODE whose output mode is to be
367 MODE with input operand OP whose mode was originally OP_MODE.
368 Return zero if no simplification can be made. */
/* NOTE(review): truncated — declarations of `trueop`/`tem` and the
   `if (tem) return tem;` between the constant fold and the structural
   fallback are missing.  Visible flow: strip constant-pool references,
   try constant folding first, then structural rules.  */
370 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
371 rtx op, enum machine_mode op_mode)
375 if (GET_CODE (op) == CONST)
378 trueop = avoid_constant_pool_reference (op);
380 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
384 return simplify_unary_operation_1 (code, mode, op);
387 /* Perform some simplifications we can do even if the operands
/* NOTE(review): heavily truncated listing — the `switch (code)` statement
   and all its `case` labels (NOT, NEG, TRUNCATE, FLOAT_TRUNCATE,
   FLOAT_EXTEND, ABS, FFS, POPCOUNT/PARITY, FLOAT, SIGN_EXTEND,
   ZERO_EXTEND — inferred from the rule comments below, confirm upstream),
   plus many braces, declarations (`rtx temp;`) and `break`s are missing.
   Only the rule bodies survive; code left byte-identical.  */
390 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
392 enum rtx_code reversed;
/* ---- Rules for NOT (one's complement) ---- */
398 /* (not (not X)) == X. */
399 if (GET_CODE (op) == NOT)
402 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
403 comparison is all ones. */
404 if (COMPARISON_P (op)
405 && (mode == BImode || STORE_FLAG_VALUE == -1)
406 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
407 return simplify_gen_relational (reversed, mode, VOIDmode,
408 XEXP (op, 0), XEXP (op, 1));
410 /* (not (plus X -1)) can become (neg X). */
411 if (GET_CODE (op) == PLUS
412 && XEXP (op, 1) == constm1_rtx)
413 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
415 /* Similarly, (not (neg X)) is (plus X -1). */
416 if (GET_CODE (op) == NEG)
417 return plus_constant (XEXP (op, 0), -1);
419 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
420 if (GET_CODE (op) == XOR
421 && GET_CODE (XEXP (op, 1)) == CONST_INT
422 && (temp = simplify_unary_operation (NOT, mode,
423 XEXP (op, 1), mode)) != 0)
424 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
426 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
427 if (GET_CODE (op) == PLUS
428 && GET_CODE (XEXP (op, 1)) == CONST_INT
429 && mode_signbit_p (mode, XEXP (op, 1))
430 && (temp = simplify_unary_operation (NOT, mode,
431 XEXP (op, 1), mode)) != 0)
432 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
435 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
436 operands other than 1, but that is not valid. We could do a
437 similar simplification for (not (lshiftrt C X)) where C is
438 just the sign bit, but this doesn't seem common enough to
440 if (GET_CODE (op) == ASHIFT
441 && XEXP (op, 0) == const1_rtx)
443 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
444 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
447 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
448 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
449 so we can perform the above simplification. */
451 if (STORE_FLAG_VALUE == -1
452 && GET_CODE (op) == ASHIFTRT
453 && GET_CODE (XEXP (op, 1)) == CONST_INT
454 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
455 return simplify_gen_relational (GE, mode, VOIDmode,
456 XEXP (op, 0), const0_rtx);
/* Same (not (ashift 1 X)) rule, seen through a lowpart SUBREG of a
   wider shift.  */
459 if (GET_CODE (op) == SUBREG
460 && subreg_lowpart_p (op)
461 && (GET_MODE_SIZE (GET_MODE (op))
462 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
463 && GET_CODE (SUBREG_REG (op)) == ASHIFT
464 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
466 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
469 x = gen_rtx_ROTATE (inner_mode,
470 simplify_gen_unary (NOT, inner_mode, const1_rtx,
472 XEXP (SUBREG_REG (op), 1));
473 return rtl_hooks.gen_lowpart_no_emit (mode, x);
476 /* Apply De Morgan's laws to reduce number of patterns for machines
477 with negating logical insns (and-not, nand, etc.). If result has
478 only one NOT, put it first, since that is how the patterns are
481 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
483 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
484 enum machine_mode op_mode;
486 op_mode = GET_MODE (in1);
487 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
489 op_mode = GET_MODE (in2);
490 if (op_mode == VOIDmode)
492 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
494 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
497 in2 = in1; in1 = tem;
500 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
/* ---- Rules for NEG (arithmetic negation) ---- */
506 /* (neg (neg X)) == X. */
507 if (GET_CODE (op) == NEG)
510 /* (neg (plus X 1)) can become (not X). */
511 if (GET_CODE (op) == PLUS
512 && XEXP (op, 1) == const1_rtx)
513 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
515 /* Similarly, (neg (not X)) is (plus X 1). */
516 if (GET_CODE (op) == NOT)
517 return plus_constant (XEXP (op, 0), 1);
519 /* (neg (minus X Y)) can become (minus Y X). This transformation
520 isn't safe for modes with signed zeros, since if X and Y are
521 both +0, (minus Y X) is the same as (minus X Y). If the
522 rounding mode is towards +infinity (or -infinity) then the two
523 expressions will be rounded differently. */
524 if (GET_CODE (op) == MINUS
525 && !HONOR_SIGNED_ZEROS (mode)
526 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
527 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
529 if (GET_CODE (op) == PLUS
530 && !HONOR_SIGNED_ZEROS (mode)
531 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
533 /* (neg (plus A C)) is simplified to (minus -C A). */
534 if (GET_CODE (XEXP (op, 1)) == CONST_INT
535 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
537 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
539 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
542 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
543 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
544 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
547 /* (neg (mult A B)) becomes (mult (neg A) B).
548 This works even for floating-point values. */
549 if (GET_CODE (op) == MULT
550 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
552 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
553 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
556 /* NEG commutes with ASHIFT since it is multiplication. Only do
557 this if we can then eliminate the NEG (e.g., if the operand
559 if (GET_CODE (op) == ASHIFT)
561 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
563 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
566 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
567 C is equal to the width of MODE minus 1. */
568 if (GET_CODE (op) == ASHIFTRT
569 && GET_CODE (XEXP (op, 1)) == CONST_INT
570 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
571 return simplify_gen_binary (LSHIFTRT, mode,
572 XEXP (op, 0), XEXP (op, 1));
574 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
575 C is equal to the width of MODE minus 1. */
576 if (GET_CODE (op) == LSHIFTRT
577 && GET_CODE (XEXP (op, 1)) == CONST_INT
578 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
579 return simplify_gen_binary (ASHIFTRT, mode,
580 XEXP (op, 0), XEXP (op, 1));
582 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
583 if (GET_CODE (op) == XOR
584 && XEXP (op, 1) == const1_rtx
585 && nonzero_bits (XEXP (op, 0), mode) == 1)
586 return plus_constant (XEXP (op, 0), -1);
/* ---- Rules for TRUNCATE ---- */
590 /* We can't handle truncation to a partial integer mode here
591 because we don't know the real bitsize of the partial
593 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
596 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
597 if ((GET_CODE (op) == SIGN_EXTEND
598 || GET_CODE (op) == ZERO_EXTEND)
599 && GET_MODE (XEXP (op, 0)) == mode)
602 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
603 (OP:SI foo:SI) if OP is NEG or ABS. */
604 if ((GET_CODE (op) == ABS
605 || GET_CODE (op) == NEG)
606 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
607 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
608 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
609 return simplify_gen_unary (GET_CODE (op), mode,
610 XEXP (XEXP (op, 0), 0), mode);
612 /* (truncate:SI (subreg:DI (truncate:SI X) 0)) is
614 if (GET_CODE (op) == SUBREG
615 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
616 && subreg_lowpart_p (op))
617 return SUBREG_REG (op);
619 /* If we know that the value is already truncated, we can
620 replace the TRUNCATE with a SUBREG if TRULY_NOOP_TRUNCATION
621 is nonzero for the corresponding modes. But don't do this
622 for an (LSHIFTRT (MULT ...)) since this will cause problems
623 with the umulXi3_highpart patterns. */
624 if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
625 GET_MODE_BITSIZE (GET_MODE (op)))
626 && num_sign_bit_copies (op, GET_MODE (op))
627 >= (unsigned int) (GET_MODE_BITSIZE (mode) + 1)
628 && ! (GET_CODE (op) == LSHIFTRT
629 && GET_CODE (XEXP (op, 0)) == MULT))
630 return rtl_hooks.gen_lowpart_no_emit (mode, op);
632 /* A truncate of a comparison can be replaced with a subreg if
633 STORE_FLAG_VALUE permits. This is like the previous test,
634 but it works even if the comparison is done in a mode larger
635 than HOST_BITS_PER_WIDE_INT. */
636 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
638 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
639 return rtl_hooks.gen_lowpart_no_emit (mode, op);
/* ---- Rules for FLOAT_TRUNCATE ---- */
643 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
644 if (GET_CODE (op) == FLOAT_EXTEND
645 && GET_MODE (XEXP (op, 0)) == mode)
648 /* (float_truncate:SF (float_truncate:DF foo:XF))
649 = (float_truncate:SF foo:XF).
650 This may eliminate double rounding, so it is unsafe.
652 (float_truncate:SF (float_extend:XF foo:DF))
653 = (float_truncate:SF foo:DF).
655 (float_truncate:DF (float_extend:XF foo:SF))
656 = (float_extend:SF foo:DF). */
657 if ((GET_CODE (op) == FLOAT_TRUNCATE
658 && flag_unsafe_math_optimizations)
659 || GET_CODE (op) == FLOAT_EXTEND)
660 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
662 > GET_MODE_SIZE (mode)
663 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
667 /* (float_truncate (float x)) is (float x) */
668 if (GET_CODE (op) == FLOAT
669 && (flag_unsafe_math_optimizations
670 || ((unsigned)significand_size (GET_MODE (op))
671 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
672 - num_sign_bit_copies (XEXP (op, 0),
673 GET_MODE (XEXP (op, 0)))))))
674 return simplify_gen_unary (FLOAT, mode,
676 GET_MODE (XEXP (op, 0)));
678 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
679 (OP:SF foo:SF) if OP is NEG or ABS. */
680 if ((GET_CODE (op) == ABS
681 || GET_CODE (op) == NEG)
682 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
683 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
684 return simplify_gen_unary (GET_CODE (op), mode,
685 XEXP (XEXP (op, 0), 0), mode);
687 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
688 is (float_truncate:SF x). */
689 if (GET_CODE (op) == SUBREG
690 && subreg_lowpart_p (op)
691 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
692 return SUBREG_REG (op);
/* ---- Rules for FLOAT_EXTEND ---- */
696 /* (float_extend (float_extend x)) is (float_extend x)
698 (float_extend (float x)) is (float x) assuming that double
699 rounding can't happen.
701 if (GET_CODE (op) == FLOAT_EXTEND
702 || (GET_CODE (op) == FLOAT
703 && ((unsigned)significand_size (GET_MODE (op))
704 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
705 - num_sign_bit_copies (XEXP (op, 0),
706 GET_MODE (XEXP (op, 0)))))))
707 return simplify_gen_unary (GET_CODE (op), mode,
709 GET_MODE (XEXP (op, 0)));
/* ---- Rules for ABS ---- */
714 /* (abs (neg <foo>)) -> (abs <foo>) */
715 if (GET_CODE (op) == NEG)
716 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
717 GET_MODE (XEXP (op, 0)));
719 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
721 if (GET_MODE (op) == VOIDmode)
724 /* If operand is something known to be positive, ignore the ABS. */
725 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
726 || ((GET_MODE_BITSIZE (GET_MODE (op))
727 <= HOST_BITS_PER_WIDE_INT)
728 && ((nonzero_bits (op, GET_MODE (op))
730 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
734 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
735 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
736 return gen_rtx_NEG (mode, op);
/* ---- Rules for FFS ---- */
741 /* (ffs (*_extend <X>)) = (ffs <X>) */
742 if (GET_CODE (op) == SIGN_EXTEND
743 || GET_CODE (op) == ZERO_EXTEND)
744 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
745 GET_MODE (XEXP (op, 0)));
/* ---- Rules for POPCOUNT / PARITY (per the comment below) ---- */
750 /* (pop* (zero_extend <X>)) = (pop* <X>) */
751 if (GET_CODE (op) == ZERO_EXTEND)
752 return simplify_gen_unary (code, mode, XEXP (op, 0),
753 GET_MODE (XEXP (op, 0)));
/* ---- Rules for FLOAT ---- */
757 /* (float (sign_extend <X>)) = (float <X>). */
758 if (GET_CODE (op) == SIGN_EXTEND)
759 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
760 GET_MODE (XEXP (op, 0)));
/* ---- Rules for SIGN_EXTEND ---- */
764 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
765 becomes just the MINUS if its mode is MODE. This allows
766 folding switch statements on machines using casesi (such as
768 if (GET_CODE (op) == TRUNCATE
769 && GET_MODE (XEXP (op, 0)) == mode
770 && GET_CODE (XEXP (op, 0)) == MINUS
771 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
772 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
775 /* Check for a sign extension of a subreg of a promoted
776 variable, where the promotion is sign-extended, and the
777 target mode is the same as the variable's promotion. */
778 if (GET_CODE (op) == SUBREG
779 && SUBREG_PROMOTED_VAR_P (op)
780 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
781 && GET_MODE (XEXP (op, 0)) == mode)
784 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
785 if (! POINTERS_EXTEND_UNSIGNED
786 && mode == Pmode && GET_MODE (op) == ptr_mode
788 || (GET_CODE (op) == SUBREG
789 && REG_P (SUBREG_REG (op))
790 && REG_POINTER (SUBREG_REG (op))
791 && GET_MODE (SUBREG_REG (op)) == Pmode)))
792 return convert_memory_address (Pmode, op);
/* ---- Rules for ZERO_EXTEND ---- */
797 /* Check for a zero extension of a subreg of a promoted
798 variable, where the promotion is zero-extended, and the
799 target mode is the same as the variable's promotion. */
800 if (GET_CODE (op) == SUBREG
801 && SUBREG_PROMOTED_VAR_P (op)
802 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
803 && GET_MODE (XEXP (op, 0)) == mode)
806 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
807 if (POINTERS_EXTEND_UNSIGNED > 0
808 && mode == Pmode && GET_MODE (op) == ptr_mode
810 || (GET_CODE (op) == SUBREG
811 && REG_P (SUBREG_REG (op))
812 && REG_POINTER (SUBREG_REG (op))
813 && GET_MODE (SUBREG_REG (op)) == Pmode)))
814 return convert_memory_address (Pmode, op);
825 /* Try to compute the value of a unary operation CODE whose output mode is to
826 be MODE with input operand OP whose mode was originally OP_MODE.
827 Return zero if the value cannot be computed. */
/* NOTE(review): heavily truncated listing — the return-type line, inner
   `switch (code)` statements with their `case` labels, many braces,
   `break`s and declarations are missing.  Visible structure: vector
   duplication/elementwise folding, integer<->float conversions, single-
   and double-word integer folding, and float constant folding, in that
   order.  Code left byte-identical.  */
829 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
830 rtx op, enum machine_mode op_mode)
832 unsigned int width = GET_MODE_BITSIZE (mode);
/* VEC_DUPLICATE of a constant: replicate the scalar (or the smaller
   vector's elements) across all N_ELTS lanes of MODE.  */
834 if (code == VEC_DUPLICATE)
836 gcc_assert (VECTOR_MODE_P (mode));
837 if (GET_MODE (op) != VOIDmode)
839 if (!VECTOR_MODE_P (GET_MODE (op)))
840 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
842 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
845 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
846 || GET_CODE (op) == CONST_VECTOR)
848 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
849 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
850 rtvec v = rtvec_alloc (n_elts);
853 if (GET_CODE (op) != CONST_VECTOR)
854 for (i = 0; i < n_elts; i++)
855 RTVEC_ELT (v, i) = op;
858 enum machine_mode inmode = GET_MODE (op);
859 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
860 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
862 gcc_assert (in_n_elts < n_elts);
863 gcc_assert ((n_elts % in_n_elts) == 0);
864 for (i = 0; i < n_elts; i++)
865 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
867 return gen_rtx_CONST_VECTOR (mode, v);
/* Elementwise fold: apply CODE to each element of a CONST_VECTOR.  */
871 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
873 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
874 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
875 enum machine_mode opmode = GET_MODE (op);
876 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
877 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
878 rtvec v = rtvec_alloc (n_elts);
881 gcc_assert (op_n_elts == n_elts);
882 for (i = 0; i < n_elts; i++)
884 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
885 CONST_VECTOR_ELT (op, i),
886 GET_MODE_INNER (opmode));
889 RTVEC_ELT (v, i) = x;
891 return gen_rtx_CONST_VECTOR (mode, v);
894 /* The order of these tests is critical so that, for example, we don't
895 check the wrong mode (input vs. output) for a conversion operation,
896 such as FIX. At some point, this should be simplified. */
/* FLOAT of a VOIDmode integer constant: signed int -> real.  */
898 if (code == FLOAT && GET_MODE (op) == VOIDmode
899 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
901 HOST_WIDE_INT hv, lv;
904 if (GET_CODE (op) == CONST_INT)
905 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
907 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
909 REAL_VALUE_FROM_INT (d, lv, hv, mode);
910 d = real_value_truncate (mode, d);
911 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* UNSIGNED_FLOAT: as above, but the (lv, hv) pair is read unsigned.  */
913 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
914 && (GET_CODE (op) == CONST_DOUBLE
915 || GET_CODE (op) == CONST_INT))
917 HOST_WIDE_INT hv, lv;
920 if (GET_CODE (op) == CONST_INT)
921 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
923 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
925 if (op_mode == VOIDmode)
927 /* We don't know how to interpret negative-looking numbers in
928 this case, so don't try to fold those. */
932 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
935 hv = 0, lv &= GET_MODE_MASK (op_mode);
937 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
938 d = real_value_truncate (mode, d);
939 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Single-word integer folding: OP fits in one HOST_WIDE_INT.  The
   missing case labels here appear to cover NOT/NEG/ABS/FFS/CLZ/CTZ/
   POPCOUNT/PARITY/TRUNCATE/ZERO_EXTEND/SIGN_EXTEND — confirm upstream.  */
942 if (GET_CODE (op) == CONST_INT
943 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
945 HOST_WIDE_INT arg0 = INTVAL (op);
959 val = (arg0 >= 0 ? arg0 : - arg0);
963 /* Don't use ffs here. Instead, get low order bit and then its
964 number. If arg0 is zero, this will return 0, as desired. */
965 arg0 &= GET_MODE_MASK (mode);
966 val = exact_log2 (arg0 & (- arg0)) + 1;
970 arg0 &= GET_MODE_MASK (mode);
971 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
974 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
978 arg0 &= GET_MODE_MASK (mode);
981 /* Even if the value at zero is undefined, we have to come
982 up with some replacement. Seems good enough. */
983 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
984 val = GET_MODE_BITSIZE (mode);
987 val = exact_log2 (arg0 & -arg0);
/* Population count: clear lowest set bit until zero.  */
991 arg0 &= GET_MODE_MASK (mode);
994 val++, arg0 &= arg0 - 1;
/* Parity: same loop; presumably followed by `val &= 1` in the
   missing lines — confirm upstream.  */
998 arg0 &= GET_MODE_MASK (mode);
1001 val++, arg0 &= arg0 - 1;
1010 /* When zero-extending a CONST_INT, we need to know its
1012 gcc_assert (op_mode != VOIDmode);
1013 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1015 /* If we were really extending the mode,
1016 we would have to distinguish between zero-extension
1017 and sign-extension. */
1018 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1021 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1022 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
/* Sign extension: mask to OP_MODE's width, then subtract 2^width
   when the old sign bit is set.  */
1028 if (op_mode == VOIDmode)
1030 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1032 /* If we were really extending the mode,
1033 we would have to distinguish between zero-extension
1034 and sign-extension. */
1035 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1038 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1041 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1043 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1044 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1052 case FLOAT_TRUNCATE:
1061 return gen_int_mode (val, mode);
1064 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1065 for a DImode operation on a CONST_INT. */
1066 else if (GET_MODE (op) == VOIDmode
1067 && width <= HOST_BITS_PER_WIDE_INT * 2
1068 && (GET_CODE (op) == CONST_DOUBLE
1069 || GET_CODE (op) == CONST_INT))
1071 unsigned HOST_WIDE_INT l1, lv;
1072 HOST_WIDE_INT h1, hv;
/* Load the two-word value (l1 = low, h1 = high).  */
1074 if (GET_CODE (op) == CONST_DOUBLE)
1075 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1077 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1087 neg_double (l1, h1, &lv, &hv);
1092 neg_double (l1, h1, &lv, &hv);
/* FFS over two words: scan low word first, then high word.  */
1104 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1107 lv = exact_log2 (l1 & -l1) + 1;
/* CLZ over two words: high word first.  */
1113 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1114 - HOST_BITS_PER_WIDE_INT;
1116 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1117 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1118 lv = GET_MODE_BITSIZE (mode);
/* CTZ over two words: low word first.  */
1124 lv = exact_log2 (l1 & -l1);
1126 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1127 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1128 lv = GET_MODE_BITSIZE (mode);
1151 /* This is just a change-of-mode, so do nothing. */
1156 gcc_assert (op_mode != VOIDmode);
1158 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1162 lv = l1 & GET_MODE_MASK (op_mode);
/* SIGN_EXTEND of the double-word value.  */
1166 if (op_mode == VOIDmode
1167 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1171 lv = l1 & GET_MODE_MASK (op_mode);
1172 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1173 && (lv & ((HOST_WIDE_INT) 1
1174 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1175 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1177 hv = HWI_SIGN_EXTEND (lv);
1188 return immed_double_const (lv, hv, mode);
/* Float constant folding: SQRT/ABS/NEG/FLOAT_TRUNCATE/FLOAT_EXTEND/
   FIX and a target-format round-trip (per the calls below).  */
1191 else if (GET_CODE (op) == CONST_DOUBLE
1192 && SCALAR_FLOAT_MODE_P (mode))
1194 REAL_VALUE_TYPE d, t;
1195 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
/* Folding sqrt(sNaN) would lose the required invalid-op signal.  */
1200 if (HONOR_SNANS (mode) && real_isnan (&d))
1202 real_sqrt (&t, mode, &d);
1206 d = REAL_VALUE_ABS (d);
1209 d = REAL_VALUE_NEGATE (d);
1211 case FLOAT_TRUNCATE:
1212 d = real_value_truncate (mode, d);
1215 /* All this does is change the mode. */
1218 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1225 real_to_target (tmp, &d, GET_MODE (op));
1226 for (i = 0; i < 4; i++)
1228 real_from_target (&d, tmp, mode);
1234 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* FIX / UNSIGNED_FIX of a float constant to an integer mode,
   with saturation against the mode's representable range.  */
1237 else if (GET_CODE (op) == CONST_DOUBLE
1238 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1239 && GET_MODE_CLASS (mode) == MODE_INT
1240 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1242 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1243 operators are intentionally left unspecified (to ease implementation
1244 by target backends), for consistency, this routine implements the
1245 same semantics for constant folding as used by the middle-end. */
1247 /* This was formerly used only for non-IEEE float.
1248 eggert@twinsun.com says it is safe for IEEE also. */
1249 HOST_WIDE_INT xh, xl, th, tl;
1250 REAL_VALUE_TYPE x, t;
1251 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1255 if (REAL_VALUE_ISNAN (x))
1258 /* Test against the signed upper bound. */
1259 if (width > HOST_BITS_PER_WIDE_INT)
1261 th = ((unsigned HOST_WIDE_INT) 1
1262 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1268 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1270 real_from_integer (&t, VOIDmode, tl, th, 0);
1271 if (REAL_VALUES_LESS (t, x))
1278 /* Test against the signed lower bound. */
1279 if (width > HOST_BITS_PER_WIDE_INT)
1281 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1287 tl = (HOST_WIDE_INT) -1 << (width - 1);
1289 real_from_integer (&t, VOIDmode, tl, th, 0);
1290 if (REAL_VALUES_LESS (x, t))
1296 REAL_VALUE_TO_INT (&xl, &xh, x);
/* UNSIGNED_FIX path: NaN and negative inputs are not folded here.  */
1300 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1303 /* Test against the unsigned upper bound. */
1304 if (width == 2*HOST_BITS_PER_WIDE_INT)
1309 else if (width >= HOST_BITS_PER_WIDE_INT)
1311 th = ((unsigned HOST_WIDE_INT) 1
1312 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1318 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1320 real_from_integer (&t, VOIDmode, tl, th, 1);
1321 if (REAL_VALUES_LESS (t, x))
1328 REAL_VALUE_TO_INT (&xl, &xh, x);
1334 return immed_double_const (xl, xh, mode);
1340 /* Subroutine of simplify_binary_operation to simplify a commutative,
1341 associative binary operation CODE with result mode MODE, operating
1342 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1343 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1344 canonicalization is possible. */
/* NOTE(review): this excerpt elides lines of the original file (the
   embedded numbers are original line numbers).  The "static rtx"
   return type, the "rtx op0, rtx op1" parameter line, the local
   declaration of TEM, the braces, and the trailing "return 0;" are
   not visible here; the comments added below describe only what the
   visible lines themselves do.  */
1347 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1352 /* Linearize the operator to the left. */
1353 if (GET_CODE (op1) == code)
1355 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1356 if (GET_CODE (op0) == code)
/* Fold the existing left subtree with C first, then append D,
   keeping the whole expression left-associated.  */
1358 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1359 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1362 /* "a op (b op c)" becomes "(b op c) op a". */
/* Swap only when canonical operand order does not already demand
   the opposite placement.  */
1363 if (! swap_commutative_operands_p (op1, op0))
1364 return simplify_gen_binary (code, mode, op1, op0);
1371 if (GET_CODE (op0) == code)
1373 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1374 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1376 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1377 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1380 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
/* Try the inner pair in canonical order; simplify_binary_operation
   returns zero when nothing simplifies (the guard that tests TEM
   before the return below is among the elided lines).  */
1381 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1382 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1383 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1385 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1387 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
/* Same idea with the other inner pair; the corresponding test of
   TEM is likewise elided from this excerpt.  */
1388 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1389 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1390 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1392 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1399 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1400 and OP1. Return 0 if no simplification is possible.
1402 Don't use this for relational operations such as EQ or LT.
1403 Use simplify_relational_operation instead. */
/* NOTE(review): lines are elided in this excerpt (the embedded numbers
   are original file line numbers).  The return type, the second line
   of the signature, the declaration of TEM, the braces, and the test
   of the value returned by simplify_const_binary_operation are not
   visible here.  */
1405 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1408 rtx trueop0, trueop1;
1411 /* Relational operations don't work here. We must know the mode
1412 of the operands in order to do the comparison correctly.
1413 Assuming a full word can give incorrect results.
1414 Consider comparing 128 with -128 in QImode. */
1415 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1416 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1418 /* Make sure the constant is second. */
/* Canonical order for commutative operators: swap when
   swap_commutative_operands_p says op0 should come second.  */
1419 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1420 && swap_commutative_operands_p (op0, op1))
1422 tem = op0, op0 = op1, op1 = tem;
/* Presumably resolves operands that are constant-pool references to
   their underlying constant rtx, so constant folding below can see
   them -- per the helper's name; confirm against its definition.  */
1425 trueop0 = avoid_constant_pool_reference (op0);
1426 trueop1 = avoid_constant_pool_reference (op1);
/* First try full constant folding; the check of TEM that decides
   between returning it and falling through to the non-constant
   simplifications is among the elided lines.  */
1428 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1431 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1435 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1436 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1438 rtx tem, reversed, opleft, opright;
1440 unsigned int width = GET_MODE_BITSIZE (mode);
1442 /* Even if we can't compute a constant result,
1443 there are some cases worth simplifying. */
1448 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1449 when x is NaN, infinite, or finite and nonzero. They aren't
1450 when x is -0 and the rounding mode is not towards -infinity,
1451 since (-0) + 0 is then 0. */
1452 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1455 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1456 transformations are safe even for IEEE. */
1457 if (GET_CODE (op0) == NEG)
1458 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1459 else if (GET_CODE (op1) == NEG)
1460 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1462 /* (~a) + 1 -> -a */
1463 if (INTEGRAL_MODE_P (mode)
1464 && GET_CODE (op0) == NOT
1465 && trueop1 == const1_rtx)
1466 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1468 /* Handle both-operands-constant cases. We can only add
1469 CONST_INTs to constants since the sum of relocatable symbols
1470 can't be handled by most assemblers. Don't add CONST_INT
1471 to CONST_INT since overflow won't be computed properly if wider
1472 than HOST_BITS_PER_WIDE_INT. */
1474 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1475 && GET_CODE (op1) == CONST_INT)
1476 return plus_constant (op0, INTVAL (op1));
1477 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1478 && GET_CODE (op0) == CONST_INT)
1479 return plus_constant (op1, INTVAL (op0));
1481 /* See if this is something like X * C - X or vice versa or
1482 if the multiplication is written as a shift. If so, we can
1483 distribute and make a new multiply, shift, or maybe just
1484 have X (if C is 2 in the example above). But don't make
1485 something more expensive than we had before. */
1487 if (SCALAR_INT_MODE_P (mode))
1489 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1490 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1491 rtx lhs = op0, rhs = op1;
1493 if (GET_CODE (lhs) == NEG)
1497 lhs = XEXP (lhs, 0);
1499 else if (GET_CODE (lhs) == MULT
1500 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1502 coeff0l = INTVAL (XEXP (lhs, 1));
1503 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1504 lhs = XEXP (lhs, 0);
1506 else if (GET_CODE (lhs) == ASHIFT
1507 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1508 && INTVAL (XEXP (lhs, 1)) >= 0
1509 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1511 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1513 lhs = XEXP (lhs, 0);
1516 if (GET_CODE (rhs) == NEG)
1520 rhs = XEXP (rhs, 0);
1522 else if (GET_CODE (rhs) == MULT
1523 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1525 coeff1l = INTVAL (XEXP (rhs, 1));
1526 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1527 rhs = XEXP (rhs, 0);
1529 else if (GET_CODE (rhs) == ASHIFT
1530 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1531 && INTVAL (XEXP (rhs, 1)) >= 0
1532 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1534 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1536 rhs = XEXP (rhs, 0);
1539 if (rtx_equal_p (lhs, rhs))
1541 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1543 unsigned HOST_WIDE_INT l;
1546 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1547 coeff = immed_double_const (l, h, mode);
1549 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1550 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1555 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1556 if ((GET_CODE (op1) == CONST_INT
1557 || GET_CODE (op1) == CONST_DOUBLE)
1558 && GET_CODE (op0) == XOR
1559 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1560 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1561 && mode_signbit_p (mode, op1))
1562 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1563 simplify_gen_binary (XOR, mode, op1,
1566 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1567 if (GET_CODE (op0) == MULT
1568 && GET_CODE (XEXP (op0, 0)) == NEG)
1572 in1 = XEXP (XEXP (op0, 0), 0);
1573 in2 = XEXP (op0, 1);
1574 return simplify_gen_binary (MINUS, mode, op1,
1575 simplify_gen_binary (MULT, mode,
1579 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1580 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1582 if (COMPARISON_P (op0)
1583 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1584 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1585 && (reversed = reversed_comparison (op0, mode)))
1587 simplify_gen_unary (NEG, mode, reversed, mode);
1589 /* If one of the operands is a PLUS or a MINUS, see if we can
1590 simplify this by the associative law.
1591 Don't use the associative law for floating point.
1592 The inaccuracy makes it nonassociative,
1593 and subtle programs can break if operations are associated. */
1595 if (INTEGRAL_MODE_P (mode)
1596 && (plus_minus_operand_p (op0)
1597 || plus_minus_operand_p (op1))
1598 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1601 /* Reassociate floating point addition only when the user
1602 specifies unsafe math optimizations. */
1603 if (FLOAT_MODE_P (mode)
1604 && flag_unsafe_math_optimizations)
1606 tem = simplify_associative_operation (code, mode, op0, op1);
1614 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1615 using cc0, in which case we want to leave it as a COMPARE
1616 so we can distinguish it from a register-register-copy.
1618 In IEEE floating point, x-0 is not the same as x. */
1620 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1621 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1622 && trueop1 == CONST0_RTX (mode))
1626 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1627 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1628 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1629 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1631 rtx xop00 = XEXP (op0, 0);
1632 rtx xop10 = XEXP (op1, 0);
1635 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1637 if (REG_P (xop00) && REG_P (xop10)
1638 && GET_MODE (xop00) == GET_MODE (xop10)
1639 && REGNO (xop00) == REGNO (xop10)
1640 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1641 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1648 /* We can't assume x-x is 0 even with non-IEEE floating point,
1649 but since it is zero except in very strange circumstances, we
1650 will treat it as zero with -funsafe-math-optimizations. */
1651 if (rtx_equal_p (trueop0, trueop1)
1652 && ! side_effects_p (op0)
1653 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1654 return CONST0_RTX (mode);
1656 /* Change subtraction from zero into negation. (0 - x) is the
1657 same as -x when x is NaN, infinite, or finite and nonzero.
1658 But if the mode has signed zeros, and does not round towards
1659 -infinity, then 0 - 0 is 0, not -0. */
1660 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1661 return simplify_gen_unary (NEG, mode, op1, mode);
1663 /* (-1 - a) is ~a. */
1664 if (trueop0 == constm1_rtx)
1665 return simplify_gen_unary (NOT, mode, op1, mode);
1667 /* Subtracting 0 has no effect unless the mode has signed zeros
1668 and supports rounding towards -infinity. In such a case,
1670 if (!(HONOR_SIGNED_ZEROS (mode)
1671 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1672 && trueop1 == CONST0_RTX (mode))
1675 /* See if this is something like X * C - X or vice versa or
1676 if the multiplication is written as a shift. If so, we can
1677 distribute and make a new multiply, shift, or maybe just
1678 have X (if C is 2 in the example above). But don't make
1679 something more expensive than we had before. */
1681 if (SCALAR_INT_MODE_P (mode))
1683 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1684 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1685 rtx lhs = op0, rhs = op1;
1687 if (GET_CODE (lhs) == NEG)
1691 lhs = XEXP (lhs, 0);
1693 else if (GET_CODE (lhs) == MULT
1694 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1696 coeff0l = INTVAL (XEXP (lhs, 1));
1697 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1698 lhs = XEXP (lhs, 0);
1700 else if (GET_CODE (lhs) == ASHIFT
1701 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1702 && INTVAL (XEXP (lhs, 1)) >= 0
1703 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1705 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1707 lhs = XEXP (lhs, 0);
1710 if (GET_CODE (rhs) == NEG)
1714 rhs = XEXP (rhs, 0);
1716 else if (GET_CODE (rhs) == MULT
1717 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1719 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1720 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1721 rhs = XEXP (rhs, 0);
1723 else if (GET_CODE (rhs) == ASHIFT
1724 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1725 && INTVAL (XEXP (rhs, 1)) >= 0
1726 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1728 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1730 rhs = XEXP (rhs, 0);
1733 if (rtx_equal_p (lhs, rhs))
1735 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1737 unsigned HOST_WIDE_INT l;
1740 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1741 coeff = immed_double_const (l, h, mode);
1743 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1744 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1749 /* (a - (-b)) -> (a + b). True even for IEEE. */
1750 if (GET_CODE (op1) == NEG)
1751 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1753 /* (-x - c) may be simplified as (-c - x). */
1754 if (GET_CODE (op0) == NEG
1755 && (GET_CODE (op1) == CONST_INT
1756 || GET_CODE (op1) == CONST_DOUBLE))
1758 tem = simplify_unary_operation (NEG, mode, op1, mode);
1760 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1763 /* Don't let a relocatable value get a negative coeff. */
1764 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1765 return simplify_gen_binary (PLUS, mode,
1767 neg_const_int (mode, op1));
1769 /* (x - (x & y)) -> (x & ~y) */
1770 if (GET_CODE (op1) == AND)
1772 if (rtx_equal_p (op0, XEXP (op1, 0)))
1774 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1775 GET_MODE (XEXP (op1, 1)));
1776 return simplify_gen_binary (AND, mode, op0, tem);
1778 if (rtx_equal_p (op0, XEXP (op1, 1)))
1780 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1781 GET_MODE (XEXP (op1, 0)));
1782 return simplify_gen_binary (AND, mode, op0, tem);
1786 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1787 by reversing the comparison code if valid. */
1788 if (STORE_FLAG_VALUE == 1
1789 && trueop0 == const1_rtx
1790 && COMPARISON_P (op1)
1791 && (reversed = reversed_comparison (op1, mode)))
1794 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1795 if (GET_CODE (op1) == MULT
1796 && GET_CODE (XEXP (op1, 0)) == NEG)
1800 in1 = XEXP (XEXP (op1, 0), 0);
1801 in2 = XEXP (op1, 1);
1802 return simplify_gen_binary (PLUS, mode,
1803 simplify_gen_binary (MULT, mode,
1808 /* Canonicalize (minus (neg A) (mult B C)) to
1809 (minus (mult (neg B) C) A). */
1810 if (GET_CODE (op1) == MULT
1811 && GET_CODE (op0) == NEG)
1815 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1816 in2 = XEXP (op1, 1);
1817 return simplify_gen_binary (MINUS, mode,
1818 simplify_gen_binary (MULT, mode,
1823 /* If one of the operands is a PLUS or a MINUS, see if we can
1824 simplify this by the associative law. This will, for example,
1825 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1826 Don't use the associative law for floating point.
1827 The inaccuracy makes it nonassociative,
1828 and subtle programs can break if operations are associated. */
1830 if (INTEGRAL_MODE_P (mode)
1831 && (plus_minus_operand_p (op0)
1832 || plus_minus_operand_p (op1))
1833 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1838 if (trueop1 == constm1_rtx)
1839 return simplify_gen_unary (NEG, mode, op0, mode);
1841 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1842 x is NaN, since x * 0 is then also NaN. Nor is it valid
1843 when the mode has signed zeros, since multiplying a negative
1844 number by 0 will give -0, not 0. */
1845 if (!HONOR_NANS (mode)
1846 && !HONOR_SIGNED_ZEROS (mode)
1847 && trueop1 == CONST0_RTX (mode)
1848 && ! side_effects_p (op0))
1851 /* In IEEE floating point, x*1 is not equivalent to x for
1853 if (!HONOR_SNANS (mode)
1854 && trueop1 == CONST1_RTX (mode))
1857 /* Convert multiply by constant power of two into shift unless
1858 we are still generating RTL. This test is a kludge. */
1859 if (GET_CODE (trueop1) == CONST_INT
1860 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1861 /* If the mode is larger than the host word size, and the
1862 uppermost bit is set, then this isn't a power of two due
1863 to implicit sign extension. */
1864 && (width <= HOST_BITS_PER_WIDE_INT
1865 || val != HOST_BITS_PER_WIDE_INT - 1))
1866 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1868 /* Likewise for multipliers wider than a word. */
1869 else if (GET_CODE (trueop1) == CONST_DOUBLE
1870 && (GET_MODE (trueop1) == VOIDmode
1871 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1872 && GET_MODE (op0) == mode
1873 && CONST_DOUBLE_LOW (trueop1) == 0
1874 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1875 return simplify_gen_binary (ASHIFT, mode, op0,
1876 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1878 /* x*2 is x+x and x*(-1) is -x */
1879 if (GET_CODE (trueop1) == CONST_DOUBLE
1880 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1881 && GET_MODE (op0) == mode)
1884 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1886 if (REAL_VALUES_EQUAL (d, dconst2))
1887 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1889 if (REAL_VALUES_EQUAL (d, dconstm1))
1890 return simplify_gen_unary (NEG, mode, op0, mode);
1893 /* Reassociate multiplication, but for floating point MULTs
1894 only when the user specifies unsafe math optimizations. */
1895 if (! FLOAT_MODE_P (mode)
1896 || flag_unsafe_math_optimizations)
1898 tem = simplify_associative_operation (code, mode, op0, op1);
1905 if (trueop1 == const0_rtx)
1907 if (GET_CODE (trueop1) == CONST_INT
1908 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1909 == GET_MODE_MASK (mode)))
1911 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1913 /* A | (~A) -> -1 */
1914 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1915 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1916 && ! side_effects_p (op0)
1917 && SCALAR_INT_MODE_P (mode))
1920 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1921 if (GET_CODE (op1) == CONST_INT
1922 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1923 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1926 /* Convert (A & B) | A to A. */
1927 if (GET_CODE (op0) == AND
1928 && (rtx_equal_p (XEXP (op0, 0), op1)
1929 || rtx_equal_p (XEXP (op0, 1), op1))
1930 && ! side_effects_p (XEXP (op0, 0))
1931 && ! side_effects_p (XEXP (op0, 1)))
1934 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1935 mode size to (rotate A CX). */
1937 if (GET_CODE (op1) == ASHIFT
1938 || GET_CODE (op1) == SUBREG)
1949 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
1950 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
1951 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
1952 && GET_CODE (XEXP (opright, 1)) == CONST_INT
1953 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
1954 == GET_MODE_BITSIZE (mode)))
1955 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
1957 /* Same, but for ashift that has been "simplified" to a wider mode
1958 by simplify_shift_const. */
1960 if (GET_CODE (opleft) == SUBREG
1961 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
1962 && GET_CODE (opright) == LSHIFTRT
1963 && GET_CODE (XEXP (opright, 0)) == SUBREG
1964 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
1965 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
1966 && (GET_MODE_SIZE (GET_MODE (opleft))
1967 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
1968 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
1969 SUBREG_REG (XEXP (opright, 0)))
1970 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
1971 && GET_CODE (XEXP (opright, 1)) == CONST_INT
1972 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
1973 == GET_MODE_BITSIZE (mode)))
1974 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
1975 XEXP (SUBREG_REG (opright), 1));
1977 /* If we have (ior (and (X C1) C2)), simplify this by making
1978 C1 as small as possible if C1 actually changes. */
1979 if (GET_CODE (op1) == CONST_INT
1980 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1981 || INTVAL (op1) > 0)
1982 && GET_CODE (op0) == AND
1983 && GET_CODE (XEXP (op0, 1)) == CONST_INT
1984 && GET_CODE (op1) == CONST_INT
1985 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
1986 return simplify_gen_binary (IOR, mode,
1988 (AND, mode, XEXP (op0, 0),
1989 GEN_INT (INTVAL (XEXP (op0, 1))
1993 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
1994 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
1995 the PLUS does not affect any of the bits in OP1: then we can do
1996 the IOR as a PLUS and we can associate. This is valid if OP1
1997 can be safely shifted left C bits. */
1998 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
1999 && GET_CODE (XEXP (op0, 0)) == PLUS
2000 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2001 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2002 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2004 int count = INTVAL (XEXP (op0, 1));
2005 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2007 if (mask >> count == INTVAL (trueop1)
2008 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2009 return simplify_gen_binary (ASHIFTRT, mode,
2010 plus_constant (XEXP (op0, 0), mask),
2014 tem = simplify_associative_operation (code, mode, op0, op1);
2020 if (trueop1 == const0_rtx)
2022 if (GET_CODE (trueop1) == CONST_INT
2023 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2024 == GET_MODE_MASK (mode)))
2025 return simplify_gen_unary (NOT, mode, op0, mode);
2026 if (rtx_equal_p (trueop0, trueop1)
2027 && ! side_effects_p (op0)
2028 && GET_MODE_CLASS (mode) != MODE_CC)
2029 return CONST0_RTX (mode);
2031 /* Canonicalize XOR of the most significant bit to PLUS. */
2032 if ((GET_CODE (op1) == CONST_INT
2033 || GET_CODE (op1) == CONST_DOUBLE)
2034 && mode_signbit_p (mode, op1))
2035 return simplify_gen_binary (PLUS, mode, op0, op1);
2036 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2037 if ((GET_CODE (op1) == CONST_INT
2038 || GET_CODE (op1) == CONST_DOUBLE)
2039 && GET_CODE (op0) == PLUS
2040 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2041 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2042 && mode_signbit_p (mode, XEXP (op0, 1)))
2043 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2044 simplify_gen_binary (XOR, mode, op1,
2047 /* If we are XORing two things that have no bits in common,
2048 convert them into an IOR. This helps to detect rotation encoded
2049 using those methods and possibly other simplifications. */
2051 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2052 && (nonzero_bits (op0, mode)
2053 & nonzero_bits (op1, mode)) == 0)
2054 return (simplify_gen_binary (IOR, mode, op0, op1));
2056 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2057 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2060 int num_negated = 0;
2062 if (GET_CODE (op0) == NOT)
2063 num_negated++, op0 = XEXP (op0, 0);
2064 if (GET_CODE (op1) == NOT)
2065 num_negated++, op1 = XEXP (op1, 0);
2067 if (num_negated == 2)
2068 return simplify_gen_binary (XOR, mode, op0, op1);
2069 else if (num_negated == 1)
2070 return simplify_gen_unary (NOT, mode,
2071 simplify_gen_binary (XOR, mode, op0, op1),
2075 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2076 correspond to a machine insn or result in further simplifications
2077 if B is a constant. */
2079 if (GET_CODE (op0) == AND
2080 && rtx_equal_p (XEXP (op0, 1), op1)
2081 && ! side_effects_p (op1))
2082 return simplify_gen_binary (AND, mode,
2083 simplify_gen_unary (NOT, mode,
2084 XEXP (op0, 0), mode),
2087 else if (GET_CODE (op0) == AND
2088 && rtx_equal_p (XEXP (op0, 0), op1)
2089 && ! side_effects_p (op1))
2090 return simplify_gen_binary (AND, mode,
2091 simplify_gen_unary (NOT, mode,
2092 XEXP (op0, 1), mode),
2095 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2096 comparison if STORE_FLAG_VALUE is 1. */
2097 if (STORE_FLAG_VALUE == 1
2098 && trueop1 == const1_rtx
2099 && COMPARISON_P (op0)
2100 && (reversed = reversed_comparison (op0, mode)))
2103 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2104 is (lt foo (const_int 0)), so we can perform the above
2105 simplification if STORE_FLAG_VALUE is 1. */
2107 if (STORE_FLAG_VALUE == 1
2108 && trueop1 == const1_rtx
2109 && GET_CODE (op0) == LSHIFTRT
2110 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2111 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2112 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2114 /* (xor (comparison foo bar) (const_int sign-bit))
2115 when STORE_FLAG_VALUE is the sign bit. */
2116 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2117 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2118 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2119 && trueop1 == const_true_rtx
2120 && COMPARISON_P (op0)
2121 && (reversed = reversed_comparison (op0, mode)))
2126 tem = simplify_associative_operation (code, mode, op0, op1);
2132 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2134 /* If we are turning off bits already known off in OP0, we need
2136 if (GET_CODE (trueop1) == CONST_INT
2137 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2138 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2140 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2141 && GET_MODE_CLASS (mode) != MODE_CC)
2144 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2145 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2146 && ! side_effects_p (op0)
2147 && GET_MODE_CLASS (mode) != MODE_CC)
2148 return CONST0_RTX (mode);
2150 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2151 there are no nonzero bits of C outside of X's mode. */
2152 if ((GET_CODE (op0) == SIGN_EXTEND
2153 || GET_CODE (op0) == ZERO_EXTEND)
2154 && GET_CODE (trueop1) == CONST_INT
2155 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2156 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2157 & INTVAL (trueop1)) == 0)
2159 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2160 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2161 gen_int_mode (INTVAL (trueop1),
2163 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2166 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2167 insn (and may simplify more). */
2168 if (GET_CODE (op0) == XOR
2169 && rtx_equal_p (XEXP (op0, 0), op1)
2170 && ! side_effects_p (op1))
2171 return simplify_gen_binary (AND, mode,
2172 simplify_gen_unary (NOT, mode,
2173 XEXP (op0, 1), mode),
2176 if (GET_CODE (op0) == XOR
2177 && rtx_equal_p (XEXP (op0, 1), op1)
2178 && ! side_effects_p (op1))
2179 return simplify_gen_binary (AND, mode,
2180 simplify_gen_unary (NOT, mode,
2181 XEXP (op0, 0), mode),
2184 /* Similarly for (~(A ^ B)) & A. */
2185 if (GET_CODE (op0) == NOT
2186 && GET_CODE (XEXP (op0, 0)) == XOR
2187 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2188 && ! side_effects_p (op1))
2189 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2191 if (GET_CODE (op0) == NOT
2192 && GET_CODE (XEXP (op0, 0)) == XOR
2193 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2194 && ! side_effects_p (op1))
2195 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2197 /* Convert (A | B) & A to A. */
2198 if (GET_CODE (op0) == IOR
2199 && (rtx_equal_p (XEXP (op0, 0), op1)
2200 || rtx_equal_p (XEXP (op0, 1), op1))
2201 && ! side_effects_p (XEXP (op0, 0))
2202 && ! side_effects_p (XEXP (op0, 1)))
2205 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2206 ((A & N) + B) & M -> (A + B) & M
2207 Similarly if (N & M) == 0,
2208 ((A | N) + B) & M -> (A + B) & M
2209 and for - instead of + and/or ^ instead of |. */
2210 if (GET_CODE (trueop1) == CONST_INT
2211 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2212 && ~INTVAL (trueop1)
2213 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2214 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2219 pmop[0] = XEXP (op0, 0);
2220 pmop[1] = XEXP (op0, 1);
2222 for (which = 0; which < 2; which++)
2225 switch (GET_CODE (tem))
2228 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2229 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2230 == INTVAL (trueop1))
2231 pmop[which] = XEXP (tem, 0);
2235 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2236 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2237 pmop[which] = XEXP (tem, 0);
2244 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2246 tem = simplify_gen_binary (GET_CODE (op0), mode,
2248 return simplify_gen_binary (code, mode, tem, op1);
2251 tem = simplify_associative_operation (code, mode, op0, op1);
2257 /* 0/x is 0 (or x&0 if x has side-effects). */
2258 if (trueop0 == CONST0_RTX (mode))
2260 if (side_effects_p (op1))
2261 return simplify_gen_binary (AND, mode, op1, trueop0);
2265 if (trueop1 == CONST1_RTX (mode))
2266 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2267 /* Convert divide by power of two into shift. */
2268 if (GET_CODE (trueop1) == CONST_INT
2269 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2270 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2274 /* Handle floating point and integers separately. */
2275 if (SCALAR_FLOAT_MODE_P (mode))
2277 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2278 safe for modes with NaNs, since 0.0 / 0.0 will then be
2279 NaN rather than 0.0. Nor is it safe for modes with signed
2280 zeros, since dividing 0 by a negative number gives -0.0 */
2281 if (trueop0 == CONST0_RTX (mode)
2282 && !HONOR_NANS (mode)
2283 && !HONOR_SIGNED_ZEROS (mode)
2284 && ! side_effects_p (op1))
2287 if (trueop1 == CONST1_RTX (mode)
2288 && !HONOR_SNANS (mode))
2291 if (GET_CODE (trueop1) == CONST_DOUBLE
2292 && trueop1 != CONST0_RTX (mode))
2295 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2298 if (REAL_VALUES_EQUAL (d, dconstm1)
2299 && !HONOR_SNANS (mode))
2300 return simplify_gen_unary (NEG, mode, op0, mode);
2302 /* Change FP division by a constant into multiplication.
2303 Only do this with -funsafe-math-optimizations. */
2304 if (flag_unsafe_math_optimizations
2305 && !REAL_VALUES_EQUAL (d, dconst0))
2307 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2308 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2309 return simplify_gen_binary (MULT, mode, op0, tem);
2315 /* 0/x is 0 (or x&0 if x has side-effects). */
2316 if (trueop0 == CONST0_RTX (mode))
2318 if (side_effects_p (op1))
2319 return simplify_gen_binary (AND, mode, op1, trueop0);
2323 if (trueop1 == CONST1_RTX (mode))
2324 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2326 if (trueop1 == constm1_rtx)
2328 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2329 return simplify_gen_unary (NEG, mode, x, mode);
2335 /* 0%x is 0 (or x&0 if x has side-effects). */
2336 if (trueop0 == CONST0_RTX (mode))
2338 if (side_effects_p (op1))
2339 return simplify_gen_binary (AND, mode, op1, trueop0);
2342 /* x%1 is 0 (of x&0 if x has side-effects). */
2343 if (trueop1 == CONST1_RTX (mode))
2345 if (side_effects_p (op0))
2346 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2347 return CONST0_RTX (mode);
2349 /* Implement modulus by power of two as AND. */
2350 if (GET_CODE (trueop1) == CONST_INT
2351 && exact_log2 (INTVAL (trueop1)) > 0)
2352 return simplify_gen_binary (AND, mode, op0,
2353 GEN_INT (INTVAL (op1) - 1));
2357 /* 0%x is 0 (or x&0 if x has side-effects). */
2358 if (trueop0 == CONST0_RTX (mode))
2360 if (side_effects_p (op1))
2361 return simplify_gen_binary (AND, mode, op1, trueop0);
2364 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2365 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2367 if (side_effects_p (op0))
2368 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2369 return CONST0_RTX (mode);
2376 /* Rotating ~0 always results in ~0. */
2377 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2378 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2379 && ! side_effects_p (op1))
2382 /* Fall through.... */
2386 if (trueop1 == CONST0_RTX (mode))
2388 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2393 if (width <= HOST_BITS_PER_WIDE_INT
2394 && GET_CODE (trueop1) == CONST_INT
2395 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2396 && ! side_effects_p (op0))
2398 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2400 tem = simplify_associative_operation (code, mode, op0, op1);
2406 if (width <= HOST_BITS_PER_WIDE_INT
2407 && GET_CODE (trueop1) == CONST_INT
2408 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2409 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2410 && ! side_effects_p (op0))
2412 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2414 tem = simplify_associative_operation (code, mode, op0, op1);
2420 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2422 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2424 tem = simplify_associative_operation (code, mode, op0, op1);
2430 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2432 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2434 tem = simplify_associative_operation (code, mode, op0, op1);
2443 /* ??? There are simplifications that can be done. */
2447 if (!VECTOR_MODE_P (mode))
2449 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2450 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2451 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2452 gcc_assert (XVECLEN (trueop1, 0) == 1);
2453 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2455 if (GET_CODE (trueop0) == CONST_VECTOR)
2456 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2461 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2462 gcc_assert (GET_MODE_INNER (mode)
2463 == GET_MODE_INNER (GET_MODE (trueop0)));
2464 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2466 if (GET_CODE (trueop0) == CONST_VECTOR)
2468 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2469 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2470 rtvec v = rtvec_alloc (n_elts);
2473 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2474 for (i = 0; i < n_elts; i++)
2476 rtx x = XVECEXP (trueop1, 0, i);
2478 gcc_assert (GET_CODE (x) == CONST_INT);
2479 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2483 return gen_rtx_CONST_VECTOR (mode, v);
2487 if (XVECLEN (trueop1, 0) == 1
2488 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2489 && GET_CODE (trueop0) == VEC_CONCAT)
2492 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2494 /* Try to find the element in the VEC_CONCAT. */
2495 while (GET_MODE (vec) != mode
2496 && GET_CODE (vec) == VEC_CONCAT)
2498 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2499 if (offset < vec_size)
2500 vec = XEXP (vec, 0);
2504 vec = XEXP (vec, 1);
2506 vec = avoid_constant_pool_reference (vec);
2509 if (GET_MODE (vec) == mode)
2516 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2517 ? GET_MODE (trueop0)
2518 : GET_MODE_INNER (mode));
2519 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2520 ? GET_MODE (trueop1)
2521 : GET_MODE_INNER (mode));
2523 gcc_assert (VECTOR_MODE_P (mode));
2524 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2525 == GET_MODE_SIZE (mode));
2527 if (VECTOR_MODE_P (op0_mode))
2528 gcc_assert (GET_MODE_INNER (mode)
2529 == GET_MODE_INNER (op0_mode));
2531 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2533 if (VECTOR_MODE_P (op1_mode))
2534 gcc_assert (GET_MODE_INNER (mode)
2535 == GET_MODE_INNER (op1_mode));
2537 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2539 if ((GET_CODE (trueop0) == CONST_VECTOR
2540 || GET_CODE (trueop0) == CONST_INT
2541 || GET_CODE (trueop0) == CONST_DOUBLE)
2542 && (GET_CODE (trueop1) == CONST_VECTOR
2543 || GET_CODE (trueop1) == CONST_INT
2544 || GET_CODE (trueop1) == CONST_DOUBLE))
2546 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2547 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2548 rtvec v = rtvec_alloc (n_elts);
2550 unsigned in_n_elts = 1;
2552 if (VECTOR_MODE_P (op0_mode))
2553 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2554 for (i = 0; i < n_elts; i++)
2558 if (!VECTOR_MODE_P (op0_mode))
2559 RTVEC_ELT (v, i) = trueop0;
2561 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2565 if (!VECTOR_MODE_P (op1_mode))
2566 RTVEC_ELT (v, i) = trueop1;
2568 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2573 return gen_rtx_CONST_VECTOR (mode, v);
/* Fold the binary operation CODE applied to the constant operands OP0
   and OP1 in MODE.  Returns the folded constant rtx, or 0 when the
   operands cannot be folded at compile time.
   NOTE(review): this listing is elided -- the embedded original line
   numbers are non-consecutive, so braces, case labels and else-arms
   are missing from view.  Comments below describe only visible code.  */
2586 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2589 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2591 unsigned int width = GET_MODE_BITSIZE (mode);
/* CONST_VECTOR op CONST_VECTOR (anything but VEC_CONCAT): fold each
   element pair recursively and rebuild the vector.  */
2593 if (VECTOR_MODE_P (mode)
2594 && code != VEC_CONCAT
2595 && GET_CODE (op0) == CONST_VECTOR
2596 && GET_CODE (op1) == CONST_VECTOR)
2598 unsigned n_elts = GET_MODE_NUNITS (mode);
2599 enum machine_mode op0mode = GET_MODE (op0);
2600 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2601 enum machine_mode op1mode = GET_MODE (op1);
2602 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2603 rtvec v = rtvec_alloc (n_elts);
2606 gcc_assert (op0_n_elts == n_elts);
2607 gcc_assert (op1_n_elts == n_elts);
2608 for (i = 0; i < n_elts; i++)
2610 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2611 CONST_VECTOR_ELT (op0, i),
2612 CONST_VECTOR_ELT (op1, i));
2615 RTVEC_ELT (v, i) = x;
2618 return gen_rtx_CONST_VECTOR (mode, v);
/* VEC_CONCAT of two constants: build the result vector directly,
   either from two scalars or from two constant vectors.  */
2621 if (VECTOR_MODE_P (mode)
2622 && code == VEC_CONCAT
2623 && CONSTANT_P (op0) && CONSTANT_P (op1))
2625 unsigned n_elts = GET_MODE_NUNITS (mode);
2626 rtvec v = rtvec_alloc (n_elts);
2628 gcc_assert (n_elts >= 2);
2631 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2632 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2634 RTVEC_ELT (v, 0) = op0;
2635 RTVEC_ELT (v, 1) = op1;
2639 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2640 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2643 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2644 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2645 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2647 for (i = 0; i < op0_n_elts; ++i)
2648 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2649 for (i = 0; i < op1_n_elts; ++i)
2650 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2653 return gen_rtx_CONST_VECTOR (mode, v);
/* Scalar floating-point folding on a pair of CONST_DOUBLEs whose mode
   matches MODE.  The bit-level path uses real_to_target/real_from_target;
   the arithmetic path uses the REAL_VALUE_TYPE software emulation.  */
2656 if (SCALAR_FLOAT_MODE_P (mode)
2657 && GET_CODE (op0) == CONST_DOUBLE
2658 && GET_CODE (op1) == CONST_DOUBLE
2659 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2670 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2672 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2674 for (i = 0; i < 4; i++)
2691 real_from_target (&r, tmp0, mode);
2692 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2696 REAL_VALUE_TYPE f0, f1, value, result;
2699 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2700 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2701 real_convert (&f0, mode, &f0);
2702 real_convert (&f1, mode, &f1);
/* Refuse to fold when folding would lose a trap/exception the program
   may rely on (signaling NaNs, division by zero, Inf-Inf cases).  */
2704 if (HONOR_SNANS (mode)
2705 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2709 && REAL_VALUES_EQUAL (f1, dconst0)
2710 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2713 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2714 && flag_trapping_math
2715 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2717 int s0 = REAL_VALUE_NEGATIVE (f0);
2718 int s1 = REAL_VALUE_NEGATIVE (f1);
2723 /* Inf + -Inf = NaN plus exception. */
2728 /* Inf - Inf = NaN plus exception. */
2733 /* Inf / Inf = NaN plus exception. */
2740 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2741 && flag_trapping_math
2742 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2743 || (REAL_VALUE_ISINF (f1)
2744 && REAL_VALUES_EQUAL (f0, dconst0))))
2745 /* Inf * 0 = NaN plus exception. */
2748 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2750 real_convert (&result, mode, &value);
2752 /* Don't constant fold this floating point operation if
2753 the result has overflowed and flag_trapping_math. */
2755 if (flag_trapping_math
2756 && MODE_HAS_INFINITIES (mode)
2757 && REAL_VALUE_ISINF (result)
2758 && !REAL_VALUE_ISINF (f0)
2759 && !REAL_VALUE_ISINF (f1))
2760 /* Overflow plus exception. */
2763 /* Don't constant fold this floating point operation if the
2764 result may depend upon the run-time rounding mode and
2765 flag_rounding_math is set, or if GCC's software emulation
2766 is unable to accurately represent the result. */
2768 if ((flag_rounding_math
2769 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2770 && !flag_unsafe_math_optimizations))
2771 && (inexact || !real_identical (&result, &value)))
2774 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2778 /* We can fold some multi-word operations. */
/* Double-word integer folding: each operand is split into a low
   unsigned word and a high signed word (see HWI_SIGN_EXTEND above),
   and the double-word helpers from the tree-level folder are used.  */
2779 if (GET_MODE_CLASS (mode) == MODE_INT
2780 && width == HOST_BITS_PER_WIDE_INT * 2
2781 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2782 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2784 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2785 HOST_WIDE_INT h1, h2, hv, ht;
2787 if (GET_CODE (op0) == CONST_DOUBLE)
2788 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2790 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2792 if (GET_CODE (op1) == CONST_DOUBLE)
2793 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2795 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2800 /* A - B == A + (-B). */
2801 neg_double (l2, h2, &lv, &hv);
2804 /* Fall through.... */
2807 add_double (l1, h1, l2, h2, &lv, &hv);
2811 mul_double (l1, h1, l2, h2, &lv, &hv);
2815 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2816 &lv, &hv, <, &ht))
2821 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2822 <, &ht, &lv, &hv))
2827 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2828 &lv, &hv, <, &ht))
2833 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2834 <, &ht, &lv, &hv))
2839 lv = l1 & l2, hv = h1 & h2;
2843 lv = l1 | l2, hv = h1 | h2;
2847 lv = l1 ^ l2, hv = h1 ^ h2;
/* MIN/MAX comparisons on the (high, low) pairs: signed compare on the
   high word, unsigned compare on the low word when highs are equal.  */
2853 && ((unsigned HOST_WIDE_INT) l1
2854 < (unsigned HOST_WIDE_INT) l2)))
2863 && ((unsigned HOST_WIDE_INT) l1
2864 > (unsigned HOST_WIDE_INT) l2)))
2871 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2873 && ((unsigned HOST_WIDE_INT) l1
2874 < (unsigned HOST_WIDE_INT) l2)))
2881 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2883 && ((unsigned HOST_WIDE_INT) l1
2884 > (unsigned HOST_WIDE_INT) l2)))
2890 case LSHIFTRT: case ASHIFTRT:
2892 case ROTATE: case ROTATERT:
2893 if (SHIFT_COUNT_TRUNCATED)
2894 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
/* Out-of-range double-word shift counts are not folded at all.  */
2896 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2899 if (code == LSHIFTRT || code == ASHIFTRT)
2900 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2902 else if (code == ASHIFT)
2903 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2904 else if (code == ROTATE)
2905 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2906 else /* code == ROTATERT */
2907 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2914 return immed_double_const (lv, hv, mode);
/* Single-word CONST_INT folding: WIDTH fits in one HOST_WIDE_INT.  */
2917 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2918 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2920 /* Get the integer argument values in two forms:
2921 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2923 arg0 = INTVAL (op0);
2924 arg1 = INTVAL (op1);
2926 if (width < HOST_BITS_PER_WIDE_INT)
2928 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2929 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2932 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2933 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2936 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2937 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2945 /* Compute the value of the arithmetic. */
2950 val = arg0s + arg1s;
2954 val = arg0s - arg1s;
2958 val = arg0s * arg1s;
/* Division/modulus guard against the INT_MIN / -1 overflow case
   (most-negative value divided by -1) as well as division by zero;
   the visible disjunct below is part of that guard.  */
2963 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2966 val = arg0s / arg1s;
2971 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2974 val = arg0s % arg1s;
2979 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2982 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2987 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2990 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3008 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3009 the value is in range. We can't return any old value for
3010 out-of-range arguments because either the middle-end (via
3011 shift_truncation_mask) or the back-end might be relying on
3012 target-specific knowledge. Nor can we rely on
3013 shift_truncation_mask, since the shift might not be part of an
3014 ashlM3, lshrM3 or ashrM3 instruction. */
3015 if (SHIFT_COUNT_TRUNCATED)
3016 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3017 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
/* Shifts are done on the zero-extended value so they are well defined
   in C; ASHIFTRT then re-creates the sign bits explicitly below.  */
3020 val = (code == ASHIFT
3021 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3022 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3024 /* Sign-extend the result for arithmetic right shifts. */
3025 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3026 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3034 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3035 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3043 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3044 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3048 /* Do nothing here. */
3052 val = arg0s <= arg1s ? arg0s : arg1s;
3056 val = ((unsigned HOST_WIDE_INT) arg0
3057 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3061 val = arg0s > arg1s ? arg0s : arg1s;
3065 val = ((unsigned HOST_WIDE_INT) arg0
3066 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3073 /* ??? There are simplifications that can be done. */
3080 return gen_int_mode (val, mode);
3088 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3091 Rather than test for specific case, we do this by a brute-force method
3092 and do all possible simplifications until no more changes occur. Then
3093 we rebuild the operation. */
/* One addend collected by simplify_plus_minus: the operand rtx (op),
   a flag saying whether it is negated (neg), and its original index
   (ix, used as the qsort tie-break).
   NOTE(review): the field declarations are elided from this listing;
   the names op, neg and ix are taken from the code below that uses
   them.  */
3095 struct simplify_plus_minus_op_data
3103 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3105 const struct simplify_plus_minus_op_data *d1 = p1;
3106 const struct simplify_plus_minus_op_data *d2 = p2;
3109 result = (commutative_operand_precedence (d2->op)
3110 - commutative_operand_precedence (d1->op));
3113 return d1->ix - d2->ix;
/* Simplify a nest of PLUS/MINUS/NEG/NOT expressions rooted at OP0 CODE OP1
   by flattening every addend into the local OPS[] array (each entry
   carrying a negation flag), repeatedly combining pairs, sorting, and
   rebuilding the expression.  Returns the rebuilt rtx or NULL.
   NOTE(review): this listing is elided -- embedded original line numbers
   are non-consecutive, so loop braces, conditions and several statements
   are missing from view.  Comments below describe only visible code.  */
3117 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3120 struct simplify_plus_minus_op_data ops[8];
3122 int n_ops = 2, input_ops = 2;
3123 int first, changed, canonicalized = 0;
3126 memset (ops, 0, sizeof ops);
3128 /* Set up the two operands and then expand them until nothing has been
3129 changed. If we run out of room in our array, give up; this should
3130 almost never happen. */
3135 ops[1].neg = (code == MINUS);
3141 for (i = 0; i < n_ops; i++)
3143 rtx this_op = ops[i].op;
3144 int this_neg = ops[i].neg;
3145 enum rtx_code this_code = GET_CODE (this_op);
/* PLUS/MINUS: split into two entries; the second operand of a MINUS
   flips its negation flag.  */
3154 ops[n_ops].op = XEXP (this_op, 1);
3155 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3158 ops[i].op = XEXP (this_op, 0);
3161 canonicalized |= this_neg;
/* NEG: keep the operand but invert its negation flag.  */
3165 ops[i].op = XEXP (this_op, 0);
3166 ops[i].neg = ! this_neg;
/* (const (plus C1 C2)): split the constant sum into two entries.  */
3173 && GET_CODE (XEXP (this_op, 0)) == PLUS
3174 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3175 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3177 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3178 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3179 ops[n_ops].neg = this_neg;
3187 /* ~a -> (-a - 1) */
3190 ops[n_ops].op = constm1_rtx;
3191 ops[n_ops++].neg = this_neg;
3192 ops[i].op = XEXP (this_op, 0);
3193 ops[i].neg = !this_neg;
/* Negated CONST_INT: fold the negation into the constant itself.  */
3202 ops[i].op = neg_const_int (mode, this_op);
3216 gcc_assert (n_ops >= 2);
/* Count the CONST_INT addends; with at most one constant there is
   nothing for the constant-combining pass to do (elided branch).  */
3219 int n_constants = 0;
3221 for (i = 0; i < n_ops; i++)
3222 if (GET_CODE (ops[i].op) == CONST_INT)
3225 if (n_constants <= 1)
3229 /* If we only have two operands, we can avoid the loops. */
3232 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3235 /* Get the two operands. Be careful with the order, especially for
3236 the cases where code == MINUS. */
3237 if (ops[0].neg && ops[1].neg)
3239 lhs = gen_rtx_NEG (mode, ops[0].op);
3242 else if (ops[0].neg)
3253 return simplify_const_binary_operation (code, mode, lhs, rhs);
3256 /* Now simplify each pair of operands until nothing changes. The first
3257 time through just simplify constants against each other. */
3264 for (i = 0; i < n_ops - 1; i++)
3265 for (j = i + 1; j < n_ops; j++)
3267 rtx lhs = ops[i].op, rhs = ops[j].op;
3268 int lneg = ops[i].neg, rneg = ops[j].neg;
3270 if (lhs != 0 && rhs != 0
3271 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
3273 enum rtx_code ncode = PLUS;
3279 tem = lhs, lhs = rhs, rhs = tem;
3281 else if (swap_commutative_operands_p (lhs, rhs))
3282 tem = lhs, lhs = rhs, rhs = tem;
3284 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3286 /* Reject "simplifications" that just wrap the two
3287 arguments in a CONST. Failure to do so can result
3288 in infinite recursion with simplify_binary_operation
3289 when it calls us to simplify CONST operations. */
3291 && ! (GET_CODE (tem) == CONST
3292 && GET_CODE (XEXP (tem, 0)) == ncode
3293 && XEXP (XEXP (tem, 0), 0) == lhs
3294 && XEXP (XEXP (tem, 0), 1) == rhs)
3295 /* Don't allow -x + -1 -> ~x simplifications in the
3296 first pass. This allows us the chance to combine
3297 the -1 with other constants. */
3299 && GET_CODE (tem) == NOT
3300 && XEXP (tem, 0) == rhs))
/* Strip an outer NEG / negate a CONST_INT so the combined result is
   stored un-negated whenever possible.  */
3303 if (GET_CODE (tem) == NEG)
3304 tem = XEXP (tem, 0), lneg = !lneg;
3305 if (GET_CODE (tem) == CONST_INT && lneg)
3306 tem = neg_const_int (mode, tem), lneg = 0;
3310 ops[j].op = NULL_RTX;
3320 /* Pack all the operands to the lower-numbered entries. */
3321 for (i = 0, j = 0; j < n_ops; j++)
3325 /* Stabilize sort. */
3331 /* Sort the operations based on swap_commutative_operands_p. */
3332 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
3334 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3336 && GET_CODE (ops[1].op) == CONST_INT
3337 && CONSTANT_P (ops[0].op)
3339 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3341 /* We suppressed creation of trivial CONST expressions in the
3342 combination loop to avoid recursion. Create one manually now.
3343 The combination loop should have ensured that there is exactly
3344 one CONST_INT, and the sort will have ensured that it is last
3345 in the array and that any other constant will be next-to-last. */
3348 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3349 && CONSTANT_P (ops[n_ops - 2].op))
3351 rtx value = ops[n_ops - 1].op;
3352 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3353 value = neg_const_int (mode, value);
3354 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3358 /* Put a non-negated operand first, if possible. */
3360 for (i = 0; i < n_ops && ops[i].neg; i++)
3363 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3372 /* Now make the result by performing the requested operations. */
3374 for (i = 1; i < n_ops; i++)
3375 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3376 mode, result, ops[i].op);
3381 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3383 plus_minus_operand_p (rtx x)
3385 return GET_CODE (x) == PLUS
3386 || GET_CODE (x) == MINUS
3387 || (GET_CODE (x) == CONST
3388 && GET_CODE (XEXP (x, 0)) == PLUS
3389 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3390 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3393 /* Like simplify_binary_operation except used for relational operators.
3394 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3395 not also be VOIDmode.
3397 CMP_MODE specifies in which mode the comparison is done in, so it is
3398 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3399 the operands or, if both are VOIDmode, the operands are compared in
3400 "infinite precision".
   NOTE(review): this listing is elided -- embedded original line numbers
   are non-consecutive, so some braces and statements are missing.  */
3402 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3403 enum machine_mode cmp_mode, rtx op0, rtx op1)
3405 rtx tem, trueop0, trueop1;
3407 if (cmp_mode == VOIDmode)
3408 cmp_mode = GET_MODE (op0);
3409 if (cmp_mode == VOIDmode)
3410 cmp_mode = GET_MODE (op1);
/* First try constant folding; a non-null TEM is const0_rtx or
   const_true_rtx and must then be converted to MODE's encoding of
   false/true (scalar FP and vector modes have their own encodings).  */
3412 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3415 if (SCALAR_FLOAT_MODE_P (mode))
3417 if (tem == const0_rtx)
3418 return CONST0_RTX (mode);
3419 #ifdef FLOAT_STORE_FLAG_VALUE
3421 REAL_VALUE_TYPE val;
3422 val = FLOAT_STORE_FLAG_VALUE (mode);
3423 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3429 if (VECTOR_MODE_P (mode))
3431 if (tem == const0_rtx)
3432 return CONST0_RTX (mode);
3433 #ifdef VECTOR_STORE_FLAG_VALUE
/* Broadcast the target's per-element "true" value into every lane.  */
3438 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3439 if (val == NULL_RTX)
3441 if (val == const1_rtx)
3442 return CONST1_RTX (mode);
3444 units = GET_MODE_NUNITS (mode);
3445 v = rtvec_alloc (units);
3446 for (i = 0; i < units; i++)
3447 RTVEC_ELT (v, i) = val;
3448 return gen_rtx_raw_CONST_VECTOR (mode, v);
3458 /* For the following tests, ensure const0_rtx is op1. */
3459 if (swap_commutative_operands_p (op0, op1)
3460 || (op0 == const0_rtx && op1 != const0_rtx))
3461 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3463 /* If op0 is a compare, extract the comparison arguments from it. */
3464 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3465 return simplify_relational_operation (code, mode, VOIDmode,
3466 XEXP (op0, 0), XEXP (op0, 1));
3468 if (mode == VOIDmode
3469 || GET_MODE_CLASS (cmp_mode) == MODE_CC
/* Non-constant simplifications are delegated to the _1 helper with
   constant-pool references resolved.  */
3473 trueop0 = avoid_constant_pool_reference (op0);
3474 trueop1 = avoid_constant_pool_reference (op1);
3475 return simplify_relational_operation_1 (code, mode, cmp_mode,
3479 /* This part of simplify_relational_operation is only used when CMP_MODE
3480 is not in class MODE_CC (i.e. it is a real comparison).
3482 MODE is the mode of the result, while CMP_MODE specifies in which
3483 mode the comparison is done in, so it is the mode of the operands.
   Returns a simplified rtx or 0.
   NOTE(review): this listing is elided -- embedded original line numbers
   are non-consecutive, so some conditions and braces are missing.  */
3486 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3487 enum machine_mode cmp_mode, rtx op0, rtx op1)
3489 enum rtx_code op0code = GET_CODE (op0);
3491 if (GET_CODE (op1) == CONST_INT)
3493 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3495 /* If op0 is a comparison, extract the comparison arguments from it. */
3498 if (GET_MODE (op0) == mode)
3499 return simplify_rtx (op0);
3501 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3502 XEXP (op0, 0), XEXP (op0, 1));
/* Comparing a comparison against 0 with EQ is the reversed comparison
   (when the reverse is known).  */
3504 else if (code == EQ)
3506 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3507 if (new_code != UNKNOWN)
3508 return simplify_gen_relational (new_code, mode, VOIDmode,
3509 XEXP (op0, 0), XEXP (op0, 1));
3514 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3515 if ((code == EQ || code == NE)
3516 && (op0code == PLUS || op0code == MINUS)
3518 && CONSTANT_P (XEXP (op0, 1))
3519 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3521 rtx x = XEXP (op0, 0);
3522 rtx c = XEXP (op0, 1);
3524 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3526 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3529 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3530 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3532 && op1 == const0_rtx
3533 && GET_MODE_CLASS (mode) == MODE_INT
3534 && cmp_mode != VOIDmode
3535 /* ??? Work-around BImode bugs in the ia64 backend. */
3537 && cmp_mode != BImode
3538 && nonzero_bits (op0, cmp_mode) == 1
3539 && STORE_FLAG_VALUE == 1)
3540 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3541 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3542 : lowpart_subreg (mode, op0, cmp_mode);
3547 /* Check if the given comparison (done in the given MODE) is actually a
3548 tautology or a contradiction.
3549 If no simplification is possible, this function returns zero.
3550 Otherwise, it returns either const_true_rtx or const0_rtx.
   NOTE(review): this listing is elided -- embedded original line numbers
   are non-consecutive, so case labels, braces and several statements are
   missing from view.  Comments below describe only visible code.  */
3553 simplify_const_relational_operation (enum rtx_code code,
3554 enum machine_mode mode,
3557 int equal, op0lt, op0ltu, op1lt, op1ltu;
3562 gcc_assert (mode != VOIDmode
3563 || (GET_MODE (op0) == VOIDmode
3564 && GET_MODE (op1) == VOIDmode));
3566 /* If op0 is a compare, extract the comparison arguments from it. */
3567 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3569 op1 = XEXP (op0, 1);
3570 op0 = XEXP (op0, 0);
3572 if (GET_MODE (op0) != VOIDmode)
3573 mode = GET_MODE (op0);
3574 else if (GET_MODE (op1) != VOIDmode)
3575 mode = GET_MODE (op1);
3580 /* We can't simplify MODE_CC values since we don't know what the
3581 actual comparison is. */
3582 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3585 /* Make sure the constant is second. */
3586 if (swap_commutative_operands_p (op0, op1))
3588 tem = op0, op0 = op1, op1 = tem;
3589 code = swap_condition (code);
3592 trueop0 = avoid_constant_pool_reference (op0);
3593 trueop1 = avoid_constant_pool_reference (op1);
3595 /* For integer comparisons of A and B maybe we can simplify A - B and can
3596 then simplify a comparison of that with zero. If A and B are both either
3597 a register or a CONST_INT, this can't help; testing for these cases will
3598 prevent infinite recursion here and speed things up.
3600 If CODE is an unsigned comparison, then we can never do this optimization,
3601 because it gives an incorrect result if the subtraction wraps around zero.
3602 ANSI C defines unsigned operations such that they never overflow, and
3603 thus such cases can not be ignored; but we cannot do it even for
3604 signed comparisons for languages such as Java, so test flag_wrapv. */
3606 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3607 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3608 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3609 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3610 /* We cannot do this for == or != if tem is a nonzero address. */
3611 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3612 && code != GTU && code != GEU && code != LTU && code != LEU)
3613 return simplify_const_relational_operation (signed_condition (code),
3614 mode, tem, const0_rtx);
3616 if (flag_unsafe_math_optimizations && code == ORDERED)
3617 return const_true_rtx;
3619 if (flag_unsafe_math_optimizations && code == UNORDERED)
3622 /* For modes without NaNs, if the two operands are equal, we know the
3623 result except if they have side-effects. */
3624 if (! HONOR_NANS (GET_MODE (trueop0))
3625 && rtx_equal_p (trueop0, trueop1)
3626 && ! side_effects_p (trueop0))
3627 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3629 /* If the operands are floating-point constants, see if we can fold
3631 else if (GET_CODE (trueop0) == CONST_DOUBLE
3632 && GET_CODE (trueop1) == CONST_DOUBLE
3633 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3635 REAL_VALUE_TYPE d0, d1;
3637 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3638 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3640 /* Comparisons are unordered iff at least one of the values is NaN. */
3641 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3651 return const_true_rtx;
3664 equal = REAL_VALUES_EQUAL (d0, d1);
3665 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3666 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3669 /* Otherwise, see if the operands are both integers. */
3670 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3671 && (GET_CODE (trueop0) == CONST_DOUBLE
3672 || GET_CODE (trueop0) == CONST_INT)
3673 && (GET_CODE (trueop1) == CONST_DOUBLE
3674 || GET_CODE (trueop1) == CONST_INT))
3676 int width = GET_MODE_BITSIZE (mode);
3677 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3678 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3680 /* Get the two words comprising each integer constant. */
3681 if (GET_CODE (trueop0) == CONST_DOUBLE)
3683 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3684 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3688 l0u = l0s = INTVAL (trueop0);
3689 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3692 if (GET_CODE (trueop1) == CONST_DOUBLE)
3694 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3695 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3699 l1u = l1s = INTVAL (trueop1);
3700 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3703 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3704 we have to sign or zero-extend the values. */
3705 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3707 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3708 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3710 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3711 l0s |= ((HOST_WIDE_INT) (-1) << width);
3713 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3714 l1s |= ((HOST_WIDE_INT) (-1) << width);
3716 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3717 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
/* Double-word comparisons: signed order uses the signed high words,
   unsigned order uses the unsigned high words; ties on the high word
   are broken by the (always unsigned) low word.  */
3719 equal = (h0u == h1u && l0u == l1u);
3720 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3721 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3722 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3723 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3726 /* Otherwise, there are some code-specific tests we can make. */
3729 /* Optimize comparisons with upper and lower bounds. */
3730 if (SCALAR_INT_MODE_P (mode)
3731 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3744 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3751 /* x >= min is always true. */
3752 if (rtx_equal_p (trueop1, mmin))
3753 tem = const_true_rtx;
3759 /* x <= max is always true. */
3760 if (rtx_equal_p (trueop1, mmax))
3761 tem = const_true_rtx;
3766 /* x > max is always false. */
3767 if (rtx_equal_p (trueop1, mmax))
3773 /* x < min is always false. */
3774 if (rtx_equal_p (trueop1, mmin))
3781 if (tem == const0_rtx
3782 || tem == const_true_rtx)
/* EQ/NE against zero can be settled when op0 is a provably nonzero
   address.  */
3789 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3794 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3795 return const_true_rtx;
3799 /* Optimize abs(x) < 0.0. */
3800 if (trueop1 == CONST0_RTX (mode)
3801 && !HONOR_SNANS (mode)
3802 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3804 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3806 if (GET_CODE (tem) == ABS)
3812 /* Optimize abs(x) >= 0.0. */
3813 if (trueop1 == CONST0_RTX (mode)
3814 && !HONOR_NANS (mode)
3815 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3817 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3819 if (GET_CODE (tem) == ABS)
3820 return const_true_rtx;
3825 /* Optimize ! (abs(x) < 0.0). */
3826 if (trueop1 == CONST0_RTX (mode))
3828 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3830 if (GET_CODE (tem) == ABS)
3831 return const_true_rtx;
3842 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
/* Final dispatch: map CODE onto the precomputed ordering flags.  */
3848 return equal ? const_true_rtx : const0_rtx;
3851 return ! equal ? const_true_rtx : const0_rtx;
3854 return op0lt ? const_true_rtx : const0_rtx;
3857 return op1lt ? const_true_rtx : const0_rtx;
3859 return op0ltu ? const_true_rtx : const0_rtx;
3861 return op1ltu ? const_true_rtx : const0_rtx;
3864 return equal || op0lt ? const_true_rtx : const0_rtx;
3867 return equal || op1lt ? const_true_rtx : const0_rtx;
3869 return equal || op0ltu ? const_true_rtx : const0_rtx;
3871 return equal || op1ltu ? const_true_rtx : const0_rtx;
3873 return const_true_rtx;
3881 /* Simplify CODE, an operation with result mode MODE and three operands,
3882 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3883 a constant. Return 0 if no simplifications is possible. */
/* NOTE(review): this is a gapped numbered listing -- the embedded original
   line numbers (3881, 3882, ...) jump, so intervening source lines (the
   opening brace, the dispatching switch and its case labels, local
   declarations, break statements) are NOT shown here.  Comments below that
   name specific rtx codes for a section are inferences from the visible
   code and are marked as such.  */
3886 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3887 enum machine_mode op0_mode, rtx op0, rtx op1,
3890 unsigned int width = GET_MODE_BITSIZE (mode);
3892 /* VOIDmode means "infinite" precision. */
3894 width = HOST_BITS_PER_WIDE_INT;
/* Constant-fold a bit-field extraction: OP0 is the source value, OP1 the
   field width in bits, OP2 the field position (see the shifts/masks below).
   The explicit SIGN_EXTRACT test further down implies this section also
   serves ZERO_EXTRACT -- the case labels themselves are not shown; TODO
   confirm against the full source.  */
3900 if (GET_CODE (op0) == CONST_INT
3901 && GET_CODE (op1) == CONST_INT
3902 && GET_CODE (op2) == CONST_INT
3903 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3904 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3906 /* Extracting a bit-field from a constant */
3907 HOST_WIDE_INT val = INTVAL (op0);
3909 if (BITS_BIG_ENDIAN)
3910 val >>= (GET_MODE_BITSIZE (op0_mode)
3911 - INTVAL (op2) - INTVAL (op1))
3913 val >>= INTVAL (op2);
3915 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3917 /* First zero-extend. */
3918 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3919 /* If desired, propagate sign bit. */
3920 if (code == SIGN_EXTRACT
3921 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3922 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3925 /* Clear the bits that don't belong in our mode,
3926 unless they and our sign bit are all one.
3927 So we get either a reasonable negative value or a reasonable
3928 unsigned value for this mode. */
3929 if (width < HOST_BITS_PER_WIDE_INT
3930 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3931 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3932 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3934 return gen_int_mode (val, mode);
/* Conditional selection: a known-constant OP0 picks OP1 (nonzero) or OP2
   (zero).  Presumably the IF_THEN_ELSE case, matching the
   gen_rtx_IF_THEN_ELSE fallback at original line 4010 below -- the case
   label is not shown.  */
3939 if (GET_CODE (op0) == CONST_INT)
3940 return op0 != const0_rtx ? op1 : op2;
3942 /* Convert c ? a : a into "a". */
3943 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3946 /* Convert a != b ? a : b into "a". */
3947 if (GET_CODE (op0) == NE
3948 && ! side_effects_p (op0)
3949 && ! HONOR_NANS (mode)
3950 && ! HONOR_SIGNED_ZEROS (mode)
3951 && ((rtx_equal_p (XEXP (op0, 0), op1)
3952 && rtx_equal_p (XEXP (op0, 1), op2))
3953 || (rtx_equal_p (XEXP (op0, 0), op2)
3954 && rtx_equal_p (XEXP (op0, 1), op1))))
3957 /* Convert a == b ? a : b into "b". */
3958 if (GET_CODE (op0) == EQ
3959 && ! side_effects_p (op0)
3960 && ! HONOR_NANS (mode)
3961 && ! HONOR_SIGNED_ZEROS (mode)
3962 && ((rtx_equal_p (XEXP (op0, 0), op1)
3963 && rtx_equal_p (XEXP (op0, 1), op2))
3964 || (rtx_equal_p (XEXP (op0, 0), op2)
3965 && rtx_equal_p (XEXP (op0, 1), op1))))
/* OP0 is a comparison: try to fold the whole conditional by simplifying or
   reversing the comparison itself.  */
3968 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3970 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3971 ? GET_MODE (XEXP (op0, 1))
3972 : GET_MODE (XEXP (op0, 0)));
3975 /* Look for happy constants in op1 and op2. */
3976 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3978 HOST_WIDE_INT t = INTVAL (op1);
3979 HOST_WIDE_INT f = INTVAL (op2);
3981 if (t == STORE_FLAG_VALUE && f == 0)
3982 code = GET_CODE (op0);
3983 else if (t == 0 && f == STORE_FLAG_VALUE)
/* (cond ? 0 : STORE_FLAG_VALUE) becomes the reversed comparison; the
   (unshown) lines around 3987-3993 presumably bail out when the
   comparison is not reversible -- TODO confirm.  */
3986 tmp = reversed_comparison_code (op0, NULL_RTX);
3994 return simplify_gen_relational (code, mode, cmp_mode,
3995 XEXP (op0, 0), XEXP (op0, 1));
3998 if (cmp_mode == VOIDmode)
3999 cmp_mode = op0_mode;
4000 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4001 cmp_mode, XEXP (op0, 0),
4004 /* See if any simplifications were possible. */
4007 if (GET_CODE (temp) == CONST_INT)
4008 return temp == const0_rtx ? op2 : op1;
4010 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
/* Element-wise merge of two equal-mode vectors under the bit mask in OP2:
   mask bit i set selects OP0's element i, clear selects OP1's (see the
   RTVEC_ELT ternary below).  Presumably the VEC_MERGE case -- the case
   label and the all-zero/all-one mask return values are not shown.  */
4016 gcc_assert (GET_MODE (op0) == mode);
4017 gcc_assert (GET_MODE (op1) == mode);
4018 gcc_assert (VECTOR_MODE_P (mode));
4019 op2 = avoid_constant_pool_reference (op2);
4020 if (GET_CODE (op2) == CONST_INT)
4022 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4023 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4024 int mask = (1 << n_elts) - 1;
4026 if (!(INTVAL (op2) & mask))
4028 if ((INTVAL (op2) & mask) == mask)
4031 op0 = avoid_constant_pool_reference (op0);
4032 op1 = avoid_constant_pool_reference (op1);
4033 if (GET_CODE (op0) == CONST_VECTOR
4034 && GET_CODE (op1) == CONST_VECTOR)
4036 rtvec v = rtvec_alloc (n_elts);
4039 for (i = 0; i < n_elts; i++)
4040 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4041 ? CONST_VECTOR_ELT (op0, i)
4042 : CONST_VECTOR_ELT (op1, i));
4043 return gen_rtx_CONST_VECTOR (mode, v);
4055 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4056 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4058 Works by unpacking OP into a collection of 8-bit values
4059 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4060 and then repacking them again for OUTERMODE. */
/* NOTE(review): gapped numbered listing -- the embedded original line
   numbers jump, so local declarations (value_bit, max_bitsize, num_elem,
   elems, elem_bitsize, vp, ...), case labels, braces and several
   statements are NOT shown here.  */
4063 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4064 enum machine_mode innermode, unsigned int byte)
4066 /* We support up to 512-bit values (for V8DFmode). */
4070 value_mask = (1 << value_bit) - 1
4072 unsigned char value[max_bitsize / value_bit];
4081 rtvec result_v = NULL;
4082 enum mode_class outer_class;
4083 enum machine_mode outer_submode;
4085 /* Some ports misuse CCmode. */
4086 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4089 /* We have no way to represent a complex constant at the rtl level. */
4090 if (COMPLEX_MODE_P (outermode))
4093 /* Unpack the value. */
4095 if (GET_CODE (op) == CONST_VECTOR)
4097 num_elem = CONST_VECTOR_NUNITS (op);
4098 elems = &CONST_VECTOR_ELT (op, 0);
4099 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4105 elem_bitsize = max_bitsize;
4107 /* If this asserts, it is too complicated; reducing value_bit may help. */
4108 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4109 /* I don't know how to handle endianness of sub-units. */
4110 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
/* Unpack loop: spread each source element EL into the little-endian VALUE
   array, one value_bit-sized chunk per slot.  */
4112 for (elem = 0; elem < num_elem; elem++)
4115 rtx el = elems[elem];
4117 /* Vectors are kept in target memory order. (This is probably
4120 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
/* NOTE(review): this inner BYTE local (and the per-element endianness
   shuffle below) shadows the BYTE parameter; the parameter is only used
   later, in the renumbering at original line 4204 onward.  */
4121 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4123 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4124 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4125 unsigned bytele = (subword_byte % UNITS_PER_WORD
4126 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4127 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
/* Dispatch on the element's rtx code: visible arms handle CONST_INT,
   CONST_DOUBLE-as-integer (VOIDmode) and CONST_DOUBLE-as-float; the case
   labels themselves are absent from this listing.  */
4130 switch (GET_CODE (el))
4134 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4136 *vp++ = INTVAL (el) >> i;
4137 /* CONST_INTs are always logically sign-extended. */
4138 for (; i < elem_bitsize; i += value_bit)
4139 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4143 if (GET_MODE (el) == VOIDmode)
4145 /* If this triggers, someone should have generated a
4146 CONST_INT instead. */
4147 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4149 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4150 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4151 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4154 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4157 /* It shouldn't matter what's done here, so fill it with
4159 for (; i < elem_bitsize; i += value_bit)
4164 long tmp[max_bitsize / 32];
4165 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4167 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4168 gcc_assert (bitsize <= elem_bitsize);
4169 gcc_assert (bitsize % value_bit == 0);
4171 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4174 /* real_to_target produces its result in words affected by
4175 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4176 and use WORDS_BIG_ENDIAN instead; see the documentation
4177 of SUBREG in rtl.texi. */
4178 for (i = 0; i < bitsize; i += value_bit)
4181 if (WORDS_BIG_ENDIAN)
4182 ibase = bitsize - 1 - i;
4185 *vp++ = tmp[ibase / 32] >> i % 32;
4188 /* It shouldn't matter what's done here, so fill it with
4190 for (; i < elem_bitsize; i += value_bit)
4200 /* Now, pick the right byte to start with. */
4201 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4202 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4203 will already have offset 0. */
4204 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4206 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4208 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4209 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4210 byte = (subword_byte % UNITS_PER_WORD
4211 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4214 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4215 so if it's become negative it will instead be very large.) */
4216 gcc_assert (byte < GET_MODE_SIZE (innermode));
4218 /* Convert from bytes to chunks of size value_bit. */
4219 value_start = byte * (BITS_PER_UNIT / value_bit);
4221 /* Re-pack the value. */
4223 if (VECTOR_MODE_P (outermode))
4225 num_elem = GET_MODE_NUNITS (outermode);
4226 result_v = rtvec_alloc (num_elem);
4227 elems = &RTVEC_ELT (result_v, 0);
4228 outer_submode = GET_MODE_INNER (outermode);
4234 outer_submode = outermode;
4237 outer_class = GET_MODE_CLASS (outer_submode);
4238 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4240 gcc_assert (elem_bitsize % value_bit == 0);
4241 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
/* Repack loop: assemble each result element of OUTER_SUBMODE from the
   VALUE chunks, mirroring the unpack loop's endianness shuffle.  */
4243 for (elem = 0; elem < num_elem; elem++)
4247 /* Vectors are stored in target memory order. (This is probably
4250 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4251 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4253 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4254 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4255 unsigned bytele = (subword_byte % UNITS_PER_WORD
4256 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4257 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
/* Visible arms repack into integers (gen_int_mode / immed_double_const)
   and floats (real_from_target); other MODE_* cases are not shown.  */
4260 switch (outer_class)
4263 case MODE_PARTIAL_INT:
4265 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4268 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4270 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4271 for (; i < elem_bitsize; i += value_bit)
4272 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4273 << (i - HOST_BITS_PER_WIDE_INT));
4275 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4277 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4278 elems[elem] = gen_int_mode (lo, outer_submode);
4279 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4280 elems[elem] = immed_double_const (lo, hi, outer_submode);
4289 long tmp[max_bitsize / 32];
4291 /* real_from_target wants its input in words affected by
4292 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4293 and use WORDS_BIG_ENDIAN instead; see the documentation
4294 of SUBREG in rtl.texi. */
4295 for (i = 0; i < max_bitsize / 32; i++)
4297 for (i = 0; i < elem_bitsize; i += value_bit)
4300 if (WORDS_BIG_ENDIAN)
4301 ibase = elem_bitsize - 1 - i;
4304 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4307 real_from_target (&r, tmp, outer_submode);
4308 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
/* Vector results wrap the rtvec; the scalar return path (presumably
   elems[0]) is on unshown lines after 4317 -- TODO confirm.  */
4316 if (VECTOR_MODE_P (outermode))
4317 return gen_rtx_CONST_VECTOR (outermode, result_v);
4322 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4323 Return 0 if no simplifications are possible. */
/* NOTE(review): gapped numbered listing -- embedded original line numbers
   jump, so braces, some `if` headers (e.g. the REG_P / MEM_P tests opening
   the hard-register and memory sections), declarations and several return
   statements are NOT shown here.  */
4325 simplify_subreg (enum machine_mode outermode, rtx op,
4326 enum machine_mode innermode, unsigned int byte)
4328 /* Little bit of sanity checking. */
4329 gcc_assert (innermode != VOIDmode);
4330 gcc_assert (outermode != VOIDmode);
4331 gcc_assert (innermode != BLKmode);
4332 gcc_assert (outermode != BLKmode);
4334 gcc_assert (GET_MODE (op) == innermode
4335 || GET_MODE (op) == VOIDmode);
4337 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4338 gcc_assert (byte < GET_MODE_SIZE (innermode));
/* No-op subreg: same mode, zero offset.  The returned value on the unshown
   next line is presumably OP itself -- TODO confirm.  */
4340 if (outermode == innermode && !byte)
/* Constants fold immediately via the byte-level evaluator above.  */
4343 if (GET_CODE (op) == CONST_INT
4344 || GET_CODE (op) == CONST_DOUBLE
4345 || GET_CODE (op) == CONST_VECTOR)
4346 return simplify_immed_subreg (outermode, op, innermode, byte);
4348 /* Changing mode twice with SUBREG => just change it once,
4349 or not at all if changing back op starting mode. */
4350 if (GET_CODE (op) == SUBREG)
4352 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4353 int final_offset = byte + SUBREG_BYTE (op);
4356 if (outermode == innermostmode
4357 && byte == 0 && SUBREG_BYTE (op) == 0)
4358 return SUBREG_REG (op);
4360 /* The SUBREG_BYTE represents offset, as if the value were stored
4361 in memory. Irritating exception is paradoxical subreg, where
4362 we define SUBREG_BYTE to be 0. On big endian machines, this
4363 value should be negative. For a moment, undo this exception. */
4364 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4366 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4367 if (WORDS_BIG_ENDIAN)
4368 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4369 if (BYTES_BIG_ENDIAN)
4370 final_offset += difference % UNITS_PER_WORD;
/* Same correction for the inner subreg when IT is the paradoxical one.  */
4372 if (SUBREG_BYTE (op) == 0
4373 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4375 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4376 if (WORDS_BIG_ENDIAN)
4377 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4378 if (BYTES_BIG_ENDIAN)
4379 final_offset += difference % UNITS_PER_WORD;
4382 /* See whether resulting subreg will be paradoxical. */
4383 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4385 /* In nonparadoxical subregs we can't handle negative offsets. */
4386 if (final_offset < 0)
4388 /* Bail out in case resulting subreg would be incorrect. */
4389 if (final_offset % GET_MODE_SIZE (outermode)
4390 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4396 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4398 /* In paradoxical subreg, see if we are still looking on lower part.
4399 If so, our SUBREG_BYTE will be 0. */
4400 if (WORDS_BIG_ENDIAN)
4401 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4402 if (BYTES_BIG_ENDIAN)
4403 offset += difference % UNITS_PER_WORD;
4404 if (offset == final_offset)
4410 /* Recurse for further possible simplifications. */
4411 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4415 if (validate_subreg (outermode, innermostmode,
4416 SUBREG_REG (op), final_offset))
4417 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4421 /* SUBREG of a hard register => just change the register number
4422 and/or mode. If the hard register is not valid in that mode,
4423 suppress this simplification. If the hard register is the stack,
4424 frame, or argument pointer, leave this as a SUBREG. */
/* NOTE(review): the opening of this condition (original lines 4425-4426,
   presumably the REG_P (op) test) is absent from this listing.  */
4427 && REGNO (op) < FIRST_PSEUDO_REGISTER
4428 #ifdef CANNOT_CHANGE_MODE_CLASS
4429 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4430 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4431 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4433 && ((reload_completed && !frame_pointer_needed)
4434 || (REGNO (op) != FRAME_POINTER_REGNUM
4435 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4436 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4439 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4440 && REGNO (op) != ARG_POINTER_REGNUM
4442 && REGNO (op) != STACK_POINTER_REGNUM
4443 && subreg_offset_representable_p (REGNO (op), innermode,
4446 unsigned int regno = REGNO (op);
4447 unsigned int final_regno
4448 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4450 /* ??? We do allow it if the current REG is not valid for
4451 its mode. This is a kludge to work around how float/complex
4452 arguments are passed on 32-bit SPARC and should be fixed. */
4453 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4454 || ! HARD_REGNO_MODE_OK (regno, innermode))
4456 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
4458 /* Propagate original regno. We don't have any way to specify
4459 the offset inside original regno, so do so only for lowpart.
4460 The information is used only by alias analysis that can not
4461 grog partial register anyway. */
4463 if (subreg_lowpart_offset (outermode, innermode) == byte)
4464 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4469 /* If we have a SUBREG of a register that we are replacing and we are
4470 replacing it with a MEM, make a new MEM and try replacing the
4471 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4472 or if we would be widening it. */
/* NOTE(review): the MEM_P (op) test opening this condition (original
   lines 4473-4474) is absent from this listing.  */
4475 && ! mode_dependent_address_p (XEXP (op, 0))
4476 /* Allow splitting of volatile memory references in case we don't
4477 have instruction to move the whole thing. */
4478 && (! MEM_VOLATILE_P (op)
4479 || ! have_insn_for (SET, innermode))
4480 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4481 return adjust_address_nv (op, outermode, byte);
4483 /* Handle complex values represented as CONCAT
4484 of real and imaginary part. */
4485 if (GET_CODE (op) == CONCAT)
4487 unsigned int inner_size, final_offset;
4490 inner_size = GET_MODE_UNIT_SIZE (innermode);
4491 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4492 final_offset = byte % inner_size;
4493 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4496 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4499 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4500 return gen_rtx_SUBREG (outermode, part, final_offset);
4504 /* Optimize SUBREG truncations of zero and sign extended values. */
4505 if ((GET_CODE (op) == ZERO_EXTEND
4506 || GET_CODE (op) == SIGN_EXTEND)
4507 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4509 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4511 /* If we're requesting the lowpart of a zero or sign extension,
4512 there are three possibilities. If the outermode is the same
4513 as the origmode, we can omit both the extension and the subreg.
4514 If the outermode is not larger than the origmode, we can apply
4515 the truncation without the extension. Finally, if the outermode
4516 is larger than the origmode, but both are integer modes, we
4517 can just extend to the appropriate mode. */
4520 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4521 if (outermode == origmode)
4522 return XEXP (op, 0);
4523 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4524 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4525 subreg_lowpart_offset (outermode,
4527 if (SCALAR_INT_MODE_P (outermode))
4528 return simplify_gen_unary (GET_CODE (op), outermode,
4529 XEXP (op, 0), origmode);
4532 /* A SUBREG resulting from a zero extension may fold to zero if
4533 it extracts higher bits that the ZERO_EXTEND's source bits. */
4534 if (GET_CODE (op) == ZERO_EXTEND
4535 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4536 return CONST0_RTX (outermode);
4539 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4540 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4541 the outer subreg is effectively a truncation to the original mode. */
4542 if ((GET_CODE (op) == LSHIFTRT
4543 || GET_CODE (op) == ASHIFTRT)
4544 && SCALAR_INT_MODE_P (outermode)
4545 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4546 to avoid the possibility that an outer LSHIFTRT shifts by more
4547 than the sign extension's sign_bit_copies and introduces zeros
4548 into the high bits of the result. */
4549 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4550 && GET_CODE (XEXP (op, 1)) == CONST_INT
4551 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4552 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4553 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4554 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4555 return simplify_gen_binary (ASHIFTRT, outermode,
4556 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4558 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4559 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4560 the outer subreg is effectively a truncation to the original mode. */
4561 if ((GET_CODE (op) == LSHIFTRT
4562 || GET_CODE (op) == ASHIFTRT)
4563 && SCALAR_INT_MODE_P (outermode)
4564 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4565 && GET_CODE (XEXP (op, 1)) == CONST_INT
4566 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4567 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4568 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4569 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4570 return simplify_gen_binary (LSHIFTRT, outermode,
4571 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4573 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4574 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4575 the outer subreg is effectively a truncation to the original mode. */
4576 if (GET_CODE (op) == ASHIFT
4577 && SCALAR_INT_MODE_P (outermode)
4578 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4579 && GET_CODE (XEXP (op, 1)) == CONST_INT
4580 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4581 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4582 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4583 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4584 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4585 return simplify_gen_binary (ASHIFT, outermode,
4586 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4591 /* Make a SUBREG operation or equivalent if it folds. */
/* NOTE(review): gapped numbered listing -- original lines 4592-4593,
   4596-4598, 4600-4602 and 4606-4607 (return type, declarations, the early
   return of NEWX when simplify_subreg succeeds, and the failure return)
   are absent from this view.  */
4594 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4595 enum machine_mode innermode, unsigned int byte)
/* First try the full simplifier; presumably NEWX is returned when non-null
   (the unshown lines 4600-4602) -- TODO confirm against full source.  */
4599 newx = simplify_subreg (outermode, op, innermode, byte);
/* A raw SUBREG must not wrap another SUBREG, a CONCAT, or a VOIDmode
   value; the (unshown) branch body presumably bails out for these.  */
4603 if (GET_CODE (op) == SUBREG
4604 || GET_CODE (op) == CONCAT
4605 || GET_MODE (op) == VOIDmode)
/* Otherwise emit the SUBREG only when it is structurally valid.  */
4608 if (validate_subreg (outermode, innermode, op, byte))
4609 return gen_rtx_SUBREG (outermode, op, byte);
4614 /* Simplify X, an rtx expression.
4616 Return the simplified expression or NULL if no simplifications
4619 This is the preferred entry point into the simplification routines;
4620 however, we still allow passes to call the more specific routines.
4622 Right now GCC has three (yes, three) major bodies of RTL simplification
4623 code that need to be unified.
4625 1. fold_rtx in cse.c. This code uses various CSE specific
4626 information to aid in RTL simplification.
4628 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4629 it uses combine specific information to aid in RTL
4632 3. The routines in this file.
4635 Long term we want to only have one body of simplification code; to
4636 get to that state I recommend the following steps:
4638 1. Pour over fold_rtx & simplify_rtx and move any simplifications
4639 which are not pass dependent state into these routines.
4641 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4642 use this routine whenever possible.
4644 3. Allow for pass dependent state to be provided to these
4645 routines and add simplifications based on the pass dependent
4646 state. Remove code from cse.c & combine.c that becomes
4649 It will take time, but ultimately the compiler will be easier to
4650 maintain and improve. It's totally silly that when we add a
4651 simplification that it needs to be added to 4 places (3 for RTL
4652 simplification and 1 for tree simplification. */
4655 simplify_rtx (rtx x)
4657 enum rtx_code code = GET_CODE (x);
4658 enum machine_mode mode = GET_MODE (x);
4660 switch (GET_RTX_CLASS (code))
4663 return simplify_unary_operation (code, mode,
4664 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4665 case RTX_COMM_ARITH:
4666 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4667 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4669 /* Fall through.... */
4672 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4675 case RTX_BITFIELD_OPS:
4676 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4677 XEXP (x, 0), XEXP (x, 1),
4681 case RTX_COMM_COMPARE:
4682 return simplify_relational_operation (code, mode,
4683 ((GET_MODE (XEXP (x, 0))
4685 ? GET_MODE (XEXP (x, 0))
4686 : GET_MODE (XEXP (x, 1))),
4692 return simplify_gen_subreg (mode, SUBREG_REG (x),
4693 GET_MODE (SUBREG_REG (x)),
4700 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4701 if (GET_CODE (XEXP (x, 0)) == HIGH
4702 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))