/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
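/* For instance, HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) is
   (HOST_WIDE_INT) -1 and HWI_SIGN_EXTEND ((HOST_WIDE_INT) 42) is
   (HOST_WIDE_INT) 0, so the pair (low, HWI_SIGN_EXTEND (low))
   represents the same signed value as LOW alone.  */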
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
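/* The truncation matters at the extremes: in QImode the most negative
   value is -128, its mathematical negation 128 does not fit, and
   gen_int_mode wraps it back to -128, matching the target's two's
   complement behavior.  */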
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
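/* With a 64-bit HOST_WIDE_INT, for example, mode_signbit_p (SImode, x)
   accepts only (const_int -2147483648): its masked low 32 bits are
   exactly 1 << 31.  Any constant with additional bits set, or with a
   clear top bit, is rejected.  */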
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }

  return x;
}
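/* simplify_replace_rtx folds while it rebuilds: replacing
   (reg:SI 100) with (const_int 0) in (plus:SI (reg:SI 100)
   (reg:SI 101)) goes through simplify_gen_binary, the x + 0 rule
   fires, and the result is just (reg:SI 101) rather than a PLUS with
   a zero operand.  */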
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);
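      /* Both rewrites follow from the two's complement identity
         ~X == -X - 1: substituting X - 1 gives ~(X - 1) == -X, and
         substituting -X gives ~(-X) == X - 1.  */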
      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
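      /* E.g. (not (and X Y)) becomes (ior (not X) (not Y)), and
         (not (ior X (not Y))) becomes (and (not X) Y), since the
         inner double negation folds away.  */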
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx)
        {
          if (STORE_FLAG_VALUE == 1)
            return simplify_gen_binary (ASHIFTRT, mode, XEXP (op, 0),
                                        GEN_INT (GET_MODE_BITSIZE (mode) - 1));
          else if (STORE_FLAG_VALUE == -1)
            return simplify_gen_binary (LSHIFTRT, mode, XEXP (op, 0),
                                        GEN_INT (GET_MODE_BITSIZE (mode) - 1));
        }
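      /* E.g. in SImode with STORE_FLAG_VALUE == 1, (neg (lt X 0))
         becomes (ashiftrt X 31): the arithmetic shift smears the sign
         bit, yielding -1 exactly when X < 0 and 0 otherwise, which is
         the negation of the 0/1 comparison result.  */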
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG if TRULY_NOOP_TRUNCATION
         is nonzero for the corresponding modes.  But don't do this
         for an (LSHIFTRT (MULT ...)) since this will cause problems
         with the umulXi3_highpart patterns.  */
      if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                 GET_MODE_BITSIZE (GET_MODE (op)))
          && num_sign_bit_copies (op, GET_MODE (op))
             >= (unsigned int) (GET_MODE_BITSIZE (mode) + 1)
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
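      /* E.g. (truncate:HI (ashiftrt:SI X (const_int 24))) has at
         least 25 identical top bits in SImode, so every bit of the
         HImode result is a sign copy and the lowpart subreg is exact
         when the target truncates for free.  */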
      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || ((unsigned) significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF X).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && ((unsigned) significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);
      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
    case PARITY:
      /* (pop* (zero_extend <X>)) = (pop* <X>) */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (code, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
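      /* E.g. (fix:SI (const_double 3.0e9)) folds to the SImode upper
         bound 0x7fffffff, and an (unsigned_fix:SI ...) of a negative
         or NaN operand folds to 0, per the saturating semantics
         described above.  */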
    }

  return 0;
}

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
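      /* For example, (plus (plus X (const_int 1)) (const_int 2))
         reaches the "(a op b) op c" -> "a op (b op c)" attempt above:
         the two constants fold to 3 and the result is
         (plus X (const_int 3)).  */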
    }

  return 0;
}

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;
      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : orig;
            }
        }
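      /* E.g. (plus (mult X (const_int 3)) X) matches with
         coefficients 3 and 1, so the sum folds to
         (mult X (const_int 4)), kept only if it is no more expensive
         than the original PLUS.  */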
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case COMPARE:
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : orig;
            }
        }
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      else if (GET_CODE (trueop1) == CONST_DOUBLE
               && (GET_MODE (trueop1) == VOIDmode
                   || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
               && GET_MODE (op0) == mode
               && CONST_DOUBLE_LOW (trueop1) == 0
               && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
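      /* E.g. (mult:SI X (const_int 8)) becomes
         (ashift:SI X (const_int 3)); a CONST_DOUBLE multiplier whose
         low word is zero and whose high word is a power of two
         likewise becomes a shift by HOST_BITS_PER_WIDE_INT plus the
         high word's bit position.  */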
      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (GET_CODE (op1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opleft = op0;
          opright = op1;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && GET_CODE (XEXP (opleft, 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
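      /* E.g. in SImode, (ior (ashift X (const_int 24))
         (lshiftrt X (const_int 8))) satisfies 24 + 8 == 32 and
         becomes (rotate X (const_int 24)).  */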
      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
          && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                          (AND, mode, XEXP (op0, 0),
                                           GEN_INT (INTVAL (XEXP (op0, 1))
                                                    & ~INTVAL (op1))),
                                    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));
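      /* E.g. in SImode, (xor (ashift X (const_int 28))
         (lshiftrt X (const_int 4))) has disjoint nonzero bits
         (0xf0000000 vs. 0x0fffffff), so it becomes the equivalent IOR
         and can then be recognized as (rotate X (const_int 28)).  */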
2076 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2077 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2080 int num_negated = 0;
2082 if (GET_CODE (op0) == NOT)
2083 num_negated++, op0 = XEXP (op0, 0);
2084 if (GET_CODE (op1) == NOT)
2085 num_negated++, op1 = XEXP (op1, 0);
2087 if (num_negated == 2)
2088 return simplify_gen_binary (XOR, mode, op0, op1);
2089 else if (num_negated == 1)
2090 return simplify_gen_unary (NOT, mode,
2091 simplify_gen_binary (XOR, mode, op0, op1),
2095 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2096 correspond to a machine insn or result in further simplifications
2097 if B is a constant. */
2099 if (GET_CODE (op0) == AND
2100 && rtx_equal_p (XEXP (op0, 1), op1)
2101 && ! side_effects_p (op1))
2102 return simplify_gen_binary (AND, mode,
2103 simplify_gen_unary (NOT, mode,
2104 XEXP (op0, 0), mode),
2107 else if (GET_CODE (op0) == AND
2108 && rtx_equal_p (XEXP (op0, 0), op1)
2109 && ! side_effects_p (op1))
2110 return simplify_gen_binary (AND, mode,
2111 simplify_gen_unary (NOT, mode,
2112 XEXP (op0, 1), mode),
2115 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2116 comparison if STORE_FLAG_VALUE is 1. */
2117 if (STORE_FLAG_VALUE == 1
2118 && trueop1 == const1_rtx
2119 && COMPARISON_P (op0)
2120 && (reversed = reversed_comparison (op0, mode)))
2123 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2124 is (lt foo (const_int 0)), so we can perform the above
2125 simplification if STORE_FLAG_VALUE is 1. */
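/* E.g. in SImode, (xor (lshiftrt x 31) 1) inverts the sign bit
   of X, which is exactly (ge x 0). */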
2127 if (STORE_FLAG_VALUE == 1
2128 && trueop1 == const1_rtx
2129 && GET_CODE (op0) == LSHIFTRT
2130 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2131 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2132 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2134 /* (xor (comparison foo bar) (const_int sign-bit))
2135 when STORE_FLAG_VALUE is the sign bit. */
2136 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2137 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2138 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2139 && trueop1 == const_true_rtx
2140 && COMPARISON_P (op0)
2141 && (reversed = reversed_comparison (op0, mode)))
2146 tem = simplify_associative_operation (code, mode, op0, op1);
2152 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2154 /* If we are turning off bits already known off in OP0, we need
2156 if (GET_CODE (trueop1) == CONST_INT
2157 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2158 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2160 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2161 && GET_MODE_CLASS (mode) != MODE_CC)
2164 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2165 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2166 && ! side_effects_p (op0)
2167 && GET_MODE_CLASS (mode) != MODE_CC)
2168 return CONST0_RTX (mode);
2170 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2171 there are no nonzero bits of C outside of X's mode. */
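/* E.g. (and (sign_extend:SI (x:QI)) 0x7f) becomes
   (zero_extend:SI (and:QI x 0x7f)): the mask discards every bit
   the extension could have introduced above QImode. */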
2172 if ((GET_CODE (op0) == SIGN_EXTEND
2173 || GET_CODE (op0) == ZERO_EXTEND)
2174 && GET_CODE (trueop1) == CONST_INT
2175 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2176 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2177 & INTVAL (trueop1)) == 0)
2179 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2180 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2181 gen_int_mode (INTVAL (trueop1),
2183 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2186 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2187 insn (and may simplify more). */
2188 if (GET_CODE (op0) == XOR
2189 && rtx_equal_p (XEXP (op0, 0), op1)
2190 && ! side_effects_p (op1))
2191 return simplify_gen_binary (AND, mode,
2192 simplify_gen_unary (NOT, mode,
2193 XEXP (op0, 1), mode),
2196 if (GET_CODE (op0) == XOR
2197 && rtx_equal_p (XEXP (op0, 1), op1)
2198 && ! side_effects_p (op1))
2199 return simplify_gen_binary (AND, mode,
2200 simplify_gen_unary (NOT, mode,
2201 XEXP (op0, 0), mode),
2204 /* Similarly for (~(A ^ B)) & A. */
2205 if (GET_CODE (op0) == NOT
2206 && GET_CODE (XEXP (op0, 0)) == XOR
2207 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2208 && ! side_effects_p (op1))
2209 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2211 if (GET_CODE (op0) == NOT
2212 && GET_CODE (XEXP (op0, 0)) == XOR
2213 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2214 && ! side_effects_p (op1))
2215 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2217 /* Convert (A | B) & A to A. */
2218 if (GET_CODE (op0) == IOR
2219 && (rtx_equal_p (XEXP (op0, 0), op1)
2220 || rtx_equal_p (XEXP (op0, 1), op1))
2221 && ! side_effects_p (XEXP (op0, 0))
2222 && ! side_effects_p (XEXP (op0, 1)))
2225 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2226 ((A & N) + B) & M -> (A + B) & M
2227 Similarly if (N & M) == 0,
2228 ((A | N) + B) & M -> (A + B) & M
2229 and for - instead of + and/or ^ instead of |. */
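/* E.g. with M == 0xff and N == 0xfff, ((A & 0xfff) + B) & 0xff
   becomes (A + B) & 0xff: the bits of A cleared by N all lie above
   the bits kept by M, and carries propagate only upward, so the
   masked sum is unchanged. */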
2230 if (GET_CODE (trueop1) == CONST_INT
2231 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2232 && ~INTVAL (trueop1)
2233 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2234 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2239 pmop[0] = XEXP (op0, 0);
2240 pmop[1] = XEXP (op0, 1);
2242 for (which = 0; which < 2; which++)
2245 switch (GET_CODE (tem))
2248 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2249 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2250 == INTVAL (trueop1))
2251 pmop[which] = XEXP (tem, 0);
2255 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2256 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2257 pmop[which] = XEXP (tem, 0);
2264 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2266 tem = simplify_gen_binary (GET_CODE (op0), mode,
2268 return simplify_gen_binary (code, mode, tem, op1);
2271 tem = simplify_associative_operation (code, mode, op0, op1);
2277 /* 0/x is 0 (or x&0 if x has side-effects). */
2278 if (trueop0 == CONST0_RTX (mode))
2280 if (side_effects_p (op1))
2281 return simplify_gen_binary (AND, mode, op1, trueop0);
2285 if (trueop1 == CONST1_RTX (mode))
2286 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2287 /* Convert divide by power of two into shift. */
2288 if (GET_CODE (trueop1) == CONST_INT
2289 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2290 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2294 /* Handle floating point and integers separately. */
2295 if (SCALAR_FLOAT_MODE_P (mode))
2297 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2298 safe for modes with NaNs, since 0.0 / 0.0 will then be
2299 NaN rather than 0.0. Nor is it safe for modes with signed
2300 zeros, since dividing 0 by a negative number gives -0.0 */
2301 if (trueop0 == CONST0_RTX (mode)
2302 && !HONOR_NANS (mode)
2303 && !HONOR_SIGNED_ZEROS (mode)
2304 && ! side_effects_p (op1))
2307 if (trueop1 == CONST1_RTX (mode)
2308 && !HONOR_SNANS (mode))
2311 if (GET_CODE (trueop1) == CONST_DOUBLE
2312 && trueop1 != CONST0_RTX (mode))
2315 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2318 if (REAL_VALUES_EQUAL (d, dconstm1)
2319 && !HONOR_SNANS (mode))
2320 return simplify_gen_unary (NEG, mode, op0, mode);
2322 /* Change FP division by a constant into multiplication.
2323 Only do this with -funsafe-math-optimizations. */
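/* E.g. (div x 4.0) becomes (mult x 0.25). The reciprocal is
   rounded to the nearest representable value, so the product can
   differ from the exact quotient; hence the guard below. */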
2324 if (flag_unsafe_math_optimizations
2325 && !REAL_VALUES_EQUAL (d, dconst0))
2327 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2328 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2329 return simplify_gen_binary (MULT, mode, op0, tem);
2335 /* 0/x is 0 (or x&0 if x has side-effects). */
2336 if (trueop0 == CONST0_RTX (mode))
2338 if (side_effects_p (op1))
2339 return simplify_gen_binary (AND, mode, op1, trueop0);
2343 if (trueop1 == CONST1_RTX (mode))
2344 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2346 if (trueop1 == constm1_rtx)
2348 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2349 return simplify_gen_unary (NEG, mode, x, mode);
2355 /* 0%x is 0 (or x&0 if x has side-effects). */
2356 if (trueop0 == CONST0_RTX (mode))
2358 if (side_effects_p (op1))
2359 return simplify_gen_binary (AND, mode, op1, trueop0);
2362 /* x%1 is 0 (or x&0 if x has side-effects). */
2363 if (trueop1 == CONST1_RTX (mode))
2365 if (side_effects_p (op0))
2366 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2367 return CONST0_RTX (mode);
2369 /* Implement modulus by power of two as AND. */
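/* E.g. (umod x 16) becomes (and x 15): for unsigned operands the
   remainder modulo a power of two is just the low-order bits. */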
2370 if (GET_CODE (trueop1) == CONST_INT
2371 && exact_log2 (INTVAL (trueop1)) > 0)
2372 return simplify_gen_binary (AND, mode, op0,
2373 GEN_INT (INTVAL (trueop1) - 1));
2377 /* 0%x is 0 (or x&0 if x has side-effects). */
2378 if (trueop0 == CONST0_RTX (mode))
2380 if (side_effects_p (op1))
2381 return simplify_gen_binary (AND, mode, op1, trueop0);
2384 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2385 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2387 if (side_effects_p (op0))
2388 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2389 return CONST0_RTX (mode);
2396 /* Rotating ~0 always results in ~0. */
2397 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2398 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2399 && ! side_effects_p (op1))
2402 /* Fall through.... */
2406 if (trueop1 == CONST0_RTX (mode))
2408 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2413 if (width <= HOST_BITS_PER_WIDE_INT
2414 && GET_CODE (trueop1) == CONST_INT
2415 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2416 && ! side_effects_p (op0))
2418 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2420 tem = simplify_associative_operation (code, mode, op0, op1);
2426 if (width <= HOST_BITS_PER_WIDE_INT
2427 && GET_CODE (trueop1) == CONST_INT
2428 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2429 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2430 && ! side_effects_p (op0))
2432 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2434 tem = simplify_associative_operation (code, mode, op0, op1);
2440 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2442 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2444 tem = simplify_associative_operation (code, mode, op0, op1);
2450 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2452 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2454 tem = simplify_associative_operation (code, mode, op0, op1);
2463 /* ??? There are simplifications that can be done. */
2467 if (!VECTOR_MODE_P (mode))
2469 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2470 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2471 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2472 gcc_assert (XVECLEN (trueop1, 0) == 1);
2473 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2475 if (GET_CODE (trueop0) == CONST_VECTOR)
2476 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2481 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2482 gcc_assert (GET_MODE_INNER (mode)
2483 == GET_MODE_INNER (GET_MODE (trueop0)));
2484 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2486 if (GET_CODE (trueop0) == CONST_VECTOR)
2488 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2489 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2490 rtvec v = rtvec_alloc (n_elts);
2493 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2494 for (i = 0; i < n_elts; i++)
2496 rtx x = XVECEXP (trueop1, 0, i);
2498 gcc_assert (GET_CODE (x) == CONST_INT);
2499 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2503 return gen_rtx_CONST_VECTOR (mode, v);
2507 if (XVECLEN (trueop1, 0) == 1
2508 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2509 && GET_CODE (trueop0) == VEC_CONCAT)
2512 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2514 /* Try to find the element in the VEC_CONCAT. */
2515 while (GET_MODE (vec) != mode
2516 && GET_CODE (vec) == VEC_CONCAT)
2518 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2519 if (offset < vec_size)
2520 vec = XEXP (vec, 0);
2524 vec = XEXP (vec, 1);
2526 vec = avoid_constant_pool_reference (vec);
2529 if (GET_MODE (vec) == mode)
2536 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2537 ? GET_MODE (trueop0)
2538 : GET_MODE_INNER (mode));
2539 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2540 ? GET_MODE (trueop1)
2541 : GET_MODE_INNER (mode));
2543 gcc_assert (VECTOR_MODE_P (mode));
2544 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2545 == GET_MODE_SIZE (mode));
2547 if (VECTOR_MODE_P (op0_mode))
2548 gcc_assert (GET_MODE_INNER (mode)
2549 == GET_MODE_INNER (op0_mode));
2551 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2553 if (VECTOR_MODE_P (op1_mode))
2554 gcc_assert (GET_MODE_INNER (mode)
2555 == GET_MODE_INNER (op1_mode));
2557 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2559 if ((GET_CODE (trueop0) == CONST_VECTOR
2560 || GET_CODE (trueop0) == CONST_INT
2561 || GET_CODE (trueop0) == CONST_DOUBLE)
2562 && (GET_CODE (trueop1) == CONST_VECTOR
2563 || GET_CODE (trueop1) == CONST_INT
2564 || GET_CODE (trueop1) == CONST_DOUBLE))
2566 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2567 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2568 rtvec v = rtvec_alloc (n_elts);
2570 unsigned in_n_elts = 1;
2572 if (VECTOR_MODE_P (op0_mode))
2573 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2574 for (i = 0; i < n_elts; i++)
2578 if (!VECTOR_MODE_P (op0_mode))
2579 RTVEC_ELT (v, i) = trueop0;
2581 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2585 if (!VECTOR_MODE_P (op1_mode))
2586 RTVEC_ELT (v, i) = trueop1;
2588 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2593 return gen_rtx_CONST_VECTOR (mode, v);
2606 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2609 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2611 unsigned int width = GET_MODE_BITSIZE (mode);
2613 if (VECTOR_MODE_P (mode)
2614 && code != VEC_CONCAT
2615 && GET_CODE (op0) == CONST_VECTOR
2616 && GET_CODE (op1) == CONST_VECTOR)
2618 unsigned n_elts = GET_MODE_NUNITS (mode);
2619 enum machine_mode op0mode = GET_MODE (op0);
2620 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2621 enum machine_mode op1mode = GET_MODE (op1);
2622 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2623 rtvec v = rtvec_alloc (n_elts);
2626 gcc_assert (op0_n_elts == n_elts);
2627 gcc_assert (op1_n_elts == n_elts);
2628 for (i = 0; i < n_elts; i++)
2630 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2631 CONST_VECTOR_ELT (op0, i),
2632 CONST_VECTOR_ELT (op1, i));
2635 RTVEC_ELT (v, i) = x;
2638 return gen_rtx_CONST_VECTOR (mode, v);
2641 if (VECTOR_MODE_P (mode)
2642 && code == VEC_CONCAT
2643 && CONSTANT_P (op0) && CONSTANT_P (op1))
2645 unsigned n_elts = GET_MODE_NUNITS (mode);
2646 rtvec v = rtvec_alloc (n_elts);
2648 gcc_assert (n_elts >= 2);
2651 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2652 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2654 RTVEC_ELT (v, 0) = op0;
2655 RTVEC_ELT (v, 1) = op1;
2659 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2660 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2663 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2664 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2665 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2667 for (i = 0; i < op0_n_elts; ++i)
2668 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2669 for (i = 0; i < op1_n_elts; ++i)
2670 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2673 return gen_rtx_CONST_VECTOR (mode, v);
2676 if (SCALAR_FLOAT_MODE_P (mode)
2677 && GET_CODE (op0) == CONST_DOUBLE
2678 && GET_CODE (op1) == CONST_DOUBLE
2679 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2690 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2692 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2694 for (i = 0; i < 4; i++)
2711 real_from_target (&r, tmp0, mode);
2712 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2716 REAL_VALUE_TYPE f0, f1, value, result;
2719 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2720 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2721 real_convert (&f0, mode, &f0);
2722 real_convert (&f1, mode, &f1);
2724 if (HONOR_SNANS (mode)
2725 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2729 && REAL_VALUES_EQUAL (f1, dconst0)
2730 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2733 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2734 && flag_trapping_math
2735 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2737 int s0 = REAL_VALUE_NEGATIVE (f0);
2738 int s1 = REAL_VALUE_NEGATIVE (f1);
2743 /* Inf + -Inf = NaN plus exception. */
2748 /* Inf - Inf = NaN plus exception. */
2753 /* Inf / Inf = NaN plus exception. */
2760 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2761 && flag_trapping_math
2762 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2763 || (REAL_VALUE_ISINF (f1)
2764 && REAL_VALUES_EQUAL (f0, dconst0))))
2765 /* Inf * 0 = NaN plus exception. */
2768 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2770 real_convert (&result, mode, &value);
2772 /* Don't constant fold this floating point operation if
2773 the result has overflowed and flag_trapping_math is set. */
2775 if (flag_trapping_math
2776 && MODE_HAS_INFINITIES (mode)
2777 && REAL_VALUE_ISINF (result)
2778 && !REAL_VALUE_ISINF (f0)
2779 && !REAL_VALUE_ISINF (f1))
2780 /* Overflow plus exception. */
2783 /* Don't constant fold this floating point operation if the
2784 result may depend upon the run-time rounding mode and
2785 flag_rounding_math is set, or if GCC's software emulation
2786 is unable to accurately represent the result. */
2788 if ((flag_rounding_math
2789 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2790 && !flag_unsafe_math_optimizations))
2791 && (inexact || !real_identical (&result, &value)))
2794 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2798 /* We can fold some multi-word operations. */
2799 if (GET_MODE_CLASS (mode) == MODE_INT
2800 && width == HOST_BITS_PER_WIDE_INT * 2
2801 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2802 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2804 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2805 HOST_WIDE_INT h1, h2, hv, ht;
2807 if (GET_CODE (op0) == CONST_DOUBLE)
2808 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2810 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2812 if (GET_CODE (op1) == CONST_DOUBLE)
2813 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2815 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2820 /* A - B == A + (-B). */
2821 neg_double (l2, h2, &lv, &hv);
2824 /* Fall through.... */
2827 add_double (l1, h1, l2, h2, &lv, &hv);
2831 mul_double (l1, h1, l2, h2, &lv, &hv);
2835 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2836 &lv, &hv, <, &ht))
2841 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2842 <, &ht, &lv, &hv))
2847 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2848 &lv, &hv, <, &ht))
2853 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2854 <, &ht, &lv, &hv))
2859 lv = l1 & l2, hv = h1 & h2;
2863 lv = l1 | l2, hv = h1 | h2;
2867 lv = l1 ^ l2, hv = h1 ^ h2;
2873 && ((unsigned HOST_WIDE_INT) l1
2874 < (unsigned HOST_WIDE_INT) l2)))
2883 && ((unsigned HOST_WIDE_INT) l1
2884 > (unsigned HOST_WIDE_INT) l2)))
2891 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2893 && ((unsigned HOST_WIDE_INT) l1
2894 < (unsigned HOST_WIDE_INT) l2)))
2901 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2903 && ((unsigned HOST_WIDE_INT) l1
2904 > (unsigned HOST_WIDE_INT) l2)))
2910 case LSHIFTRT: case ASHIFTRT:
2912 case ROTATE: case ROTATERT:
2913 if (SHIFT_COUNT_TRUNCATED)
2914 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2916 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2919 if (code == LSHIFTRT || code == ASHIFTRT)
2920 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2922 else if (code == ASHIFT)
2923 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2924 else if (code == ROTATE)
2925 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2926 else /* code == ROTATERT */
2927 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2934 return immed_double_const (lv, hv, mode);
2937 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
2938 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
2940 /* Get the integer argument values in two forms:
2941 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2943 arg0 = INTVAL (op0);
2944 arg1 = INTVAL (op1);
2946 if (width < HOST_BITS_PER_WIDE_INT)
2948 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2949 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2952 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2953 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2956 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2957 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2965 /* Compute the value of the arithmetic. */
2970 val = arg0s + arg1s;
2974 val = arg0s - arg1s;
2978 val = arg0s * arg1s;
2983 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2986 val = arg0s / arg1s;
2991 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2994 val = arg0s % arg1s;
2999 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3002 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3007 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3010 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3028 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3029 the value is in range. We can't return any old value for
3030 out-of-range arguments because either the middle-end (via
3031 shift_truncation_mask) or the back-end might be relying on
3032 target-specific knowledge. Nor can we rely on
3033 shift_truncation_mask, since the shift might not be part of an
3034 ashlM3, lshrM3 or ashrM3 instruction. */
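/* E.g. with SHIFT_COUNT_TRUNCATED in a 32-bit mode, a constant
   shift count of 33 folds as a shift by 1; without it, an
   out-of-range count is left for the target to interpret. */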
3035 if (SHIFT_COUNT_TRUNCATED)
3036 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3037 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3040 val = (code == ASHIFT
3041 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3042 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3044 /* Sign-extend the result for arithmetic right shifts. */
3045 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3046 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3054 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3055 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3063 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3064 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3068 /* Do nothing here. */
3072 val = arg0s <= arg1s ? arg0s : arg1s;
3076 val = ((unsigned HOST_WIDE_INT) arg0
3077 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3081 val = arg0s > arg1s ? arg0s : arg1s;
3085 val = ((unsigned HOST_WIDE_INT) arg0
3086 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3093 /* ??? There are simplifications that can be done. */
3100 return gen_int_mode (val, mode);
3108 /* Simplify a PLUS or MINUS, at least one of whose operands may be another PLUS or MINUS.
3111 Rather than test for specific cases, we do this by a brute-force method
3112 and do all possible simplifications until no more changes occur. Then
3113 we rebuild the operation. */
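/* For example, (plus (minus a b) (minus c a)) is flattened into the
   operand list {a, -b, c, -a}; the a and -a entries cancel, and the
   result is rebuilt as (minus c b). */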
3115 struct simplify_plus_minus_op_data
3123 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3125 const struct simplify_plus_minus_op_data *d1 = p1;
3126 const struct simplify_plus_minus_op_data *d2 = p2;
3129 result = (commutative_operand_precedence (d2->op)
3130 - commutative_operand_precedence (d1->op));
3133 return d1->ix - d2->ix;
3137 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3140 struct simplify_plus_minus_op_data ops[8];
3142 int n_ops = 2, input_ops = 2;
3143 int first, changed, canonicalized = 0;
3146 memset (ops, 0, sizeof ops);
3148 /* Set up the two operands and then expand them until nothing has been
3149 changed. If we run out of room in our array, give up; this should
3150 almost never happen. */
3155 ops[1].neg = (code == MINUS);
3161 for (i = 0; i < n_ops; i++)
3163 rtx this_op = ops[i].op;
3164 int this_neg = ops[i].neg;
3165 enum rtx_code this_code = GET_CODE (this_op);
3174 ops[n_ops].op = XEXP (this_op, 1);
3175 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3178 ops[i].op = XEXP (this_op, 0);
3181 canonicalized |= this_neg;
3185 ops[i].op = XEXP (this_op, 0);
3186 ops[i].neg = ! this_neg;
3193 && GET_CODE (XEXP (this_op, 0)) == PLUS
3194 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3195 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3197 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3198 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3199 ops[n_ops].neg = this_neg;
3207 /* ~a -> (-a - 1) */
3210 ops[n_ops].op = constm1_rtx;
3211 ops[n_ops++].neg = this_neg;
3212 ops[i].op = XEXP (this_op, 0);
3213 ops[i].neg = !this_neg;
3222 ops[i].op = neg_const_int (mode, this_op);
3236 gcc_assert (n_ops >= 2);
3239 int n_constants = 0;
3241 for (i = 0; i < n_ops; i++)
3242 if (GET_CODE (ops[i].op) == CONST_INT)
3245 if (n_constants <= 1)
3249 /* If we only have two operands, we can avoid the loops. */
3252 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3255 /* Get the two operands. Be careful with the order, especially for
3256 the cases where code == MINUS. */
3257 if (ops[0].neg && ops[1].neg)
3259 lhs = gen_rtx_NEG (mode, ops[0].op);
3262 else if (ops[0].neg)
3273 return simplify_const_binary_operation (code, mode, lhs, rhs);
3276 /* Now simplify each pair of operands until nothing changes. The first
3277 time through just simplify constants against each other. */
3284 for (i = 0; i < n_ops - 1; i++)
3285 for (j = i + 1; j < n_ops; j++)
3287 rtx lhs = ops[i].op, rhs = ops[j].op;
3288 int lneg = ops[i].neg, rneg = ops[j].neg;
3290 if (lhs != 0 && rhs != 0
3291 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
3293 enum rtx_code ncode = PLUS;
3299 tem = lhs, lhs = rhs, rhs = tem;
3301 else if (swap_commutative_operands_p (lhs, rhs))
3302 tem = lhs, lhs = rhs, rhs = tem;
3304 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3306 /* Reject "simplifications" that just wrap the two
3307 arguments in a CONST. Failure to do so can result
3308 in infinite recursion with simplify_binary_operation
3309 when it calls us to simplify CONST operations. */
3311 && ! (GET_CODE (tem) == CONST
3312 && GET_CODE (XEXP (tem, 0)) == ncode
3313 && XEXP (XEXP (tem, 0), 0) == lhs
3314 && XEXP (XEXP (tem, 0), 1) == rhs)
3315 /* Don't allow -x + -1 -> ~x simplifications in the
3316 first pass. This allows us the chance to combine
3317 the -1 with other constants. */
3319 && GET_CODE (tem) == NOT
3320 && XEXP (tem, 0) == rhs))
3323 if (GET_CODE (tem) == NEG)
3324 tem = XEXP (tem, 0), lneg = !lneg;
3325 if (GET_CODE (tem) == CONST_INT && lneg)
3326 tem = neg_const_int (mode, tem), lneg = 0;
3330 ops[j].op = NULL_RTX;
3340 /* Pack all the operands to the lower-numbered entries. */
3341 for (i = 0, j = 0; j < n_ops; j++)
3345 /* Stabilize sort. */
3351 /* Sort the operations based on swap_commutative_operands_p. */
3352 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
3354 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3356 && GET_CODE (ops[1].op) == CONST_INT
3357 && CONSTANT_P (ops[0].op)
3359 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3361 /* We suppressed creation of trivial CONST expressions in the
3362 combination loop to avoid recursion. Create one manually now.
3363 The combination loop should have ensured that there is exactly
3364 one CONST_INT, and the sort will have ensured that it is last
3365 in the array and that any other constant will be next-to-last. */
3368 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3369 && CONSTANT_P (ops[n_ops - 2].op))
3371 rtx value = ops[n_ops - 1].op;
3372 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3373 value = neg_const_int (mode, value);
3374 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3378 /* Put a non-negated operand first, if possible. */
3380 for (i = 0; i < n_ops && ops[i].neg; i++)
3383 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3392 /* Now make the result by performing the requested operations. */
3394 for (i = 1; i < n_ops; i++)
3395 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3396 mode, result, ops[i].op);
3401 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3403 plus_minus_operand_p (rtx x)
3405 return GET_CODE (x) == PLUS
3406 || GET_CODE (x) == MINUS
3407 || (GET_CODE (x) == CONST
3408 && GET_CODE (XEXP (x, 0)) == PLUS
3409 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3410 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3413 /* Like simplify_binary_operation except used for relational operators.
3414 MODE is the mode of the result. If MODE is VOIDmode, the operands
3415 must not both be VOIDmode.
3417 CMP_MODE specifies the mode in which the comparison is done, so it is
3418 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3419 the operands or, if both are VOIDmode, the operands are compared in
3420 "infinite precision". */
3422 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3423 enum machine_mode cmp_mode, rtx op0, rtx op1)
3425 rtx tem, trueop0, trueop1;
3427 if (cmp_mode == VOIDmode)
3428 cmp_mode = GET_MODE (op0);
3429 if (cmp_mode == VOIDmode)
3430 cmp_mode = GET_MODE (op1);
3432 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3435 if (SCALAR_FLOAT_MODE_P (mode))
3437 if (tem == const0_rtx)
3438 return CONST0_RTX (mode);
3439 #ifdef FLOAT_STORE_FLAG_VALUE
3441 REAL_VALUE_TYPE val;
3442 val = FLOAT_STORE_FLAG_VALUE (mode);
3443 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3449 if (VECTOR_MODE_P (mode))
3451 if (tem == const0_rtx)
3452 return CONST0_RTX (mode);
3453 #ifdef VECTOR_STORE_FLAG_VALUE
3458 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3459 if (val == NULL_RTX)
3461 if (val == const1_rtx)
3462 return CONST1_RTX (mode);
3464 units = GET_MODE_NUNITS (mode);
3465 v = rtvec_alloc (units);
3466 for (i = 0; i < units; i++)
3467 RTVEC_ELT (v, i) = val;
3468 return gen_rtx_raw_CONST_VECTOR (mode, v);
3478 /* For the following tests, ensure const0_rtx is op1. */
3479 if (swap_commutative_operands_p (op0, op1)
3480 || (op0 == const0_rtx && op1 != const0_rtx))
3481 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3483 /* If op0 is a compare, extract the comparison arguments from it. */
3484 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3485 return simplify_relational_operation (code, mode, VOIDmode,
3486 XEXP (op0, 0), XEXP (op0, 1));
3488 if (mode == VOIDmode
3489 || GET_MODE_CLASS (cmp_mode) == MODE_CC
3493 trueop0 = avoid_constant_pool_reference (op0);
3494 trueop1 = avoid_constant_pool_reference (op1);
3495 return simplify_relational_operation_1 (code, mode, cmp_mode,
3499 /* This part of simplify_relational_operation is only used when CMP_MODE
3500 is not in class MODE_CC (i.e. it is a real comparison).
3502 MODE is the mode of the result, while CMP_MODE specifies the mode
3503 in which the comparison is done, so it is the mode of the operands. */
3506 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3507 enum machine_mode cmp_mode, rtx op0, rtx op1)
3509 enum rtx_code op0code = GET_CODE (op0);
3511 if (GET_CODE (op1) == CONST_INT)
3513 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3515 /* If op0 is a comparison, extract the comparison arguments from it. */
3518 if (GET_MODE (op0) == mode)
3519 return simplify_rtx (op0);
3521 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3522 XEXP (op0, 0), XEXP (op0, 1));
3524 else if (code == EQ)
3526 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3527 if (new_code != UNKNOWN)
3528 return simplify_gen_relational (new_code, mode, VOIDmode,
3529 XEXP (op0, 0), XEXP (op0, 1));
3534 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
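/* E.g. (eq (plus x 3) 7) becomes (eq x 4); for a MINUS the
   constant is added to the other side instead. */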
3535 if ((code == EQ || code == NE)
3536 && (op0code == PLUS || op0code == MINUS)
3538 && CONSTANT_P (XEXP (op0, 1))
3539 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3541 rtx x = XEXP (op0, 0);
3542 rtx c = XEXP (op0, 1);
3544 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3546 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3549 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3550 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3552 && op1 == const0_rtx
3553 && GET_MODE_CLASS (mode) == MODE_INT
3554 && cmp_mode != VOIDmode
3555 /* ??? Work-around BImode bugs in the ia64 backend. */
3557 && cmp_mode != BImode
3558 && nonzero_bits (op0, cmp_mode) == 1
3559 && STORE_FLAG_VALUE == 1)
3560 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3561 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3562 : lowpart_subreg (mode, op0, cmp_mode);
3567 /* Check if the given comparison (done in the given MODE) is actually a
3568 tautology or a contradiction.
3569 If no simplification is possible, this function returns zero.
3570 Otherwise, it returns either const_true_rtx or const0_rtx. */
3573 simplify_const_relational_operation (enum rtx_code code,
3574 enum machine_mode mode,
3577 int equal, op0lt, op0ltu, op1lt, op1ltu;
3582 gcc_assert (mode != VOIDmode
3583 || (GET_MODE (op0) == VOIDmode
3584 && GET_MODE (op1) == VOIDmode));
3586 /* If op0 is a compare, extract the comparison arguments from it. */
3587 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3589 op1 = XEXP (op0, 1);
3590 op0 = XEXP (op0, 0);
3592 if (GET_MODE (op0) != VOIDmode)
3593 mode = GET_MODE (op0);
3594 else if (GET_MODE (op1) != VOIDmode)
3595 mode = GET_MODE (op1);
3600 /* We can't simplify MODE_CC values since we don't know what the
3601 actual comparison is. */
3602 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3605 /* Make sure the constant is second. */
3606 if (swap_commutative_operands_p (op0, op1))
3608 tem = op0, op0 = op1, op1 = tem;
3609 code = swap_condition (code);
3612 trueop0 = avoid_constant_pool_reference (op0);
3613 trueop1 = avoid_constant_pool_reference (op1);
3615 /* For integer comparisons of A and B maybe we can simplify A - B and can
3616 then simplify a comparison of that with zero. If A and B are both either
3617 a register or a CONST_INT, this can't help; testing for these cases will
3618 prevent infinite recursion here and speed things up.
3620 If CODE is an unsigned comparison, then we can never do this optimization,
3621 because it gives an incorrect result if the subtraction wraps around zero.
3622 ANSI C defines unsigned operations such that they never overflow, and
3623 thus such cases can not be ignored; but we cannot do it even for
3624 signed comparisons for languages such as Java, so test flag_wrapv. */
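/* E.g. (gt (plus x 4) (plus x 1)) first folds the subtraction to
   (const_int 3) and then reduces to (gt 3 0), which is always true
   when signed arithmetic does not wrap. */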
3626 if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3627 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3628 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3629 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3630 /* We cannot do this for == or != if tem is a nonzero address. */
3631 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
3632 && code != GTU && code != GEU && code != LTU && code != LEU)
3633 return simplify_const_relational_operation (signed_condition (code),
3634 mode, tem, const0_rtx);
3636 if (flag_unsafe_math_optimizations && code == ORDERED)
3637 return const_true_rtx;
3639 if (flag_unsafe_math_optimizations && code == UNORDERED)
3642 /* For modes without NaNs, if the two operands are equal, we know the
3643 result except if they have side-effects. */
3644 if (! HONOR_NANS (GET_MODE (trueop0))
3645 && rtx_equal_p (trueop0, trueop1)
3646 && ! side_effects_p (trueop0))
3647 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3649 /* If the operands are floating-point constants, see if we can fold
3651 else if (GET_CODE (trueop0) == CONST_DOUBLE
3652 && GET_CODE (trueop1) == CONST_DOUBLE
3653 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3655 REAL_VALUE_TYPE d0, d1;
3657 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3658 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3660 /* Comparisons are unordered iff at least one of the values is NaN. */
3661 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3671 return const_true_rtx;
3684 equal = REAL_VALUES_EQUAL (d0, d1);
3685 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3686 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3689 /* Otherwise, see if the operands are both integers. */
3690 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3691 && (GET_CODE (trueop0) == CONST_DOUBLE
3692 || GET_CODE (trueop0) == CONST_INT)
3693 && (GET_CODE (trueop1) == CONST_DOUBLE
3694 || GET_CODE (trueop1) == CONST_INT))
3696 int width = GET_MODE_BITSIZE (mode);
3697 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3698 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3700 /* Get the two words comprising each integer constant. */
3701 if (GET_CODE (trueop0) == CONST_DOUBLE)
3703 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3704 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3708 l0u = l0s = INTVAL (trueop0);
3709 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3712 if (GET_CODE (trueop1) == CONST_DOUBLE)
3714 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3715 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3719 l1u = l1s = INTVAL (trueop1);
3720 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3723 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3724 we have to sign or zero-extend the values. */
3725 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3727 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3728 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3730 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3731 l0s |= ((HOST_WIDE_INT) (-1) << width);
3733 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3734 l1s |= ((HOST_WIDE_INT) (-1) << width);
3736 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3737 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3739 equal = (h0u == h1u && l0u == l1u);
3740 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3741 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3742 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3743 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3746 /* Otherwise, there are some code-specific tests we can make. */
3749 /* Optimize comparisons with upper and lower bounds. */
3750 if (SCALAR_INT_MODE_P (mode)
3751 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3764 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3771 /* x >= min is always true. */
3772 if (rtx_equal_p (trueop1, mmin))
3773 tem = const_true_rtx;
3779 /* x <= max is always true. */
3780 if (rtx_equal_p (trueop1, mmax))
3781 tem = const_true_rtx;
3786 /* x > max is always false. */
3787 if (rtx_equal_p (trueop1, mmax))
3793 /* x < min is always false. */
3794 if (rtx_equal_p (trueop1, mmin))
3801 if (tem == const0_rtx
3802 || tem == const_true_rtx)
3809 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3814 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3815 return const_true_rtx;
3819 /* Optimize abs(x) < 0.0. */
3820 if (trueop1 == CONST0_RTX (mode)
3821 && !HONOR_SNANS (mode)
3822 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3824 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3826 if (GET_CODE (tem) == ABS)
3832 /* Optimize abs(x) >= 0.0. */
3833 if (trueop1 == CONST0_RTX (mode)
3834 && !HONOR_NANS (mode)
3835 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3837 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3839 if (GET_CODE (tem) == ABS)
3840 return const_true_rtx;
3845 /* Optimize ! (abs(x) < 0.0). */
3846 if (trueop1 == CONST0_RTX (mode))
3848 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3850 if (GET_CODE (tem) == ABS)
3851 return const_true_rtx;
3862 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set correctly. */
3868 return equal ? const_true_rtx : const0_rtx;
3871 return ! equal ? const_true_rtx : const0_rtx;
3874 return op0lt ? const_true_rtx : const0_rtx;
3877 return op1lt ? const_true_rtx : const0_rtx;
3879 return op0ltu ? const_true_rtx : const0_rtx;
3881 return op1ltu ? const_true_rtx : const0_rtx;
3884 return equal || op0lt ? const_true_rtx : const0_rtx;
3887 return equal || op1lt ? const_true_rtx : const0_rtx;
3889 return equal || op0ltu ? const_true_rtx : const0_rtx;
3891 return equal || op1ltu ? const_true_rtx : const0_rtx;
3893 return const_true_rtx;
3901 /* Simplify CODE, an operation with result mode MODE and three operands,
3902 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
3903 a constant. Return 0 if no simplification is possible. */
3906 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
3907 enum machine_mode op0_mode, rtx op0, rtx op1,
3910 unsigned int width = GET_MODE_BITSIZE (mode);
3912 /* VOIDmode means "infinite" precision. */
3914 width = HOST_BITS_PER_WIDE_INT;
3920 if (GET_CODE (op0) == CONST_INT
3921 && GET_CODE (op1) == CONST_INT
3922 && GET_CODE (op2) == CONST_INT
3923 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
3924 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
3926 /* Extracting a bit-field from a constant. */
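/* E.g. (sign_extract:SI (const_int 0xb4) (const_int 4) (const_int 2))
   with BITS_BIG_ENDIAN clear: 0xb4 >> 2 is 0x2d, the low four bits
   are 0xd, and since bit 3 of the field is set the result
   sign-extends to (const_int -3). */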
3927 HOST_WIDE_INT val = INTVAL (op0);
3929 if (BITS_BIG_ENDIAN)
3930 val >>= (GET_MODE_BITSIZE (op0_mode)
3931 - INTVAL (op2) - INTVAL (op1));
3933 val >>= INTVAL (op2);
3935 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
3937 /* First zero-extend. */
3938 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
3939 /* If desired, propagate sign bit. */
3940 if (code == SIGN_EXTRACT
3941 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
3942 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
3945 /* Clear the bits that don't belong in our mode,
3946 unless they and our sign bit are all one.
3947 So we get either a reasonable negative value or a reasonable
3948 unsigned value for this mode. */
3949 if (width < HOST_BITS_PER_WIDE_INT
3950 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
3951 != ((HOST_WIDE_INT) (-1) << (width - 1))))
3952 val &= ((HOST_WIDE_INT) 1 << width) - 1;
3954 return gen_int_mode (val, mode);
3959 if (GET_CODE (op0) == CONST_INT)
3960 return op0 != const0_rtx ? op1 : op2;
3962 /* Convert c ? a : a into "a". */
3963 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
3966 /* Convert a != b ? a : b into "a". */
3967 if (GET_CODE (op0) == NE
3968 && ! side_effects_p (op0)
3969 && ! HONOR_NANS (mode)
3970 && ! HONOR_SIGNED_ZEROS (mode)
3971 && ((rtx_equal_p (XEXP (op0, 0), op1)
3972 && rtx_equal_p (XEXP (op0, 1), op2))
3973 || (rtx_equal_p (XEXP (op0, 0), op2)
3974 && rtx_equal_p (XEXP (op0, 1), op1))))
3977 /* Convert a == b ? a : b into "b". */
3978 if (GET_CODE (op0) == EQ
3979 && ! side_effects_p (op0)
3980 && ! HONOR_NANS (mode)
3981 && ! HONOR_SIGNED_ZEROS (mode)
3982 && ((rtx_equal_p (XEXP (op0, 0), op1)
3983 && rtx_equal_p (XEXP (op0, 1), op2))
3984 || (rtx_equal_p (XEXP (op0, 0), op2)
3985 && rtx_equal_p (XEXP (op0, 1), op1))))
3988 if (COMPARISON_P (op0) && ! side_effects_p (op0))
3990 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
3991 ? GET_MODE (XEXP (op0, 1))
3992 : GET_MODE (XEXP (op0, 0)));
3995 /* Look for happy constants in op1 and op2. */
3996 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
3998 HOST_WIDE_INT t = INTVAL (op1);
3999 HOST_WIDE_INT f = INTVAL (op2);
4001 if (t == STORE_FLAG_VALUE && f == 0)
4002 code = GET_CODE (op0);
4003 else if (t == 0 && f == STORE_FLAG_VALUE)
4006 tmp = reversed_comparison_code (op0, NULL_RTX);
4014 return simplify_gen_relational (code, mode, cmp_mode,
4015 XEXP (op0, 0), XEXP (op0, 1));
4018 if (cmp_mode == VOIDmode)
4019 cmp_mode = op0_mode;
4020 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4021 cmp_mode, XEXP (op0, 0),
4024 /* See if any simplifications were possible. */
4027 if (GET_CODE (temp) == CONST_INT)
4028 return temp == const0_rtx ? op2 : op1;
4030 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4036 gcc_assert (GET_MODE (op0) == mode);
4037 gcc_assert (GET_MODE (op1) == mode);
4038 gcc_assert (VECTOR_MODE_P (mode));
4039 op2 = avoid_constant_pool_reference (op2);
4040 if (GET_CODE (op2) == CONST_INT)
4042 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4043 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4044 int mask = (1 << n_elts) - 1;
4046 if (!(INTVAL (op2) & mask))
4048 if ((INTVAL (op2) & mask) == mask)
4051 op0 = avoid_constant_pool_reference (op0);
4052 op1 = avoid_constant_pool_reference (op1);
4053 if (GET_CODE (op0) == CONST_VECTOR
4054 && GET_CODE (op1) == CONST_VECTOR)
4056 rtvec v = rtvec_alloc (n_elts);
4059 for (i = 0; i < n_elts; i++)
4060 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4061 ? CONST_VECTOR_ELT (op0, i)
4062 : CONST_VECTOR_ELT (op1, i));
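/* E.g. for V4SI with OP2 == (const_int 5), mask bits 0 and 2 are
   set, so elements 0 and 2 come from OP0 and elements 1 and 3
   from OP1. */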
4063 return gen_rtx_CONST_VECTOR (mode, v);
4075 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4076 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4078 Works by unpacking OP into a collection of 8-bit values
4079 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4080 and then repacking them again for OUTERMODE. */
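/* Conceptually, (subreg:SI (const_double:DI ...) 4) unpacks the
   DImode constant into eight bytes, selects the four bytes at
   offset 4 (after any endianness renumbering below), and repacks
   them as an SImode constant. */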
4083 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4084 enum machine_mode innermode, unsigned int byte)
4086 /* We support up to 512-bit values (for V8DFmode). */
4090 value_mask = (1 << value_bit) - 1
4092 unsigned char value[max_bitsize / value_bit];
4101 rtvec result_v = NULL;
4102 enum mode_class outer_class;
4103 enum machine_mode outer_submode;
4105 /* Some ports misuse CCmode. */
4106 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4109 /* We have no way to represent a complex constant at the rtl level. */
4110 if (COMPLEX_MODE_P (outermode))
4113 /* Unpack the value. */
4115 if (GET_CODE (op) == CONST_VECTOR)
4117 num_elem = CONST_VECTOR_NUNITS (op);
4118 elems = &CONST_VECTOR_ELT (op, 0);
4119 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4125 elem_bitsize = max_bitsize;
4127 /* If this asserts, it is too complicated; reducing value_bit may help. */
4128 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4129 /* I don't know how to handle endianness of sub-units. */
4130 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4132 for (elem = 0; elem < num_elem; elem++)
4135 rtx el = elems[elem];
4137 /* Vectors are kept in target memory order. (This is probably a mistake.) */
4140 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4141 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4143 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4144 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4145 unsigned bytele = (subword_byte % UNITS_PER_WORD
4146 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4147 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4150 switch (GET_CODE (el))
4154 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4156 *vp++ = INTVAL (el) >> i;
4157 /* CONST_INTs are always logically sign-extended. */
4158 for (; i < elem_bitsize; i += value_bit)
4159 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4163 if (GET_MODE (el) == VOIDmode)
4165 /* If this triggers, someone should have generated a
4166 CONST_INT instead. */
4167 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4169 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4170 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4171 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4174 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4177 /* It shouldn't matter what's done here, so fill it with zeros. */
4179 for (; i < elem_bitsize; i += value_bit)
4184 long tmp[max_bitsize / 32];
4185 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4187 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4188 gcc_assert (bitsize <= elem_bitsize);
4189 gcc_assert (bitsize % value_bit == 0);
4191 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4194 /* real_to_target produces its result in words affected by
4195 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4196 and use WORDS_BIG_ENDIAN instead; see the documentation
4197 of SUBREG in rtl.texi. */
4198 for (i = 0; i < bitsize; i += value_bit)
4201 if (WORDS_BIG_ENDIAN)
4202 ibase = bitsize - 1 - i;
4205 *vp++ = tmp[ibase / 32] >> i % 32;
4208 /* It shouldn't matter what's done here, so fill it with zeros. */
4210 for (; i < elem_bitsize; i += value_bit)
4220 /* Now, pick the right byte to start with. */
4221 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4222 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4223 will already have offset 0. */
4224 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4226 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4228 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4229 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4230 byte = (subword_byte % UNITS_PER_WORD
4231 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4234 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4235 so if it's become negative it will instead be very large.) */
4236 gcc_assert (byte < GET_MODE_SIZE (innermode));
4238 /* Convert from bytes to chunks of size value_bit. */
4239 value_start = byte * (BITS_PER_UNIT / value_bit);
4241 /* Re-pack the value. */
4243 if (VECTOR_MODE_P (outermode))
4245 num_elem = GET_MODE_NUNITS (outermode);
4246 result_v = rtvec_alloc (num_elem);
4247 elems = &RTVEC_ELT (result_v, 0);
4248 outer_submode = GET_MODE_INNER (outermode);
4254 outer_submode = outermode;
4257 outer_class = GET_MODE_CLASS (outer_submode);
4258 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4260 gcc_assert (elem_bitsize % value_bit == 0);
4261 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4263 for (elem = 0; elem < num_elem; elem++)
4267 /* Vectors are stored in target memory order. (This is probably a mistake.) */
4270 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4271 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4273 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4274 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4275 unsigned bytele = (subword_byte % UNITS_PER_WORD
4276 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4277 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4280 switch (outer_class)
4283 case MODE_PARTIAL_INT:
4285 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4288 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4290 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4291 for (; i < elem_bitsize; i += value_bit)
4292 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4293 << (i - HOST_BITS_PER_WIDE_INT));
4295 /* immed_double_const doesn't call trunc_int_for_mode. I don't know why. */
4297 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4298 elems[elem] = gen_int_mode (lo, outer_submode);
4299 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4300 elems[elem] = immed_double_const (lo, hi, outer_submode);
4307 case MODE_DECIMAL_FLOAT:
4310 long tmp[max_bitsize / 32];
4312 /* real_from_target wants its input in words affected by
4313 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4314 and use WORDS_BIG_ENDIAN instead; see the documentation
4315 of SUBREG in rtl.texi. */
4316 for (i = 0; i < max_bitsize / 32; i++)
4318 for (i = 0; i < elem_bitsize; i += value_bit)
4321 if (WORDS_BIG_ENDIAN)
4322 ibase = elem_bitsize - 1 - i;
4325 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4328 real_from_target (&r, tmp, outer_submode);
4329 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4337 if (VECTOR_MODE_P (outermode))
4338 return gen_rtx_CONST_VECTOR (outermode, result_v);
4343 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4344 Return 0 if no simplifications are possible. */
4346 simplify_subreg (enum machine_mode outermode, rtx op,
4347 enum machine_mode innermode, unsigned int byte)
4349 /* Little bit of sanity checking. */
4350 gcc_assert (innermode != VOIDmode);
4351 gcc_assert (outermode != VOIDmode);
4352 gcc_assert (innermode != BLKmode);
4353 gcc_assert (outermode != BLKmode);
4355 gcc_assert (GET_MODE (op) == innermode
4356 || GET_MODE (op) == VOIDmode);
4358 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4359 gcc_assert (byte < GET_MODE_SIZE (innermode));
4361 if (outermode == innermode && !byte)
4364 if (GET_CODE (op) == CONST_INT
4365 || GET_CODE (op) == CONST_DOUBLE
4366 || GET_CODE (op) == CONST_VECTOR)
4367 return simplify_immed_subreg (outermode, op, innermode, byte);
4369 /* Changing mode twice with SUBREG => just change it once,
4370 or not at all if changing back to op's starting mode. */
4371 if (GET_CODE (op) == SUBREG)
4373 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4374 int final_offset = byte + SUBREG_BYTE (op);
4377 if (outermode == innermostmode
4378 && byte == 0 && SUBREG_BYTE (op) == 0)
4379 return SUBREG_REG (op);
4381 /* The SUBREG_BYTE represents the offset, as if the value were stored
4382 in memory. The irritating exception is the paradoxical subreg, where
4383 we define SUBREG_BYTE to be 0. On big endian machines, this
4384 value would otherwise be negative. For a moment, undo this exception. */
4385 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4387 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4388 if (WORDS_BIG_ENDIAN)
4389 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4390 if (BYTES_BIG_ENDIAN)
4391 final_offset += difference % UNITS_PER_WORD;
4393 if (SUBREG_BYTE (op) == 0
4394 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4396 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4397 if (WORDS_BIG_ENDIAN)
4398 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4399 if (BYTES_BIG_ENDIAN)
4400 final_offset += difference % UNITS_PER_WORD;
4403 /* See whether resulting subreg will be paradoxical. */
4404 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4406 /* In nonparadoxical subregs we can't handle negative offsets. */
4407 if (final_offset < 0)
4409 /* Bail out in case resulting subreg would be incorrect. */
4410 if (final_offset % GET_MODE_SIZE (outermode)
4411 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4417 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4419 /* For a paradoxical subreg, see if we are still looking at the lower
4420 part. If so, our SUBREG_BYTE will be 0. */
4421 if (WORDS_BIG_ENDIAN)
4422 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4423 if (BYTES_BIG_ENDIAN)
4424 offset += difference % UNITS_PER_WORD;
4425 if (offset == final_offset)
4431 /* Recurse for further possible simplifications. */
4432 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4436 if (validate_subreg (outermode, innermostmode,
4437 SUBREG_REG (op), final_offset))
4438 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4442 /* Merge implicit and explicit truncations. */
4444 if (GET_CODE (op) == TRUNCATE
4445 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4446 && subreg_lowpart_offset (outermode, innermode) == byte)
4447 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4448 GET_MODE (XEXP (op, 0)));
4450 /* SUBREG of a hard register => just change the register number
4451 and/or mode. If the hard register is not valid in that mode,
4452 suppress this simplification. If the hard register is the stack,
4453 frame, or argument pointer, leave this as a SUBREG. */
4456 && REGNO (op) < FIRST_PSEUDO_REGISTER
4457 #ifdef CANNOT_CHANGE_MODE_CLASS
4458 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4459 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4460 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4462 && ((reload_completed && !frame_pointer_needed)
4463 || (REGNO (op) != FRAME_POINTER_REGNUM
4464 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4465 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4468 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4469 && REGNO (op) != ARG_POINTER_REGNUM
4471 && REGNO (op) != STACK_POINTER_REGNUM
4472 && subreg_offset_representable_p (REGNO (op), innermode,
4475 unsigned int regno = REGNO (op);
4476 unsigned int final_regno
4477 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4479 /* ??? We do allow it if the current REG is not valid for
4480 its mode. This is a kludge to work around how float/complex
4481 arguments are passed on 32-bit SPARC and should be fixed. */
4482 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4483 || ! HARD_REGNO_MODE_OK (regno, innermode))
4485 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
4487 /* Propagate the original regno. We don't have any way to specify
4488 the offset inside the original regno, so do so only for the lowpart.
4489 The information is used only by alias analysis, which cannot
4490 grok partial registers anyway. */
4492 if (subreg_lowpart_offset (outermode, innermode) == byte)
4493 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4498 /* If we have a SUBREG of a register that we are replacing and we are
4499 replacing it with a MEM, make a new MEM and try replacing the
4500 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4501 or if we would be widening it. */
4504 && ! mode_dependent_address_p (XEXP (op, 0))
4505 /* Allow splitting of volatile memory references in case we don't
4506 have an instruction to move the whole thing. */
4507 && (! MEM_VOLATILE_P (op)
4508 || ! have_insn_for (SET, innermode))
4509 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4510 return adjust_address_nv (op, outermode, byte);
4512 /* Handle complex values represented as CONCAT
4513 of real and imaginary part. */
4514 if (GET_CODE (op) == CONCAT)
4516 unsigned int inner_size, final_offset;
4519 inner_size = GET_MODE_UNIT_SIZE (innermode);
4520 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4521 final_offset = byte % inner_size;
4522 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4525 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4528 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4529 return gen_rtx_SUBREG (outermode, part, final_offset);
4533 /* Optimize SUBREG truncations of zero and sign extended values. */
4534 if ((GET_CODE (op) == ZERO_EXTEND
4535 || GET_CODE (op) == SIGN_EXTEND)
4536 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4538 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
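      /* E.g. (an illustrative example, not from the original sources):
         on a little-endian target, the lowpart
         (subreg:HI (zero_extend:SI (reg:QI 60)) 0) hits the third case
         and becomes (zero_extend:HI (reg:QI 60)).  */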
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
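      /* E.g. (an illustrative example, not from the original sources):
         on a little-endian target, the high word
         (subreg:SI (zero_extend:DI (reg:SI 60)) 4) reads only bits that
         the extension cleared, so it folds to (const_int 0).  */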
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }

  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
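  /* Concretely (an illustrative example, not from the original sources):
     (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI 60)) (const_int 3)) 0)
     becomes (ashiftrt:QI (reg:QI 60) (const_int 3)); the logical shift
     moves copies of the sign bit into the low byte, exactly as the
     arithmetic shift of the original QImode value does.  */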
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
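  /* E.g. (an illustrative example, not from the original sources):
     (subreg:QI (lshiftrt:SI (zero_extend:SI (reg:QI 60)) (const_int 3)) 0)
     becomes (lshiftrt:QI (reg:QI 60) (const_int 3)); the bits shifted
     into the low byte are the zeros supplied by the extension.  */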
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
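  /* E.g. (an illustrative example, not from the original sources):
     (subreg:QI (ashift:SI (zero_extend:SI (reg:QI 60)) (const_int 2)) 0)
     becomes (ashift:QI (reg:QI 60) (const_int 2)); a left shift cannot
     carry the discarded high bits into the low byte.  */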
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}

/* Make a SUBREG operation or equivalent if it folds.  */
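/* As an illustration (a hypothetical use, not from the original sources),
   a caller holding a DImode rtx OP can request its lowest-addressed
   SImode word with

	simplify_gen_subreg (SImode, op, DImode, 0);

   and gets back either a simplified expression, a fresh
   (subreg:SI op 0), or NULL_RTX if no valid subreg can be formed.  */
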
rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}

/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   that do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass-specific state to be provided to these
	   routines and add simplifications based on that state.  Remove
	   code from cse.c & combine.c that becomes redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in four places (three for RTL
   simplification and one for tree simplification).  */
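/* For instance (a hypothetical call, not from the original sources),
   simplify_rtx applied to (plus:SI (const_int 2) (const_int 3))
   dispatches to simplify_binary_operation and yields (const_int 5),
   while expressions that cannot be folded yield NULL.  */
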
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))