/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
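/* Worked example (illustrative sketch, an expository addition rather
   than original code): widening one unsigned wide int into a
   (low, high) pair with HWI_SIGN_EXTEND.  */
#if 0
#include <assert.h>
int
main (void)
{
  /* Stand-ins for the host-wide-int types used above.  */
  typedef long HOST_WIDE_INT;
  typedef unsigned long UHWI;
  UHWI low = (UHWI) -5;                 /* top bit set */
  HOST_WIDE_INT high = (((HOST_WIDE_INT) low) < 0)
                       ? (HOST_WIDE_INT) -1 : (HOST_WIDE_INT) 0;
  assert (high == -1);  /* the pair (low, high) now encodes -5 */
  return 0;
}
#endif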
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
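/* Usage sketch (expository addition, not original code): on a target
   where SImode is 32 bits, only the sign-bit constant satisfies the
   predicate; the masking above accepts both the zero-extended and the
   sign-extended CONST_INT encodings of 0x80000000.  */
#if 0
  mode_signbit_p (SImode, GEN_INT (0x80000000));	/* true  */
  mode_signbit_p (SImode, GEN_INT (0x40000000));	/* false */
  mode_signbit_p (SImode, const0_rtx);			/* false */
#endif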
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
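/* Usage sketch (expository addition, not original code; `x' is a
   hypothetical (reg:SI ...) rtx).  The helper folds when it can and
   otherwise builds a canonically ordered rtx.  */
#if 0
  rtx sum  = simplify_gen_binary (PLUS, SImode, x, const0_rtx);
  /* x + 0 simplifies, so sum == x and no PLUS rtx is allocated.  */
  rtx prod = simplify_gen_binary (MULT, SImode, GEN_INT (3), x);
  /* MULT is commutative, so the constant is placed second:
     (mult:SI x (const_int 3)).  */
#endif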
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
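/* Usage sketch (expository addition, not original code; `x' is a
   hypothetical (reg:SI ...) rtx).  Relational folding goes through
   the same "try to simplify, else build" pattern.  */
#if 0
  rtx t = simplify_gen_relational (EQ, SImode, SImode, x, x);
  /* Comparing a side-effect-free integer operand with itself folds
     to the mode's "true" constant instead of building an EQ rtx.  */
#endif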
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (rtx_equal_p (x, old_rtx))
	    return new_rtx;
	}
      break;

    default:
      break;
    }
  return x;
}
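/* Usage sketch (expository addition, not original code; `x' and `reg'
   are hypothetical rtxen).  Because the rebuild goes through the
   simplify_gen_* helpers, substitution and folding happen in one
   pass.  */
#if 0
  /* x is (plus:SI (reg:SI 60) (const_int -4)).  */
  rtx folded = simplify_replace_rtx (x, reg, GEN_INT (4));
  /* The PLUS is rebuilt as 4 + -4 and collapses to (const_int 0).  */
#endif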
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);
      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}
      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}
      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (GET_CODE (XEXP (op, 1)) == CONST_INT
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult (neg A) B).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx)
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_BITSIZE (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
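      /* Worked example (illustrative sketch, an expository addition):
	 with STORE_FLAG_VALUE == 1 and a 32-bit mode, (neg (lt x 0))
	 is an arithmetic right shift by 31.  Plain C analogue, valid
	 on targets where >> on a signed int is an arithmetic shift:  */
#if 0
#include <assert.h>
int
main (void)
{
  int x = -7;
  assert (-(x < 0) == (x >> 31));	/* -1 == -1 */
  x = 7;
  assert (-(x < 0) == (x >> 31));	/*  0 ==  0 */
  return 0;
}
#endif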
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
	 (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
				   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  Note that this is also
	 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
	 modes we just have to apply a different definition for
	 truncation.  But don't do this for an (LSHIFTRT (MULT ...))
	 since this will cause problems with the umulXi3_highpart
	 patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op)))
	   ? (num_sign_bit_copies (op, GET_MODE (op))
	      > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
				- GET_MODE_BITSIZE (mode)))
	   : truncated_to_mode (mode, op))
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && COMPARISON_P (op)
	  && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
							    0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x) */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.
	 */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || ((GET_MODE_BITSIZE (GET_MODE (op))
	       <= HOST_BITS_PER_WIDE_INT)
	      && ((nonzero_bits (op, GET_MODE (op))
		   & ((HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
		  == 0)))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
	return gen_rtx_NEG (mode, op);
      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
    case PARITY:
      /* (pop* (zero_extend <X>)) = (pop* <X>) */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (code, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (op)));
	}
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }
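  /* Worked example (illustrative sketch, an expository addition): the
     POPCOUNT/PARITY folding above uses the classic trick that
     "arg0 &= arg0 - 1" clears exactly one set bit per iteration.  */
#if 0
#include <assert.h>
int
main (void)
{
  unsigned long arg0 = 0xf0f0UL;	/* eight bits set */
  int val = 0;
  while (arg0)
    val++, arg0 &= arg0 - 1;
  assert (val == 8);			/* POPCOUNT */
  assert ((val & 1) == 0);		/* PARITY   */
  return 0;
}
#endif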
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
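/* Worked example (illustrative sketch, an expository addition): the
   bound tests above clamp out-of-range values, so folding
   (fix:SI (const_double 3.0e9)) yields INT_MAX, and a NaN operand
   yields zero.  C analogue of the same saturating conversion:  */
#if 0
#include <assert.h>
#include <limits.h>
int
main (void)
{
  double x = 3.0e9;			/* exceeds 2147483647 */
  long long v = ((double) INT_MAX < x ? INT_MAX
		 : x < (double) INT_MIN ? INT_MIN
		 : (long long) x);
  assert (v == INT_MAX);
  return 0;
}
#endif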
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
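/* Usage sketch (expository addition, not original code; `x' is a
   hypothetical (reg:SI ...) rtx).  Linearization groups the two
   constants together, and they then fold.  */
#if 0
  rtx t = simplify_gen_binary (PLUS, SImode,
			       simplify_gen_binary (PLUS, SImode, x,
						    const1_rtx),
			       GEN_INT (2));
  /* (plus (plus x 1) 2) is reassociated and folds to
     (plus:SI x (const_int 3)).  */
#endif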
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;
      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	  && GET_CODE (op1) == CONST_INT)
	return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
	       && GET_CODE (op0) == CONST_INT)
	return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */
      if (SCALAR_INT_MODE_P (mode))
	{
	  HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
	  unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
	  rtx lhs = op0, rhs = op1;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0l = -1;
	      coeff0h = -1;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
	    {
	      coeff0l = INTVAL (XEXP (lhs, 1));
	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
	      coeff0h = 0;
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1l = -1;
	      coeff1h = -1;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
	      coeff1l = INTVAL (XEXP (rhs, 1));
	      coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
	      coeff1h = 0;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      unsigned HOST_WIDE_INT l;
	      HOST_WIDE_INT h;

	      add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
	      coeff = immed_double_const (l, h, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		? tem : 0;
	    }
	}
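      /* Worked example (illustrative sketch, an expository addition):
	 the (low, high) coefficient pairs collected above merge two
	 occurrences of the same term; a NEG contributes -1 and an
	 ASHIFT a power of two.  The underlying arithmetic:  */
#if 0
#include <assert.h>
int
main (void)
{
  long x = 11;
  assert (x * 2 + x * 3 == x * 5);	/* MULT + MULT  */
  assert ((x << 2) + x == x * 5);	/* ASHIFT + x   */
  assert (-x + x * 3 == x * 2);		/* NEG + MULT   */
  return 0;
}
#endif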
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == XOR
	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);
      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case COMPARE:
#ifdef HAVE_cc0
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	 using cc0, in which case we want to leave it as a COMPARE
	 so we can distinguish it from a register-register-copy.

	 In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	   || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && trueop1 == CONST0_RTX (mode))
	return op0;
#endif

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -funsafe-math-optimizations.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
	  unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
	  rtx lhs = op0, rhs = op1;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0l = -1;
	      coeff0h = -1;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
	    {
	      coeff0l = INTVAL (XEXP (lhs, 1));
	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
	      coeff0h = 0;
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1l = 1;
	      negcoeff1h = 0;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
	      negcoeff1l = -INTVAL (XEXP (rhs, 1));
	      negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
	      negcoeff1h = -1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      unsigned HOST_WIDE_INT l;
	      HOST_WIDE_INT h;

	      add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
	      coeff = immed_double_const (l, h, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		? tem : 0;
	    }
	}
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (GET_CODE (op1) == CONST_INT
	      || GET_CODE (op1) == CONST_DOUBLE))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
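      /* Worked example (illustrative sketch, an expository addition):
	 the identity holds because the bits of (x & y) are a subset of
	 the bits of x, so the subtraction never borrows.  */
#if 0
#include <assert.h>
int
main (void)
{
  unsigned int x = 0xdeadbeefu, y = 0x0000ffffu;
  assert (x - (x & y) == (x & ~y));
  assert (x - (y & x) == (x & ~y));	/* the commuted form */
  return 0;
}
#endif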
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}
      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && (val = exact_log2 (INTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && (GET_MODE (trueop1) == VOIDmode
	      || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
	  && GET_MODE (op0) == mode
	  && CONST_DOUBLE_LOW (trueop1) == 0
	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
	return simplify_gen_binary (ASHIFT, mode, op0,
				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;

    case IOR:
      if (trueop1 == const0_rtx)
	return op0;
      if (GET_CODE (trueop1) == CONST_INT
	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (GET_CODE (op1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
	return op1;

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && GET_CODE (XEXP (opleft, 1)) == CONST_INT
	  && GET_CODE (XEXP (opright, 1)) == CONST_INT
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
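      /* Worked example (illustrative sketch, an expository addition):
	 the two shift counts must sum to the mode width for the pair
	 to be a rotate.  With a 32-bit unsigned int:  */
#if 0
#include <assert.h>
int
main (void)
{
  unsigned int a = 0x12345678u;
  assert (((a << 8) | (a >> 24)) == 0x34567812u);	/* 8 + 24 == 32 */
  return 0;
}
#endif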
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
	  && GET_CODE (XEXP (opright, 1)) == CONST_INT
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
					  (AND, mode, XEXP (op0, 0),
					   GEN_INT (INTVAL (XEXP (op0, 1))
						    & ~INTVAL (op1))),
				    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (XEXP (op0, 0), mask),
					XEXP (op0, 1));
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case XOR:
      if (trueop1 == const0_rtx)
	return op0;
      if (GET_CODE (trueop1) == CONST_INT
	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == PLUS
	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
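      /* Worked example (illustrative sketch, an expository addition):
	 when the operands share no nonzero bits, XOR, IOR and even
	 PLUS agree, which is what makes the conversion safe.  */
#if 0
#include <assert.h>
int
main (void)
{
  unsigned int hi = 0xff00u, lo = 0x00ffu;	/* disjoint bits */
  assert ((hi ^ lo) == (hi | lo));
  assert ((hi ^ lo) == hi + lo);
  return 0;
}
#endif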
2122 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2123 Also convert (XOR (NOT x) y) to (NOT (XOR x y)); similarly for (NOT y).  */
2126 int num_negated = 0;
2128 if (GET_CODE (op0) == NOT)
2129 num_negated++, op0 = XEXP (op0, 0);
2130 if (GET_CODE (op1) == NOT)
2131 num_negated++, op1 = XEXP (op1, 0);
2133 if (num_negated == 2)
2134 return simplify_gen_binary (XOR, mode, op0, op1);
2135 else if (num_negated == 1)
2136 return simplify_gen_unary (NOT, mode,
2137 simplify_gen_binary (XOR, mode, op0, op1),
2141 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2142 correspond to a machine insn or result in further simplifications
2143 if B is a constant. */
2145 if (GET_CODE (op0) == AND
2146 && rtx_equal_p (XEXP (op0, 1), op1)
2147 && ! side_effects_p (op1))
2148 return simplify_gen_binary (AND, mode,
2149 simplify_gen_unary (NOT, mode,
2150 XEXP (op0, 0), mode),
2153 else if (GET_CODE (op0) == AND
2154 && rtx_equal_p (XEXP (op0, 0), op1)
2155 && ! side_effects_p (op1))
2156 return simplify_gen_binary (AND, mode,
2157 simplify_gen_unary (NOT, mode,
2158 XEXP (op0, 1), mode),
2161 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2162 comparison if STORE_FLAG_VALUE is 1. */
2163 if (STORE_FLAG_VALUE == 1
2164 && trueop1 == const1_rtx
2165 && COMPARISON_P (op0)
2166 && (reversed = reversed_comparison (op0, mode)))
2169 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2170 is (lt foo (const_int 0)), so we can perform the above
2171 simplification if STORE_FLAG_VALUE is 1. */
2173 if (STORE_FLAG_VALUE == 1
2174 && trueop1 == const1_rtx
2175 && GET_CODE (op0) == LSHIFTRT
2176 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2177 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2178 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
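/* For example: in SImode, (xor (lshiftrt x (const_int 31)) 1) tests
   the inverted sign bit, which is exactly (ge x (const_int 0)) when
   comparisons yield 0 or 1.  */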
2180 /* (xor (comparison foo bar) (const_int sign-bit)) can become the
2181 reversed comparison when STORE_FLAG_VALUE is the sign bit.  */
2182 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2183 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2184 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2185 && trueop1 == const_true_rtx
2186 && COMPARISON_P (op0)
2187 && (reversed = reversed_comparison (op0, mode)))
2192 tem = simplify_associative_operation (code, mode, op0, op1);
2198 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2200 /* If we are turning off bits already known off in OP0, we need not do an AND.  */
2202 if (GET_CODE (trueop1) == CONST_INT
2203 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2204 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2206 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2207 && GET_MODE_CLASS (mode) != MODE_CC)
2210 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2211 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2212 && ! side_effects_p (op0)
2213 && GET_MODE_CLASS (mode) != MODE_CC)
2214 return CONST0_RTX (mode);
2216 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2217 there are no nonzero bits of C outside of X's mode. */
2218 if ((GET_CODE (op0) == SIGN_EXTEND
2219 || GET_CODE (op0) == ZERO_EXTEND)
2220 && GET_CODE (trueop1) == CONST_INT
2221 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2222 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2223 & INTVAL (trueop1)) == 0)
2225 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2226 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2227 gen_int_mode (INTVAL (trueop1),
2229 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
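/* For example (illustrative modes): (and:SI (sign_extend:SI x:QI)
   (const_int 0x7f)) keeps no bits outside QImode, so it is the same
   as (zero_extend:SI (and:QI x (const_int 0x7f))).  */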
2232 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2233 insn (and may simplify more). */
2234 if (GET_CODE (op0) == XOR
2235 && rtx_equal_p (XEXP (op0, 0), op1)
2236 && ! side_effects_p (op1))
2237 return simplify_gen_binary (AND, mode,
2238 simplify_gen_unary (NOT, mode,
2239 XEXP (op0, 1), mode),
2242 if (GET_CODE (op0) == XOR
2243 && rtx_equal_p (XEXP (op0, 1), op1)
2244 && ! side_effects_p (op1))
2245 return simplify_gen_binary (AND, mode,
2246 simplify_gen_unary (NOT, mode,
2247 XEXP (op0, 0), mode),
2250 /* Similarly for (~(A ^ B)) & A. */
2251 if (GET_CODE (op0) == NOT
2252 && GET_CODE (XEXP (op0, 0)) == XOR
2253 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2254 && ! side_effects_p (op1))
2255 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2257 if (GET_CODE (op0) == NOT
2258 && GET_CODE (XEXP (op0, 0)) == XOR
2259 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2260 && ! side_effects_p (op1))
2261 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
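/* This works because (not (xor A B)) has a 1 exactly where A and B
   agree, so masking it with A keeps the bits where both A and B are
   1; i.e. the result is (and A B).  */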
2263 /* Convert (A | B) & A to A. */
2264 if (GET_CODE (op0) == IOR
2265 && (rtx_equal_p (XEXP (op0, 0), op1)
2266 || rtx_equal_p (XEXP (op0, 1), op1))
2267 && ! side_effects_p (XEXP (op0, 0))
2268 && ! side_effects_p (XEXP (op0, 1)))
2271 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2272 ((A & N) + B) & M -> (A + B) & M.
2273 Similarly, if (N & M) == 0,
2274 ((A | N) + B) & M -> (A + B) & M,
2275 and likewise for - instead of + and/or ^ instead of |.  */
2276 if (GET_CODE (trueop1) == CONST_INT
2277 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2278 && ~INTVAL (trueop1)
2279 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2280 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2285 pmop[0] = XEXP (op0, 0);
2286 pmop[1] = XEXP (op0, 1);
2288 for (which = 0; which < 2; which++)
2291 switch (GET_CODE (tem))
2294 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2295 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2296 == INTVAL (trueop1))
2297 pmop[which] = XEXP (tem, 0);
2301 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2302 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2303 pmop[which] = XEXP (tem, 0);
2310 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2312 tem = simplify_gen_binary (GET_CODE (op0), mode,
2314 return simplify_gen_binary (code, mode, tem, op1);
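/* For example (illustrative constants): with M == 0xff,
   ((A & 0x1ff) + B) & 0xff becomes (A + B) & 0xff, since the inner
   AND cannot change the low eight bits of the sum.  */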
2317 tem = simplify_associative_operation (code, mode, op0, op1);
2323 /* 0/x is 0 (or x&0 if x has side-effects). */
2324 if (trueop0 == CONST0_RTX (mode))
2326 if (side_effects_p (op1))
2327 return simplify_gen_binary (AND, mode, op1, trueop0);
2331 if (trueop1 == CONST1_RTX (mode))
2332 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2333 /* Convert divide by power of two into shift. */
2334 if (GET_CODE (trueop1) == CONST_INT
2335 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2336 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
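/* For example: (udiv x (const_int 8)) becomes
   (lshiftrt x (const_int 3)), since exact_log2 (8) == 3.  */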
2340 /* Handle floating point and integers separately. */
2341 if (SCALAR_FLOAT_MODE_P (mode))
2343 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2344 safe for modes with NaNs, since 0.0 / 0.0 will then be
2345 NaN rather than 0.0. Nor is it safe for modes with signed
2346 zeros, since dividing 0 by a negative number gives -0.0, not 0.0.  */
2347 if (trueop0 == CONST0_RTX (mode)
2348 && !HONOR_NANS (mode)
2349 && !HONOR_SIGNED_ZEROS (mode)
2350 && ! side_effects_p (op1))
2353 if (trueop1 == CONST1_RTX (mode)
2354 && !HONOR_SNANS (mode))
2357 if (GET_CODE (trueop1) == CONST_DOUBLE
2358 && trueop1 != CONST0_RTX (mode))
2361 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2364 if (REAL_VALUES_EQUAL (d, dconstm1)
2365 && !HONOR_SNANS (mode))
2366 return simplify_gen_unary (NEG, mode, op0, mode);
2368 /* Change FP division by a constant into multiplication.
2369 Only do this with -funsafe-math-optimizations. */
2370 if (flag_unsafe_math_optimizations
2371 && !REAL_VALUES_EQUAL (d, dconst0))
2373 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2374 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2375 return simplify_gen_binary (MULT, mode, op0, tem);
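/* For example: x / 2.0 becomes x * 0.5. With a power-of-two divisor
   the reciprocal is exact, but in general it is not, which is why
   this rewrite requires -funsafe-math-optimizations.  */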
2381 /* 0/x is 0 (or x&0 if x has side-effects). */
2382 if (trueop0 == CONST0_RTX (mode))
2384 if (side_effects_p (op1))
2385 return simplify_gen_binary (AND, mode, op1, trueop0);
2389 if (trueop1 == CONST1_RTX (mode))
2390 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2392 if (trueop1 == constm1_rtx)
2394 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2395 return simplify_gen_unary (NEG, mode, x, mode);
2401 /* 0%x is 0 (or x&0 if x has side-effects). */
2402 if (trueop0 == CONST0_RTX (mode))
2404 if (side_effects_p (op1))
2405 return simplify_gen_binary (AND, mode, op1, trueop0);
2408 /* x%1 is 0 (or x&0 if x has side-effects).  */
2409 if (trueop1 == CONST1_RTX (mode))
2411 if (side_effects_p (op0))
2412 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2413 return CONST0_RTX (mode);
2415 /* Implement modulus by power of two as AND. */
2416 if (GET_CODE (trueop1) == CONST_INT
2417 && exact_log2 (INTVAL (trueop1)) > 0)
2418 return simplify_gen_binary (AND, mode, op0,
2419 GEN_INT (INTVAL (op1) - 1));
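/* For example: (umod x (const_int 16)) becomes
   (and x (const_int 15)), which picks out the low four bits.  */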
2423 /* 0%x is 0 (or x&0 if x has side-effects). */
2424 if (trueop0 == CONST0_RTX (mode))
2426 if (side_effects_p (op1))
2427 return simplify_gen_binary (AND, mode, op1, trueop0);
2430 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2431 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2433 if (side_effects_p (op0))
2434 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2435 return CONST0_RTX (mode);
2442 if (trueop1 == CONST0_RTX (mode))
2444 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2446 /* Rotating ~0 always results in ~0. */
2447 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2448 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2449 && ! side_effects_p (op1))
2455 if (trueop1 == CONST0_RTX (mode))
2457 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2462 if (trueop1 == CONST0_RTX (mode))
2464 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2466 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2467 if (GET_CODE (op0) == CLZ
2468 && GET_CODE (trueop1) == CONST_INT
2469 && STORE_FLAG_VALUE == 1
2470 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2472 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2473 unsigned HOST_WIDE_INT zero_val = 0;
2475 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2476 && zero_val == GET_MODE_BITSIZE (imode)
2477 && INTVAL (trueop1) == exact_log2 (zero_val))
2478 return simplify_gen_relational (EQ, mode, imode,
2479 XEXP (op0, 0), const0_rtx);
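/* For example (illustrative mode): in SImode, if CLZ of zero is
   defined as 32, then (clz x) is 32 exactly when x == 0, and shifting
   right by 5 == exact_log2 (32) extracts that single case, so
   (lshiftrt (clz x) 5) is (eq x 0).  */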
2484 if (width <= HOST_BITS_PER_WIDE_INT
2485 && GET_CODE (trueop1) == CONST_INT
2486 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2487 && ! side_effects_p (op0))
2489 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2491 tem = simplify_associative_operation (code, mode, op0, op1);
2497 if (width <= HOST_BITS_PER_WIDE_INT
2498 && GET_CODE (trueop1) == CONST_INT
2499 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2500 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2501 && ! side_effects_p (op0))
2503 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2505 tem = simplify_associative_operation (code, mode, op0, op1);
2511 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2513 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2515 tem = simplify_associative_operation (code, mode, op0, op1);
2521 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2523 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2525 tem = simplify_associative_operation (code, mode, op0, op1);
2534 /* ??? There are simplifications that can be done. */
2538 if (!VECTOR_MODE_P (mode))
2540 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2541 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2542 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2543 gcc_assert (XVECLEN (trueop1, 0) == 1);
2544 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2546 if (GET_CODE (trueop0) == CONST_VECTOR)
2547 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2552 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2553 gcc_assert (GET_MODE_INNER (mode)
2554 == GET_MODE_INNER (GET_MODE (trueop0)));
2555 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2557 if (GET_CODE (trueop0) == CONST_VECTOR)
2559 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2560 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2561 rtvec v = rtvec_alloc (n_elts);
2564 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2565 for (i = 0; i < n_elts; i++)
2567 rtx x = XVECEXP (trueop1, 0, i);
2569 gcc_assert (GET_CODE (x) == CONST_INT);
2570 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2574 return gen_rtx_CONST_VECTOR (mode, v);
2578 if (XVECLEN (trueop1, 0) == 1
2579 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2580 && GET_CODE (trueop0) == VEC_CONCAT)
2583 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2585 /* Try to find the element in the VEC_CONCAT. */
2586 while (GET_MODE (vec) != mode
2587 && GET_CODE (vec) == VEC_CONCAT)
2589 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2590 if (offset < vec_size)
2591 vec = XEXP (vec, 0);
2595 vec = XEXP (vec, 1);
2597 vec = avoid_constant_pool_reference (vec);
2600 if (GET_MODE (vec) == mode)
2607 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2608 ? GET_MODE (trueop0)
2609 : GET_MODE_INNER (mode));
2610 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2611 ? GET_MODE (trueop1)
2612 : GET_MODE_INNER (mode));
2614 gcc_assert (VECTOR_MODE_P (mode));
2615 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2616 == GET_MODE_SIZE (mode));
2618 if (VECTOR_MODE_P (op0_mode))
2619 gcc_assert (GET_MODE_INNER (mode)
2620 == GET_MODE_INNER (op0_mode));
2622 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2624 if (VECTOR_MODE_P (op1_mode))
2625 gcc_assert (GET_MODE_INNER (mode)
2626 == GET_MODE_INNER (op1_mode));
2628 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2630 if ((GET_CODE (trueop0) == CONST_VECTOR
2631 || GET_CODE (trueop0) == CONST_INT
2632 || GET_CODE (trueop0) == CONST_DOUBLE)
2633 && (GET_CODE (trueop1) == CONST_VECTOR
2634 || GET_CODE (trueop1) == CONST_INT
2635 || GET_CODE (trueop1) == CONST_DOUBLE))
2637 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2638 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2639 rtvec v = rtvec_alloc (n_elts);
2641 unsigned in_n_elts = 1;
2643 if (VECTOR_MODE_P (op0_mode))
2644 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2645 for (i = 0; i < n_elts; i++)
2649 if (!VECTOR_MODE_P (op0_mode))
2650 RTVEC_ELT (v, i) = trueop0;
2652 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2656 if (!VECTOR_MODE_P (op1_mode))
2657 RTVEC_ELT (v, i) = trueop1;
2659 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2664 return gen_rtx_CONST_VECTOR (mode, v);
2677 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2680 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2682 unsigned int width = GET_MODE_BITSIZE (mode);
2684 if (VECTOR_MODE_P (mode)
2685 && code != VEC_CONCAT
2686 && GET_CODE (op0) == CONST_VECTOR
2687 && GET_CODE (op1) == CONST_VECTOR)
2689 unsigned n_elts = GET_MODE_NUNITS (mode);
2690 enum machine_mode op0mode = GET_MODE (op0);
2691 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2692 enum machine_mode op1mode = GET_MODE (op1);
2693 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2694 rtvec v = rtvec_alloc (n_elts);
2697 gcc_assert (op0_n_elts == n_elts);
2698 gcc_assert (op1_n_elts == n_elts);
2699 for (i = 0; i < n_elts; i++)
2701 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2702 CONST_VECTOR_ELT (op0, i),
2703 CONST_VECTOR_ELT (op1, i));
2706 RTVEC_ELT (v, i) = x;
2709 return gen_rtx_CONST_VECTOR (mode, v);
2712 if (VECTOR_MODE_P (mode)
2713 && code == VEC_CONCAT
2714 && CONSTANT_P (op0) && CONSTANT_P (op1))
2716 unsigned n_elts = GET_MODE_NUNITS (mode);
2717 rtvec v = rtvec_alloc (n_elts);
2719 gcc_assert (n_elts >= 2);
2722 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2723 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2725 RTVEC_ELT (v, 0) = op0;
2726 RTVEC_ELT (v, 1) = op1;
2730 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2731 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2734 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2735 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2736 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2738 for (i = 0; i < op0_n_elts; ++i)
2739 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2740 for (i = 0; i < op1_n_elts; ++i)
2741 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2744 return gen_rtx_CONST_VECTOR (mode, v);
2747 if (SCALAR_FLOAT_MODE_P (mode)
2748 && GET_CODE (op0) == CONST_DOUBLE
2749 && GET_CODE (op1) == CONST_DOUBLE
2750 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2761 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2763 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2765 for (i = 0; i < 4; i++)
2782 real_from_target (&r, tmp0, mode);
2783 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2787 REAL_VALUE_TYPE f0, f1, value, result;
2790 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2791 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2792 real_convert (&f0, mode, &f0);
2793 real_convert (&f1, mode, &f1);
2795 if (HONOR_SNANS (mode)
2796 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2800 && REAL_VALUES_EQUAL (f1, dconst0)
2801 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2804 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2805 && flag_trapping_math
2806 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2808 int s0 = REAL_VALUE_NEGATIVE (f0);
2809 int s1 = REAL_VALUE_NEGATIVE (f1);
2814 /* Inf + -Inf = NaN plus exception. */
2819 /* Inf - Inf = NaN plus exception. */
2824 /* Inf / Inf = NaN plus exception. */
2831 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2832 && flag_trapping_math
2833 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2834 || (REAL_VALUE_ISINF (f1)
2835 && REAL_VALUES_EQUAL (f0, dconst0))))
2836 /* Inf * 0 = NaN plus exception. */
2839 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2841 real_convert (&result, mode, &value);
2843 /* Don't constant fold this floating point operation if
2844 the result has overflowed and flag_trapping_math is set.  */
2846 if (flag_trapping_math
2847 && MODE_HAS_INFINITIES (mode)
2848 && REAL_VALUE_ISINF (result)
2849 && !REAL_VALUE_ISINF (f0)
2850 && !REAL_VALUE_ISINF (f1))
2851 /* Overflow plus exception. */
2854 /* Don't constant fold this floating point operation if the
2855 result may depend upon the run-time rounding mode and
2856 flag_rounding_math is set, or if GCC's software emulation
2857 is unable to accurately represent the result.  */
2859 if ((flag_rounding_math
2860 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2861 && !flag_unsafe_math_optimizations))
2862 && (inexact || !real_identical (&result, &value)))
2865 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2869 /* We can fold some multi-word operations. */
2870 if (GET_MODE_CLASS (mode) == MODE_INT
2871 && width == HOST_BITS_PER_WIDE_INT * 2
2872 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2873 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2875 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2876 HOST_WIDE_INT h1, h2, hv, ht;
2878 if (GET_CODE (op0) == CONST_DOUBLE)
2879 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2881 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2883 if (GET_CODE (op1) == CONST_DOUBLE)
2884 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2886 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2891 /* A - B == A + (-B). */
2892 neg_double (l2, h2, &lv, &hv);
2895 /* Fall through.... */
2898 add_double (l1, h1, l2, h2, &lv, &hv);
2902 mul_double (l1, h1, l2, h2, &lv, &hv);
2906 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2907 &lv, &hv, <, &ht))
2912 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2913 <, &ht, &lv, &hv))
2918 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2919 &lv, &hv, <, &ht))
2924 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2925 <, &ht, &lv, &hv))
2930 lv = l1 & l2, hv = h1 & h2;
2934 lv = l1 | l2, hv = h1 | h2;
2938 lv = l1 ^ l2, hv = h1 ^ h2;
2944 && ((unsigned HOST_WIDE_INT) l1
2945 < (unsigned HOST_WIDE_INT) l2)))
2954 && ((unsigned HOST_WIDE_INT) l1
2955 > (unsigned HOST_WIDE_INT) l2)))
2962 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2964 && ((unsigned HOST_WIDE_INT) l1
2965 < (unsigned HOST_WIDE_INT) l2)))
2972 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2974 && ((unsigned HOST_WIDE_INT) l1
2975 > (unsigned HOST_WIDE_INT) l2)))
2981 case LSHIFTRT: case ASHIFTRT:
2983 case ROTATE: case ROTATERT:
2984 if (SHIFT_COUNT_TRUNCATED)
2985 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2987 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2990 if (code == LSHIFTRT || code == ASHIFTRT)
2991 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2993 else if (code == ASHIFT)
2994 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2995 else if (code == ROTATE)
2996 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2997 else /* code == ROTATERT */
2998 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3005 return immed_double_const (lv, hv, mode);
3008 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3009 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3011 /* Get the integer argument values in two forms:
3012 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3014 arg0 = INTVAL (op0);
3015 arg1 = INTVAL (op1);
3017 if (width < HOST_BITS_PER_WIDE_INT)
3019 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3020 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3023 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3024 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3027 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3028 arg1s |= ((HOST_WIDE_INT) (-1) << width);
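/* For example (illustrative width): for width == 8 and op0 == 0xff,
   ARG0 is the zero-extended value 255 while ARG0S is the
   sign-extended value -1.  */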
3036 /* Compute the value of the arithmetic. */
3041 val = arg0s + arg1s;
3045 val = arg0s - arg1s;
3049 val = arg0s * arg1s;
3054 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3057 val = arg0s / arg1s;
3062 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3065 val = arg0s % arg1s;
3070 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3073 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3078 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3081 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3099 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3100 the value is in range. We can't return any old value for
3101 out-of-range arguments because either the middle-end (via
3102 shift_truncation_mask) or the back-end might be relying on
3103 target-specific knowledge. Nor can we rely on
3104 shift_truncation_mask, since the shift might not be part of an
3105 ashlM3, lshrM3 or ashrM3 instruction. */
3106 if (SHIFT_COUNT_TRUNCATED)
3107 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3108 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3111 val = (code == ASHIFT
3112 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3113 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3115 /* Sign-extend the result for arithmetic right shifts. */
3116 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3117 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3125 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3126 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3134 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3135 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3139 /* Do nothing here. */
3143 val = arg0s <= arg1s ? arg0s : arg1s;
3147 val = ((unsigned HOST_WIDE_INT) arg0
3148 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3152 val = arg0s > arg1s ? arg0s : arg1s;
3156 val = ((unsigned HOST_WIDE_INT) arg0
3157 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3165 /* ??? There are simplifications that can be done. */
3172 return gen_int_mode (val, mode);
3180 /* Simplify a PLUS or MINUS, at least one of whose operands may be another PLUS or MINUS.
3183 Rather than test for specific cases, we do this by a brute-force method
3184 and do all possible simplifications until no more changes occur. Then
3185 we rebuild the operation. */
3187 struct simplify_plus_minus_op_data
3194 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3196 const struct simplify_plus_minus_op_data *d1 = p1;
3197 const struct simplify_plus_minus_op_data *d2 = p2;
3200 result = (commutative_operand_precedence (d2->op)
3201 - commutative_operand_precedence (d1->op));
3205 /* Group together equal REGs to do more simplification. */
3206 if (REG_P (d1->op) && REG_P (d2->op))
3207 return REGNO (d1->op) - REGNO (d2->op);
3213 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3216 struct simplify_plus_minus_op_data ops[8];
3218 int n_ops = 2, input_ops = 2;
3219 int changed, n_constants = 0, canonicalized = 0;
3222 memset (ops, 0, sizeof ops);
3224 /* Set up the two operands and then expand them until nothing has been
3225 changed. If we run out of room in our array, give up; this should
3226 almost never happen. */
3231 ops[1].neg = (code == MINUS);
3237 for (i = 0; i < n_ops; i++)
3239 rtx this_op = ops[i].op;
3240 int this_neg = ops[i].neg;
3241 enum rtx_code this_code = GET_CODE (this_op);
3250 ops[n_ops].op = XEXP (this_op, 1);
3251 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3254 ops[i].op = XEXP (this_op, 0);
3257 canonicalized |= this_neg;
3261 ops[i].op = XEXP (this_op, 0);
3262 ops[i].neg = ! this_neg;
3269 && GET_CODE (XEXP (this_op, 0)) == PLUS
3270 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3271 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3273 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3274 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3275 ops[n_ops].neg = this_neg;
3283 /* ~a -> (-a - 1) */
3286 ops[n_ops].op = constm1_rtx;
3287 ops[n_ops++].neg = this_neg;
3288 ops[i].op = XEXP (this_op, 0);
3289 ops[i].neg = !this_neg;
3299 ops[i].op = neg_const_int (mode, this_op);
3313 if (n_constants > 1)
3316 gcc_assert (n_ops >= 2);
3318 /* If we only have two operands, we can avoid the loops. */
3321 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3324 /* Get the two operands. Be careful with the order, especially for
3325 the cases where code == MINUS. */
3326 if (ops[0].neg && ops[1].neg)
3328 lhs = gen_rtx_NEG (mode, ops[0].op);
3331 else if (ops[0].neg)
3342 return simplify_const_binary_operation (code, mode, lhs, rhs);
3345 /* Now simplify each pair of operands until nothing changes. */
3348 /* Insertion sort is good enough for an eight-element array. */
3349 for (i = 1; i < n_ops; i++)
3351 struct simplify_plus_minus_op_data save;
3353 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3359 ops[j + 1] = ops[j];
3360 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3364 /* This is only useful the first time through. */
3369 for (i = n_ops - 1; i > 0; i--)
3370 for (j = i - 1; j >= 0; j--)
3372 rtx lhs = ops[j].op, rhs = ops[i].op;
3373 int lneg = ops[j].neg, rneg = ops[i].neg;
3375 if (lhs != 0 && rhs != 0)
3377 enum rtx_code ncode = PLUS;
3383 tem = lhs, lhs = rhs, rhs = tem;
3385 else if (swap_commutative_operands_p (lhs, rhs))
3386 tem = lhs, lhs = rhs, rhs = tem;
3388 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3389 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3391 rtx tem_lhs, tem_rhs;
3393 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3394 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3395 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3397 if (tem && !CONSTANT_P (tem))
3398 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3401 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3403 /* Reject "simplifications" that just wrap the two
3404 arguments in a CONST. Failure to do so can result
3405 in infinite recursion with simplify_binary_operation
3406 when it calls us to simplify CONST operations. */
3408 && ! (GET_CODE (tem) == CONST
3409 && GET_CODE (XEXP (tem, 0)) == ncode
3410 && XEXP (XEXP (tem, 0), 0) == lhs
3411 && XEXP (XEXP (tem, 0), 1) == rhs))
3414 if (GET_CODE (tem) == NEG)
3415 tem = XEXP (tem, 0), lneg = !lneg;
3416 if (GET_CODE (tem) == CONST_INT && lneg)
3417 tem = neg_const_int (mode, tem), lneg = 0;
3421 ops[j].op = NULL_RTX;
3427 /* Pack all the operands to the lower-numbered entries. */
3428 for (i = 0, j = 0; j < n_ops; j++)
3438 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3440 && GET_CODE (ops[1].op) == CONST_INT
3441 && CONSTANT_P (ops[0].op)
3443 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3445 /* We suppressed creation of trivial CONST expressions in the
3446 combination loop to avoid recursion. Create one manually now.
3447 The combination loop should have ensured that there is exactly
3448 one CONST_INT, and the sort will have ensured that it is last
3449 in the array and that any other constant will be next-to-last. */
3452 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3453 && CONSTANT_P (ops[n_ops - 2].op))
3455 rtx value = ops[n_ops - 1].op;
3456 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3457 value = neg_const_int (mode, value);
3458 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3462 /* Put a non-negated operand first, if possible. */
3464 for (i = 0; i < n_ops && ops[i].neg; i++)
3467 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3476 /* Now make the result by performing the requested operations. */
3478 for (i = 1; i < n_ops; i++)
3479 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3480 mode, result, ops[i].op);
3485 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3487 plus_minus_operand_p (rtx x)
3489 return GET_CODE (x) == PLUS
3490 || GET_CODE (x) == MINUS
3491 || (GET_CODE (x) == CONST
3492 && GET_CODE (XEXP (x, 0)) == PLUS
3493 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3494 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3497 /* Like simplify_binary_operation except used for relational operators.
3498 MODE is the mode of the result. If MODE is VOIDmode, the operands
3499 must not both be VOIDmode as well.
3501 CMP_MODE specifies the mode in which the comparison is done, so it is
3502 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3503 the operands or, if both are VOIDmode, the operands are compared in
3504 "infinite precision". */
3506 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3507 enum machine_mode cmp_mode, rtx op0, rtx op1)
3509 rtx tem, trueop0, trueop1;
3511 if (cmp_mode == VOIDmode)
3512 cmp_mode = GET_MODE (op0);
3513 if (cmp_mode == VOIDmode)
3514 cmp_mode = GET_MODE (op1);
3516 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3519 if (SCALAR_FLOAT_MODE_P (mode))
3521 if (tem == const0_rtx)
3522 return CONST0_RTX (mode);
3523 #ifdef FLOAT_STORE_FLAG_VALUE
3525 REAL_VALUE_TYPE val;
3526 val = FLOAT_STORE_FLAG_VALUE (mode);
3527 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3533 if (VECTOR_MODE_P (mode))
3535 if (tem == const0_rtx)
3536 return CONST0_RTX (mode);
3537 #ifdef VECTOR_STORE_FLAG_VALUE
3542 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3543 if (val == NULL_RTX)
3545 if (val == const1_rtx)
3546 return CONST1_RTX (mode);
3548 units = GET_MODE_NUNITS (mode);
3549 v = rtvec_alloc (units);
3550 for (i = 0; i < units; i++)
3551 RTVEC_ELT (v, i) = val;
3552 return gen_rtx_raw_CONST_VECTOR (mode, v);
3562 /* For the following tests, ensure const0_rtx is op1. */
3563 if (swap_commutative_operands_p (op0, op1)
3564 || (op0 == const0_rtx && op1 != const0_rtx))
3565 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3567 /* If op0 is a compare, extract the comparison arguments from it. */
3568 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3569 return simplify_relational_operation (code, mode, VOIDmode,
3570 XEXP (op0, 0), XEXP (op0, 1));
3572 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3576 trueop0 = avoid_constant_pool_reference (op0);
3577 trueop1 = avoid_constant_pool_reference (op1);
3578 return simplify_relational_operation_1 (code, mode, cmp_mode,
3582 /* This part of simplify_relational_operation is only used when CMP_MODE
3583 is not in class MODE_CC (i.e. it is a real comparison).
3585 MODE is the mode of the result, while CMP_MODE specifies the mode
3586 in which the comparison is done, so it is the mode of the operands.  */
3589 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3590 enum machine_mode cmp_mode, rtx op0, rtx op1)
3592 enum rtx_code op0code = GET_CODE (op0);
3594 if (GET_CODE (op1) == CONST_INT)
3596 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3598 /* If op0 is a comparison, extract the comparison arguments from it.  */
3602 if (GET_MODE (op0) == mode)
3603 return simplify_rtx (op0);
3605 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3606 XEXP (op0, 0), XEXP (op0, 1));
3608 else if (code == EQ)
3610 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3611 if (new_code != UNKNOWN)
3612 return simplify_gen_relational (new_code, mode, VOIDmode,
3613 XEXP (op0, 0), XEXP (op0, 1));
3618 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3619 if ((code == EQ || code == NE)
3620 && (op0code == PLUS || op0code == MINUS)
3622 && CONSTANT_P (XEXP (op0, 1))
3623 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3625 rtx x = XEXP (op0, 0);
3626 rtx c = XEXP (op0, 1);
3628 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3630 return simplify_gen_relational (code, mode, cmp_mode, x, c);
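/* For example: (eq (plus x (const_int 4)) (const_int 10)) becomes
   (eq x (const_int 6)), moving the constant across the comparison.  */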
3633 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3634 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3636 && op1 == const0_rtx
3637 && GET_MODE_CLASS (mode) == MODE_INT
3638 && cmp_mode != VOIDmode
3639 /* ??? Work-around BImode bugs in the ia64 backend. */
3641 && cmp_mode != BImode
3642 && nonzero_bits (op0, cmp_mode) == 1
3643 && STORE_FLAG_VALUE == 1)
3644 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3645 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3646 : lowpart_subreg (mode, op0, cmp_mode);
3648 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3649 if ((code == EQ || code == NE)
3650 && op1 == const0_rtx
3652 return simplify_gen_relational (code, mode, cmp_mode,
3653 XEXP (op0, 0), XEXP (op0, 1));
3655 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3656 if ((code == EQ || code == NE)
3658 && rtx_equal_p (XEXP (op0, 0), op1)
3659 && !side_effects_p (XEXP (op0, 0)))
3660 return simplify_gen_relational (code, mode, cmp_mode,
3661 XEXP (op0, 1), const0_rtx);
3663 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3664 if ((code == EQ || code == NE)
3666 && rtx_equal_p (XEXP (op0, 1), op1)
3667 && !side_effects_p (XEXP (op0, 1)))
3668 return simplify_gen_relational (code, mode, cmp_mode,
3669 XEXP (op0, 0), const0_rtx);
3671 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3672 if ((code == EQ || code == NE)
3674 && (GET_CODE (op1) == CONST_INT
3675 || GET_CODE (op1) == CONST_DOUBLE)
3676 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3677 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3678 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3679 simplify_gen_binary (XOR, cmp_mode,
3680 XEXP (op0, 1), op1));
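/* For example: (eq (xor x (const_int 5)) (const_int 3)) becomes
   (eq x (const_int 6)), since XOR is its own inverse and 5 ^ 3 == 6.  */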
3685 /* Check if the given comparison (done in the given MODE) is actually a
3686 tautology or a contradiction.
3687 If no simplification is possible, this function returns zero.
3688 Otherwise, it returns either const_true_rtx or const0_rtx. */
3691 simplify_const_relational_operation (enum rtx_code code,
3692 enum machine_mode mode,
3695 int equal, op0lt, op0ltu, op1lt, op1ltu;
3700 gcc_assert (mode != VOIDmode
3701 || (GET_MODE (op0) == VOIDmode
3702 && GET_MODE (op1) == VOIDmode));
3704 /* If op0 is a compare, extract the comparison arguments from it. */
3705 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3707 op1 = XEXP (op0, 1);
3708 op0 = XEXP (op0, 0);
3710 if (GET_MODE (op0) != VOIDmode)
3711 mode = GET_MODE (op0);
3712 else if (GET_MODE (op1) != VOIDmode)
3713 mode = GET_MODE (op1);
3718 /* We can't simplify MODE_CC values since we don't know what the
3719 actual comparison is. */
3720 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3723 /* Make sure the constant is second. */
3724 if (swap_commutative_operands_p (op0, op1))
3726 tem = op0, op0 = op1, op1 = tem;
3727 code = swap_condition (code);
3730 trueop0 = avoid_constant_pool_reference (op0);
3731 trueop1 = avoid_constant_pool_reference (op1);
3733 /* For integer comparisons of A and B maybe we can simplify A - B and can
3734 then simplify a comparison of that with zero. If A and B are both either
3735 a register or a CONST_INT, this can't help; testing for these cases will
3736 prevent infinite recursion here and speed things up.
3738 We can only do this for EQ and NE comparisons as otherwise we may
3739 lose or introduce overflow which we cannot disregard as undefined as
3740 we do not know the signedness of the operation on either the left or
3741 the right hand side of the comparison. */
3743 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3744 && (code == EQ || code == NE)
3745 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3746 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3747 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3748 /* We cannot do this if tem is a nonzero address. */
3749 && ! nonzero_address_p (tem))
3750 return simplify_const_relational_operation (signed_condition (code),
3751 mode, tem, const0_rtx);
3753 if (! HONOR_NANS (mode) && code == ORDERED)
3754 return const_true_rtx;
3756 if (! HONOR_NANS (mode) && code == UNORDERED)
3759 /* For modes without NaNs, if the two operands are equal, we know the
3760 result unless they have side-effects.  */
3761 if (! HONOR_NANS (GET_MODE (trueop0))
3762 && rtx_equal_p (trueop0, trueop1)
3763 && ! side_effects_p (trueop0))
3764 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3766 /* If the operands are floating-point constants, see if we can fold the result.  */
3768 else if (GET_CODE (trueop0) == CONST_DOUBLE
3769 && GET_CODE (trueop1) == CONST_DOUBLE
3770 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3772 REAL_VALUE_TYPE d0, d1;
3774 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3775 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3777 /* Comparisons are unordered iff at least one of the values is NaN. */
3778 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3788 return const_true_rtx;
3801 equal = REAL_VALUES_EQUAL (d0, d1);
3802 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3803 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3806 /* Otherwise, see if the operands are both integers. */
3807 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3808 && (GET_CODE (trueop0) == CONST_DOUBLE
3809 || GET_CODE (trueop0) == CONST_INT)
3810 && (GET_CODE (trueop1) == CONST_DOUBLE
3811 || GET_CODE (trueop1) == CONST_INT))
3813 int width = GET_MODE_BITSIZE (mode);
3814 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3815 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3817 /* Get the two words comprising each integer constant. */
3818 if (GET_CODE (trueop0) == CONST_DOUBLE)
3820 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3821 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3825 l0u = l0s = INTVAL (trueop0);
3826 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3829 if (GET_CODE (trueop1) == CONST_DOUBLE)
3831 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3832 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3836 l1u = l1s = INTVAL (trueop1);
3837 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3840 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3841 we have to sign or zero-extend the values. */
3842 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3844 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3845 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3847 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3848 l0s |= ((HOST_WIDE_INT) (-1) << width);
3850 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3851 l1s |= ((HOST_WIDE_INT) (-1) << width);
3853 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3854 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3856 equal = (h0u == h1u && l0u == l1u);
3857 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3858 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3859 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3860 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3863 /* Otherwise, there are some code-specific tests we can make. */
3866 /* Optimize comparisons with upper and lower bounds. */
3867 if (SCALAR_INT_MODE_P (mode)
3868 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3881 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3888 /* x >= min is always true. */
3889 if (rtx_equal_p (trueop1, mmin))
3890 tem = const_true_rtx;
3896 /* x <= max is always true. */
3897 if (rtx_equal_p (trueop1, mmax))
3898 tem = const_true_rtx;
3903 /* x > max is always false. */
3904 if (rtx_equal_p (trueop1, mmax))
3910 /* x < min is always false. */
3911 if (rtx_equal_p (trueop1, mmin))
3918 if (tem == const0_rtx
3919 || tem == const_true_rtx)
3926 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3931 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3932 return const_true_rtx;
3936 /* Optimize abs(x) < 0.0. */
3937 if (trueop1 == CONST0_RTX (mode)
3938 && !HONOR_SNANS (mode)
3939 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3941 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3943 if (GET_CODE (tem) == ABS)
3949 /* Optimize abs(x) >= 0.0. */
3950 if (trueop1 == CONST0_RTX (mode)
3951 && !HONOR_NANS (mode)
3952 && !(flag_wrapv && INTEGRAL_MODE_P (mode)))
3954 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3956 if (GET_CODE (tem) == ABS)
3957 return const_true_rtx;
3962 /* Optimize ! (abs(x) < 0.0). */
3963 if (trueop1 == CONST0_RTX (mode))
3965 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3967 if (GET_CODE (tem) == ABS)
3968 return const_true_rtx;
3979 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set as appropriate.  */
3985 return equal ? const_true_rtx : const0_rtx;
3988 return ! equal ? const_true_rtx : const0_rtx;
3991 return op0lt ? const_true_rtx : const0_rtx;
3994 return op1lt ? const_true_rtx : const0_rtx;
3996 return op0ltu ? const_true_rtx : const0_rtx;
3998 return op1ltu ? const_true_rtx : const0_rtx;
4001 return equal || op0lt ? const_true_rtx : const0_rtx;
4004 return equal || op1lt ? const_true_rtx : const0_rtx;
4006 return equal || op0ltu ? const_true_rtx : const0_rtx;
4008 return equal || op1ltu ? const_true_rtx : const0_rtx;
4010 return const_true_rtx;
4018 /* Simplify CODE, an operation with result mode MODE and three operands,
4019 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4020 a constant. Return 0 if no simplification is possible.  */
4023 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4024 enum machine_mode op0_mode, rtx op0, rtx op1,
4027 unsigned int width = GET_MODE_BITSIZE (mode);
4029 /* VOIDmode means "infinite" precision. */
4031 width = HOST_BITS_PER_WIDE_INT;
4037 if (GET_CODE (op0) == CONST_INT
4038 && GET_CODE (op1) == CONST_INT
4039 && GET_CODE (op2) == CONST_INT
4040 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4041 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4043 /* Extracting a bit-field from a constant.  */
4044 HOST_WIDE_INT val = INTVAL (op0);
4046 if (BITS_BIG_ENDIAN)
4047 val >>= (GET_MODE_BITSIZE (op0_mode)
4048 - INTVAL (op2) - INTVAL (op1));
4050 val >>= INTVAL (op2);
4052 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4054 /* First zero-extend. */
4055 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4056 /* If desired, propagate sign bit. */
4057 if (code == SIGN_EXTRACT
4058 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4059 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4062 /* Clear the bits that don't belong in our mode,
4063 unless they and our sign bit are all one.
4064 So we get either a reasonable negative value or a reasonable
4065 unsigned value for this mode. */
4066 if (width < HOST_BITS_PER_WIDE_INT
4067 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4068 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4069 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4071 return gen_int_mode (val, mode);
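/* For example (illustrative operands, BITS_BIG_ENDIAN == 0):
   (zero_extract:SI (const_int 0x5a) (const_int 4) (const_int 1))
   shifts 0x5a right by 1 to get 0x2d and masks with 0xf,
   yielding (const_int 0xd).  */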
4076 if (GET_CODE (op0) == CONST_INT)
4077 return op0 != const0_rtx ? op1 : op2;
4079 /* Convert c ? a : a into "a". */
4080 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4083 /* Convert a != b ? a : b into "a". */
4084 if (GET_CODE (op0) == NE
4085 && ! side_effects_p (op0)
4086 && ! HONOR_NANS (mode)
4087 && ! HONOR_SIGNED_ZEROS (mode)
4088 && ((rtx_equal_p (XEXP (op0, 0), op1)
4089 && rtx_equal_p (XEXP (op0, 1), op2))
4090 || (rtx_equal_p (XEXP (op0, 0), op2)
4091 && rtx_equal_p (XEXP (op0, 1), op1))))
4094 /* Convert a == b ? a : b into "b". */
4095 if (GET_CODE (op0) == EQ
4096 && ! side_effects_p (op0)
4097 && ! HONOR_NANS (mode)
4098 && ! HONOR_SIGNED_ZEROS (mode)
4099 && ((rtx_equal_p (XEXP (op0, 0), op1)
4100 && rtx_equal_p (XEXP (op0, 1), op2))
4101 || (rtx_equal_p (XEXP (op0, 0), op2)
4102 && rtx_equal_p (XEXP (op0, 1), op1))))
4105 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4107 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4108 ? GET_MODE (XEXP (op0, 1))
4109 : GET_MODE (XEXP (op0, 0)));
4112 /* Look for happy constants in op1 and op2. */
4113 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4115 HOST_WIDE_INT t = INTVAL (op1);
4116 HOST_WIDE_INT f = INTVAL (op2);
4118 if (t == STORE_FLAG_VALUE && f == 0)
4119 code = GET_CODE (op0);
4120 else if (t == 0 && f == STORE_FLAG_VALUE)
4123 tmp = reversed_comparison_code (op0, NULL_RTX);
4131 return simplify_gen_relational (code, mode, cmp_mode,
4132 XEXP (op0, 0), XEXP (op0, 1));
4135 if (cmp_mode == VOIDmode)
4136 cmp_mode = op0_mode;
4137 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4138 cmp_mode, XEXP (op0, 0),
4141 /* See if any simplifications were possible. */
4144 if (GET_CODE (temp) == CONST_INT)
4145 return temp == const0_rtx ? op2 : op1;
4147 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4153 gcc_assert (GET_MODE (op0) == mode);
4154 gcc_assert (GET_MODE (op1) == mode);
4155 gcc_assert (VECTOR_MODE_P (mode));
4156 op2 = avoid_constant_pool_reference (op2);
4157 if (GET_CODE (op2) == CONST_INT)
4159 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4160 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4161 int mask = (1 << n_elts) - 1;
4163 if (!(INTVAL (op2) & mask))
4165 if ((INTVAL (op2) & mask) == mask)
4168 op0 = avoid_constant_pool_reference (op0);
4169 op1 = avoid_constant_pool_reference (op1);
4170 if (GET_CODE (op0) == CONST_VECTOR
4171 && GET_CODE (op1) == CONST_VECTOR)
4173 rtvec v = rtvec_alloc (n_elts);
4176 for (i = 0; i < n_elts; i++)
4177 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4178 ? CONST_VECTOR_ELT (op0, i)
4179 : CONST_VECTOR_ELT (op1, i));
4180 return gen_rtx_CONST_VECTOR (mode, v);
4192 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4193 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4195 Works by unpacking OP into a collection of 8-bit values
4196 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4197 and then repacking them again for OUTERMODE. */
4200 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4201 enum machine_mode innermode, unsigned int byte)
4203 /* We support up to 512-bit values (for V8DFmode). */
4207 value_mask = (1 << value_bit) - 1
4209 unsigned char value[max_bitsize / value_bit];
4218 rtvec result_v = NULL;
4219 enum mode_class outer_class;
4220 enum machine_mode outer_submode;
4222 /* Some ports misuse CCmode. */
4223 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4226 /* We have no way to represent a complex constant at the rtl level. */
4227 if (COMPLEX_MODE_P (outermode))
4230 /* Unpack the value. */
4232 if (GET_CODE (op) == CONST_VECTOR)
4234 num_elem = CONST_VECTOR_NUNITS (op);
4235 elems = &CONST_VECTOR_ELT (op, 0);
4236 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4242 elem_bitsize = max_bitsize;
4244 /* If this asserts, it is too complicated; reducing value_bit may help. */
4245 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4246 /* I don't know how to handle endianness of sub-units. */
4247 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4249 for (elem = 0; elem < num_elem; elem++)
4252 rtx el = elems[elem];
4254 /* Vectors are kept in target memory order. (This is probably a mistake.)  */
4257 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4258 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4260 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4261 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4262 unsigned bytele = (subword_byte % UNITS_PER_WORD
4263 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4264 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4267 switch (GET_CODE (el))
4271 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4273 *vp++ = INTVAL (el) >> i;
4274 /* CONST_INTs are always logically sign-extended. */
4275 for (; i < elem_bitsize; i += value_bit)
4276 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4280 if (GET_MODE (el) == VOIDmode)
4282 /* If this triggers, someone should have generated a
4283 CONST_INT instead. */
4284 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4286 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4287 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4288 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4291 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4294 /* It shouldn't matter what's done here, so fill it with zero.  */
4296 for (; i < elem_bitsize; i += value_bit)
4301 long tmp[max_bitsize / 32];
4302 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4304 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4305 gcc_assert (bitsize <= elem_bitsize);
4306 gcc_assert (bitsize % value_bit == 0);
4308 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4311 /* real_to_target produces its result in words affected by
4312 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4313 and use WORDS_BIG_ENDIAN instead; see the documentation
4314 of SUBREG in rtl.texi. */
4315 for (i = 0; i < bitsize; i += value_bit)
4318 if (WORDS_BIG_ENDIAN)
4319 ibase = bitsize - 1 - i;
4322 *vp++ = tmp[ibase / 32] >> i % 32;
4325 /* It shouldn't matter what's done here, so fill it with zero.  */
4327 for (; i < elem_bitsize; i += value_bit)
4337 /* Now, pick the right byte to start with. */
4338 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4339 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4340 will already have offset 0. */
4341 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4343 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4345 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4346 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4347 byte = (subword_byte % UNITS_PER_WORD
4348 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4351 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4352 so if it's become negative it will instead be very large.) */
4353 gcc_assert (byte < GET_MODE_SIZE (innermode));
4355 /* Convert from bytes to chunks of size value_bit. */
4356 value_start = byte * (BITS_PER_UNIT / value_bit);
4358 /* Re-pack the value. */
4360 if (VECTOR_MODE_P (outermode))
4362 num_elem = GET_MODE_NUNITS (outermode);
4363 result_v = rtvec_alloc (num_elem);
4364 elems = &RTVEC_ELT (result_v, 0);
4365 outer_submode = GET_MODE_INNER (outermode);
4371 outer_submode = outermode;
4374 outer_class = GET_MODE_CLASS (outer_submode);
4375 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4377 gcc_assert (elem_bitsize % value_bit == 0);
4378 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4380 for (elem = 0; elem < num_elem; elem++)
4384 /* Vectors are stored in target memory order. (This is probably a mistake.)  */
4387 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4388 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4390 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4391 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4392 unsigned bytele = (subword_byte % UNITS_PER_WORD
4393 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4394 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4397 switch (outer_class)
4400 case MODE_PARTIAL_INT:
4402 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4405 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4407 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4408 for (; i < elem_bitsize; i += value_bit)
4409 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4410 << (i - HOST_BITS_PER_WIDE_INT));
4412 /* immed_double_const doesn't call trunc_int_for_mode. I don't know why.  */
4414 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4415 elems[elem] = gen_int_mode (lo, outer_submode);
4416 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4417 elems[elem] = immed_double_const (lo, hi, outer_submode);
4424 case MODE_DECIMAL_FLOAT:
4427 long tmp[max_bitsize / 32];
4429 /* real_from_target wants its input in words affected by
4430 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4431 and use WORDS_BIG_ENDIAN instead; see the documentation
4432 of SUBREG in rtl.texi. */
4433 for (i = 0; i < max_bitsize / 32; i++)
4435 for (i = 0; i < elem_bitsize; i += value_bit)
4438 if (WORDS_BIG_ENDIAN)
4439 ibase = elem_bitsize - 1 - i;
4442 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4445 real_from_target (&r, tmp, outer_submode);
4446 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4454 if (VECTOR_MODE_P (outermode))
4455 return gen_rtx_CONST_VECTOR (outermode, result_v);
4460 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4461 Return 0 if no simplifications are possible. */
4463 simplify_subreg (enum machine_mode outermode, rtx op,
4464 enum machine_mode innermode, unsigned int byte)
4466 /* A little bit of sanity checking.  */
4467 gcc_assert (innermode != VOIDmode);
4468 gcc_assert (outermode != VOIDmode);
4469 gcc_assert (innermode != BLKmode);
4470 gcc_assert (outermode != BLKmode);
4472 gcc_assert (GET_MODE (op) == innermode
4473 || GET_MODE (op) == VOIDmode);
4475 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4476 gcc_assert (byte < GET_MODE_SIZE (innermode));
4478 if (outermode == innermode && !byte)
4481 if (GET_CODE (op) == CONST_INT
4482 || GET_CODE (op) == CONST_DOUBLE
4483 || GET_CODE (op) == CONST_VECTOR)
4484 return simplify_immed_subreg (outermode, op, innermode, byte);
4486 /* Changing mode twice with SUBREG => just change it once,
4487 or not at all if changing back to the starting mode of op.  */
4488 if (GET_CODE (op) == SUBREG)
4490 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4491 int final_offset = byte + SUBREG_BYTE (op);
4494 if (outermode == innermostmode
4495 && byte == 0 && SUBREG_BYTE (op) == 0)
4496 return SUBREG_REG (op);
4498 /* The SUBREG_BYTE represents the offset, as if the value were stored
4499 in memory. An irritating exception is a paradoxical subreg, where
4500 we define SUBREG_BYTE to be 0; on big-endian machines, this
4501 value should really be negative. For a moment, undo this exception.  */
4502 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4504 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4505 if (WORDS_BIG_ENDIAN)
4506 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4507 if (BYTES_BIG_ENDIAN)
4508 final_offset += difference % UNITS_PER_WORD;
4510 if (SUBREG_BYTE (op) == 0
4511 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4513 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4514 if (WORDS_BIG_ENDIAN)
4515 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4516 if (BYTES_BIG_ENDIAN)
4517 final_offset += difference % UNITS_PER_WORD;
4520 /* See whether the resulting subreg will be paradoxical.  */
4521 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4523 /* In nonparadoxical subregs we can't handle negative offsets. */
4524 if (final_offset < 0)
4526 /* Bail out if the resulting subreg would be incorrect.  */
4527 if (final_offset % GET_MODE_SIZE (outermode)
4528 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4534 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4536 /* For a paradoxical subreg, see if we are still looking at the lower
4537 part. If so, our SUBREG_BYTE will be 0.  */
4538 if (WORDS_BIG_ENDIAN)
4539 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4540 if (BYTES_BIG_ENDIAN)
4541 offset += difference % UNITS_PER_WORD;
4542 if (offset == final_offset)
4548 /* Recurse for further possible simplifications. */
4549 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4553 if (validate_subreg (outermode, innermostmode,
4554 SUBREG_REG (op), final_offset))
4555 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));
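  /* E.g. a lowpart (subreg:QI (truncate:HI (reg:SI x)) 0) becomes
     (truncate:QI (reg:SI x)) directly: the explicit TRUNCATE and the
     truncating subreg are merged into a single truncation.
     (Illustrative; byte 0 is the lowpart on little-endian targets.)  */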
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
             ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
                                        byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
        = regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (regno, innermode))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust the offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }
          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate the original regno.  We don't have any way to
             specify an offset inside the original regno, so do so only
             for the lowpart.  The information is used only by alias
             analysis, which cannot grok partial registers anyway.  */
          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
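  /* As an illustration (hypothetical target, not from the original
     sources): on a 32-bit target where DImode occupies two adjacent hard
     registers, (subreg:SI (reg:DI 0) 4) can fold straight to (reg:SI 1),
     provided SImode is valid in hard register 1 and the offset is
     representable there.  */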
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
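  /* E.g. (subreg:QI (mem:SI (reg:SI ptr)) 3) becomes a narrower QImode
     MEM whose address is offset by 3 bytes, via adjust_address_nv.
     (Illustrative only; the exact address arithmetic is
     target-specific.)  */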
  /* Handle complex values represented as CONCAT
     of real and imaginary parts.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int inner_size, final_offset;
      rtx part, res;

      inner_size = GET_MODE_UNIT_SIZE (innermode);
      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
      final_offset = byte % inner_size;
      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
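  /* For example, (subreg:SF (concat:SC re im) 4) selects the imaginary
     part: with 4-byte SFmode, byte 4 falls past inner_size == 4, so PART
     is XEXP (op, 1) and final_offset is 0, and the recursive call folds
     the whole expression to IM itself.  (Illustrative sizes.)  */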
  /* Optimize SUBREG truncations of zero- and sign-extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }
      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source provides.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
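  /* Concrete instances of the cases above (illustrative, assuming a
     little-endian target where byte 0 is the lowpart):
       (subreg:QI (zero_extend:SI (reg:QI x)) 0) -> (reg:QI x)
       (subreg:QI (zero_extend:SI (reg:HI x)) 0) -> lowpart QI subreg of x
       (subreg:HI (zero_extend:SI (reg:QI x)) 0) -> (zero_extend:HI (reg:QI x))
     and the high word of a zero extension is simply zero:
       (subreg:SI (zero_extend:DI (reg:SI x)) 4) -> (const_int 0).  */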
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
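  /* E.g. with QImode x and C == 2 (an illustrative case):
       (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI x))
                               (const_int 2)) 0)
     -> (ashiftrt:QI (reg:QI x) (const_int 2)).
     The logical shift sees only sign-bit copies above bit 7, so an
     arithmetic shift in QImode computes the same low byte.  */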
  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
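  /* E.g. (subreg:QI (lshiftrt:SI (zero_extend:SI (reg:QI x))
                                  (const_int 2)) 0)
     -> (lshiftrt:QI (reg:QI x) (const_int 2)); the zero extension
     guarantees that only zeros are shifted into the low byte.
     (Illustrative example, assuming byte 0 is the lowpart.)  */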
  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
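/* Usage sketch (illustrative, not part of the original sources): a pass
   wanting the low SImode half of a DImode value VAL would write

     rtx lo = simplify_gen_subreg (SImode, val, DImode,
                                   subreg_lowpart_offset (SImode, DImode));

   and receive either a folded rtx or a fresh SUBREG, or NULL_RTX if no
   valid subreg can be formed.  */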
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      break;
    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}
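/* Usage sketch (illustrative, not part of the original sources): callers
   typically try simplify_rtx and fall back to the unsimplified rtx:

     rtx tem = simplify_rtx (x);
     if (tem)
       x = tem;

   since a NULL return means only that no simplification applied.  */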