/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
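
/* For instance, the (low, high) pair for the wide value -2 is built
   as low = (unsigned HOST_WIDE_INT) -2 together with
   high = HWI_SIGN_EXTEND (low) == -1, while low = 2 gives high = 0;
   this is exactly sign extension of LOW into the high word.  */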
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
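
/* Example: in 32-bit SImode the only constant accepted above is
   0x80000000, i.e. (unsigned HOST_WIDE_INT) 1 << 31.  In QImode a
   CONST_INT of -128 also passes, since masking to the 8-bit width
   leaves exactly the value 0x80.  */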
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
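
/* Usage example: simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   returns X itself instead of building (plus:SI x (const_int 0)),
   and simplify_gen_binary (PLUS, SImode, const0_rtx, x) first swaps
   the operands so that the constant ends up second.  */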
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
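
/* Example: an SFmode MEM whose address is a CONSTANT_POOL_ADDRESS_P
   SYMBOL_REF for the pool entry 1.0 comes back as the CONST_DOUBLE
   1.0 directly.  If the same entry is instead read in SImode, the
   simplify_subreg path reinterprets the bits, which on a typical
   IEEE single-precision target gives (const_int 0x3f800000).  */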
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (rtx_equal_p (x, old_rtx))
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
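
/* Example: replacing (reg:SI R) with (const_int 0) in
   (plus:SI (reg:SI R) (reg:SI S)) substitutes recursively and then
   lets simplify_gen_binary fold the whole expression to (reg:SI S).  */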
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */

static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
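
      /* De Morgan examples: (not (and X Y)) becomes (ior (not X) (not Y))
         and (not (ior X (not Y))) becomes (and (not X) Y), since the inner
         NOTs fold away; any NOT that survives is placed first to match the
         and-not/nand patterns in the machine descriptions.  */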
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
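
      /* Examples: (truncate:SI (sign_extend:DI foo:SI)) folds back to
         foo:SI, and something like (truncate:QI (and:SI x (const_int 0x7f)))
         is already truncated, so on targets where the truncation is a
         no-op (TRULY_NOOP_TRUNCATION) it can be expressed as a lowpart
         subreg.  */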
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
                                                                  0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
          */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */

rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
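
  /* Example: (vec_duplicate:V4SI (const_int 5)) builds the CONST_VECTOR
     [5, 5, 5, 5], and duplicating a two-element CONST_VECTOR into a
     four-element mode repeats its elements as [a, b, a, b].  */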
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
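
  /* Sample foldings of the cases above on SImode CONST_INTs:
     (bswap:SI (const_int 0x12345678)) yields 0x78563412,
     (clz:SI (const_int 1)) yields 31, (ffs:SI (const_int 0)) yields 0,
     and (popcount:SI (const_int 7)) yields 3.  */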
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
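
/* Example: for (plus (plus A c1) c2) with constants c1 and c2, the
   "(a op b) op c" -> "a op (b op c)" attempt folds the two constants
   into one, giving (plus A (c1+c2)); for (plus (plus A c1) B) the
   canonicalization step produces (plus (plus A B) c1), migrating the
   constant outward.  */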
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }
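
      /* Example of the distribution above: (plus (mult x 3) x) reduces
         both sides to X with coefficients 3 and 1, giving (mult x 4), and
         (plus (ashift x 2) (neg x)) is x*4 + x*-1, giving (mult x 3).
         The rtx_cost test keeps the rewrite only when it is no more
         expensive than the original.  */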
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case COMPARE:
#ifdef HAVE_cc0
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;
#endif

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (GET_CODE (op1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && GET_CODE (trueop1) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT)
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && GET_CODE (XEXP (opleft, 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
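
      /* Rotate example: in SImode,
         (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
         has shift counts summing to the 32-bit mode size and is therefore
         recognized as (rotate x (const_int 24)).  */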
2126 /* Same, but for ashift that has been "simplified" to a wider mode
2127 by simplify_shift_const. */
2129 if (GET_CODE (opleft) == SUBREG
2130 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2131 && GET_CODE (opright) == LSHIFTRT
2132 && GET_CODE (XEXP (opright, 0)) == SUBREG
2133 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2134 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2135 && (GET_MODE_SIZE (GET_MODE (opleft))
2136 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2137 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2138 SUBREG_REG (XEXP (opright, 0)))
2139 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2140 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2141 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2142 == GET_MODE_BITSIZE (mode)))
2143 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2144 XEXP (SUBREG_REG (opleft), 1));
2146 /* If we have (ior (and (X C1) C2)), simplify this by making
2147 C1 as small as possible if C1 actually changes. */
2148 if (GET_CODE (op1) == CONST_INT
2149 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2150 || INTVAL (op1) > 0)
2151 && GET_CODE (op0) == AND
2152 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2153 && GET_CODE (op1) == CONST_INT
2154 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2155 return simplify_gen_binary (IOR, mode,
2157 (AND, mode, XEXP (op0, 0),
2158 GEN_INT (INTVAL (XEXP (op0, 1))
2162 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2163 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2164 the PLUS does not affect any of the bits in OP1: then we can do
2165 the IOR as a PLUS and we can associate. This is valid if OP1
2166 can be safely shifted left C bits. */
2167 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2168 && GET_CODE (XEXP (op0, 0)) == PLUS
2169 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2170 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2171 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2173 int count = INTVAL (XEXP (op0, 1));
2174 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2176 if (mask >> count == INTVAL (trueop1)
2177 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2178 return simplify_gen_binary (ASHIFTRT, mode,
2179 plus_constant (XEXP (op0, 0), mask),
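/* Illustrative example (values chosen for exposition): with C == 2
and OP1 == (const_int 1), mask == 1 << 2 == 4; if bit 2 of the PLUS
is known to be zero, (ior (ashiftrt P 2) 1) becomes
(ashiftrt (plus P 4) 2). */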
2183 tem = simplify_associative_operation (code, mode, op0, op1);
2189 if (trueop1 == const0_rtx)
2191 if (GET_CODE (trueop1) == CONST_INT
2192 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2193 == GET_MODE_MASK (mode)))
2194 return simplify_gen_unary (NOT, mode, op0, mode);
2195 if (rtx_equal_p (trueop0, trueop1)
2196 && ! side_effects_p (op0)
2197 && GET_MODE_CLASS (mode) != MODE_CC)
2198 return CONST0_RTX (mode);
2200 /* Canonicalize XOR of the most significant bit to PLUS. */
2201 if ((GET_CODE (op1) == CONST_INT
2202 || GET_CODE (op1) == CONST_DOUBLE)
2203 && mode_signbit_p (mode, op1))
2204 return simplify_gen_binary (PLUS, mode, op0, op1);
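/* e.g. in QImode, (xor X (const_int -128)) and
(plus X (const_int -128)) both flip only bit 7, so the XOR is
rewritten in the PLUS form. */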
2205 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2206 if ((GET_CODE (op1) == CONST_INT
2207 || GET_CODE (op1) == CONST_DOUBLE)
2208 && GET_CODE (op0) == PLUS
2209 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2210 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2211 && mode_signbit_p (mode, XEXP (op0, 1)))
2212 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2213 simplify_gen_binary (XOR, mode, op1,
2216 /* If we are XORing two things that have no bits in common,
2217 convert them into an IOR. This helps to detect rotation encoded
2218 using those methods and possibly other simplifications. */
2220 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2221 && (nonzero_bits (op0, mode)
2222 & nonzero_bits (op1, mode)) == 0)
2223 return (simplify_gen_binary (IOR, mode, op0, op1));
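/* e.g. if nonzero_bits shows OP0 can only set bits 0x0f and OP1
only 0xf0, then the XOR and the IOR compute the same value. */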
2225 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2226 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2229 int num_negated = 0;
2231 if (GET_CODE (op0) == NOT)
2232 num_negated++, op0 = XEXP (op0, 0);
2233 if (GET_CODE (op1) == NOT)
2234 num_negated++, op1 = XEXP (op1, 0);
2236 if (num_negated == 2)
2237 return simplify_gen_binary (XOR, mode, op0, op1);
2238 else if (num_negated == 1)
2239 return simplify_gen_unary (NOT, mode,
2240 simplify_gen_binary (XOR, mode, op0, op1),
2244 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2245 correspond to a machine insn or result in further simplifications
2246 if B is a constant. */
2248 if (GET_CODE (op0) == AND
2249 && rtx_equal_p (XEXP (op0, 1), op1)
2250 && ! side_effects_p (op1))
2251 return simplify_gen_binary (AND, mode,
2252 simplify_gen_unary (NOT, mode,
2253 XEXP (op0, 0), mode),
2256 else if (GET_CODE (op0) == AND
2257 && rtx_equal_p (XEXP (op0, 0), op1)
2258 && ! side_effects_p (op1))
2259 return simplify_gen_binary (AND, mode,
2260 simplify_gen_unary (NOT, mode,
2261 XEXP (op0, 1), mode),
2264 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2265 comparison if STORE_FLAG_VALUE is 1. */
2266 if (STORE_FLAG_VALUE == 1
2267 && trueop1 == const1_rtx
2268 && COMPARISON_P (op0)
2269 && (reversed = reversed_comparison (op0, mode)))
2272 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2273 is (lt foo (const_int 0)), so we can perform the above
2274 simplification if STORE_FLAG_VALUE is 1. */
2276 if (STORE_FLAG_VALUE == 1
2277 && trueop1 == const1_rtx
2278 && GET_CODE (op0) == LSHIFTRT
2279 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2280 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2281 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
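/* e.g. in SImode, (xor (lshiftrt X (const_int 31)) (const_int 1))
tests the inverted sign bit of X, which is (ge X (const_int 0)). */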
2283 /* (xor (comparison foo bar) (const_int sign-bit))
2284 when STORE_FLAG_VALUE is the sign bit. */
2285 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2286 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2287 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2288 && trueop1 == const_true_rtx
2289 && COMPARISON_P (op0)
2290 && (reversed = reversed_comparison (op0, mode)))
2293 tem = simplify_associative_operation (code, mode, op0, op1);
2299 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2301 /* If we are turning off bits already known off in OP0, we need
2303 if (GET_CODE (trueop1) == CONST_INT
2304 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2305 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2307 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2308 && GET_MODE_CLASS (mode) != MODE_CC)
2311 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2312 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2313 && ! side_effects_p (op0)
2314 && GET_MODE_CLASS (mode) != MODE_CC)
2315 return CONST0_RTX (mode);
2317 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2318 there are no nonzero bits of C outside of X's mode. */
2319 if ((GET_CODE (op0) == SIGN_EXTEND
2320 || GET_CODE (op0) == ZERO_EXTEND)
2321 && GET_CODE (trueop1) == CONST_INT
2322 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2323 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2324 & INTVAL (trueop1)) == 0)
2326 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2327 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2328 gen_int_mode (INTVAL (trueop1),
2330 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
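/* e.g. (and:SI (sign_extend:SI X:QI) (const_int 0x7f)) becomes
(zero_extend:SI (and:QI X (const_int 0x7f))), since 0x7f has no
bits outside QImode's mask. */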
2333 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2334 if (GET_CODE (op0) == IOR
2335 && GET_CODE (trueop1) == CONST_INT
2336 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2338 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2339 return simplify_gen_binary (IOR, mode,
2340 simplify_gen_binary (AND, mode,
2341 XEXP (op0, 0), op1),
2342 gen_int_mode (tmp, mode));
2345 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2346 insn (and may simplify more). */
2347 if (GET_CODE (op0) == XOR
2348 && rtx_equal_p (XEXP (op0, 0), op1)
2349 && ! side_effects_p (op1))
2350 return simplify_gen_binary (AND, mode,
2351 simplify_gen_unary (NOT, mode,
2352 XEXP (op0, 1), mode),
2355 if (GET_CODE (op0) == XOR
2356 && rtx_equal_p (XEXP (op0, 1), op1)
2357 && ! side_effects_p (op1))
2358 return simplify_gen_binary (AND, mode,
2359 simplify_gen_unary (NOT, mode,
2360 XEXP (op0, 0), mode),
2363 /* Similarly for (~(A ^ B)) & A. */
2364 if (GET_CODE (op0) == NOT
2365 && GET_CODE (XEXP (op0, 0)) == XOR
2366 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2367 && ! side_effects_p (op1))
2368 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2370 if (GET_CODE (op0) == NOT
2371 && GET_CODE (XEXP (op0, 0)) == XOR
2372 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2373 && ! side_effects_p (op1))
2374 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2376 /* Convert (A | B) & A to A. */
2377 if (GET_CODE (op0) == IOR
2378 && (rtx_equal_p (XEXP (op0, 0), op1)
2379 || rtx_equal_p (XEXP (op0, 1), op1))
2380 && ! side_effects_p (XEXP (op0, 0))
2381 && ! side_effects_p (XEXP (op0, 1)))
2384 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2385 ((A & N) + B) & M -> (A + B) & M
2386 Similarly if (N & M) == 0,
2387 ((A | N) + B) & M -> (A + B) & M
2388 and for - instead of + and/or ^ instead of |. */
2389 if (GET_CODE (trueop1) == CONST_INT
2390 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2391 && ~INTVAL (trueop1)
2392 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2393 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2398 pmop[0] = XEXP (op0, 0);
2399 pmop[1] = XEXP (op0, 1);
2401 for (which = 0; which < 2; which++)
2404 switch (GET_CODE (tem))
2407 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2408 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2409 == INTVAL (trueop1))
2410 pmop[which] = XEXP (tem, 0);
2414 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2415 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2416 pmop[which] = XEXP (tem, 0);
2423 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2425 tem = simplify_gen_binary (GET_CODE (op0), mode,
2427 return simplify_gen_binary (code, mode, tem, op1);
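/* Illustrative example (values chosen for exposition): with
M == 0xff (a low-bit mask) and N == 0x1ff, (N & M) == M, so
((A & 0x1ff) + B) & 0xff equals (A + B) & 0xff: bits of A above
the mask cannot affect the masked sum. */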
2430 tem = simplify_associative_operation (code, mode, op0, op1);
2436 /* 0/x is 0 (or x&0 if x has side-effects). */
2437 if (trueop0 == CONST0_RTX (mode))
2439 if (side_effects_p (op1))
2440 return simplify_gen_binary (AND, mode, op1, trueop0);
2444 if (trueop1 == CONST1_RTX (mode))
2445 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2446 /* Convert divide by power of two into shift. */
2447 if (GET_CODE (trueop1) == CONST_INT
2448 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2449 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
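/* e.g. (udiv X (const_int 8)) becomes (lshiftrt X (const_int 3)),
since exact_log2 (8) == 3. */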
2453 /* Handle floating point and integers separately. */
2454 if (SCALAR_FLOAT_MODE_P (mode))
2456 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2457 safe for modes with NaNs, since 0.0 / 0.0 will then be
2458 NaN rather than 0.0. Nor is it safe for modes with signed
2459 zeros, since dividing 0 by a negative number gives -0.0. */
2460 if (trueop0 == CONST0_RTX (mode)
2461 && !HONOR_NANS (mode)
2462 && !HONOR_SIGNED_ZEROS (mode)
2463 && ! side_effects_p (op1))
2466 if (trueop1 == CONST1_RTX (mode)
2467 && !HONOR_SNANS (mode))
2470 if (GET_CODE (trueop1) == CONST_DOUBLE
2471 && trueop1 != CONST0_RTX (mode))
2474 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2477 if (REAL_VALUES_EQUAL (d, dconstm1)
2478 && !HONOR_SNANS (mode))
2479 return simplify_gen_unary (NEG, mode, op0, mode);
2481 /* Change FP division by a constant into multiplication.
2482 Only do this with -funsafe-math-optimizations. */
2483 if (flag_unsafe_math_optimizations
2484 && !REAL_VALUES_EQUAL (d, dconst0))
2486 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2487 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2488 return simplify_gen_binary (MULT, mode, op0, tem);
2494 /* 0/x is 0 (or x&0 if x has side-effects). */
2495 if (trueop0 == CONST0_RTX (mode))
2497 if (side_effects_p (op1))
2498 return simplify_gen_binary (AND, mode, op1, trueop0);
2502 if (trueop1 == CONST1_RTX (mode))
2503 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2505 if (trueop1 == constm1_rtx)
2507 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2508 return simplify_gen_unary (NEG, mode, x, mode);
2514 /* 0%x is 0 (or x&0 if x has side-effects). */
2515 if (trueop0 == CONST0_RTX (mode))
2517 if (side_effects_p (op1))
2518 return simplify_gen_binary (AND, mode, op1, trueop0);
2521 /* x%1 is 0 (or x&0 if x has side-effects). */
2522 if (trueop1 == CONST1_RTX (mode))
2524 if (side_effects_p (op0))
2525 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2526 return CONST0_RTX (mode);
2528 /* Implement modulus by power of two as AND. */
2529 if (GET_CODE (trueop1) == CONST_INT
2530 && exact_log2 (INTVAL (trueop1)) > 0)
2531 return simplify_gen_binary (AND, mode, op0,
2532 GEN_INT (INTVAL (op1) - 1));
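/* e.g. (umod X (const_int 8)) becomes (and X (const_int 7)). */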
2536 /* 0%x is 0 (or x&0 if x has side-effects). */
2537 if (trueop0 == CONST0_RTX (mode))
2539 if (side_effects_p (op1))
2540 return simplify_gen_binary (AND, mode, op1, trueop0);
2543 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2544 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2546 if (side_effects_p (op0))
2547 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2548 return CONST0_RTX (mode);
2555 if (trueop1 == CONST0_RTX (mode))
2557 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2559 /* Rotating ~0 always results in ~0. */
2560 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2561 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2562 && ! side_effects_p (op1))
2568 if (trueop1 == CONST0_RTX (mode))
2570 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2575 if (trueop1 == CONST0_RTX (mode))
2577 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2579 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2580 if (GET_CODE (op0) == CLZ
2581 && GET_CODE (trueop1) == CONST_INT
2582 && STORE_FLAG_VALUE == 1
2583 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2585 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2586 unsigned HOST_WIDE_INT zero_val = 0;
2588 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2589 && zero_val == GET_MODE_BITSIZE (imode)
2590 && INTVAL (trueop1) == exact_log2 (zero_val))
2591 return simplify_gen_relational (EQ, mode, imode,
2592 XEXP (op0, 0), const0_rtx);
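/* e.g. for a 32-bit IMODE where CLZ_DEFINED_VALUE_AT_ZERO gives 32,
clz values lie in [0, 32], so (lshiftrt (clz X) (const_int 5))
is 1 exactly when clz (X) == 32, i.e. when X == 0. */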
2597 if (width <= HOST_BITS_PER_WIDE_INT
2598 && GET_CODE (trueop1) == CONST_INT
2599 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2600 && ! side_effects_p (op0))
2602 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2604 tem = simplify_associative_operation (code, mode, op0, op1);
2610 if (width <= HOST_BITS_PER_WIDE_INT
2611 && GET_CODE (trueop1) == CONST_INT
2612 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2613 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2614 && ! side_effects_p (op0))
2616 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2618 tem = simplify_associative_operation (code, mode, op0, op1);
2624 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2626 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2628 tem = simplify_associative_operation (code, mode, op0, op1);
2634 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2636 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2638 tem = simplify_associative_operation (code, mode, op0, op1);
2647 /* ??? There are simplifications that can be done. */
2651 if (!VECTOR_MODE_P (mode))
2653 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2654 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2655 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2656 gcc_assert (XVECLEN (trueop1, 0) == 1);
2657 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2659 if (GET_CODE (trueop0) == CONST_VECTOR)
2660 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2663 /* Extract a scalar element from a nested VEC_SELECT expression
2664 (with optional nested VEC_CONCAT expression). Some targets
2665 (i386) extract a scalar element from a vector using a chain of
2666 nested VEC_SELECT expressions. When the input operand is a
2667 memory operand, this operation can be simplified to a simple
2668 scalar load from an offset memory address. */
2669 if (GET_CODE (trueop0) == VEC_SELECT)
2671 rtx op0 = XEXP (trueop0, 0);
2672 rtx op1 = XEXP (trueop0, 1);
2674 enum machine_mode opmode = GET_MODE (op0);
2675 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2676 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2678 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2684 gcc_assert (GET_CODE (op1) == PARALLEL);
2685 gcc_assert (i < n_elts);
2687 /* Select the element pointed to by the nested selector. */
2688 elem = INTVAL (CONST_VECTOR_ELT (op1, i));
2690 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2691 if (GET_CODE (op0) == VEC_CONCAT)
2693 rtx op00 = XEXP (op0, 0);
2694 rtx op01 = XEXP (op0, 1);
2696 enum machine_mode mode00, mode01;
2697 int n_elts00, n_elts01;
2699 mode00 = GET_MODE (op00);
2700 mode01 = GET_MODE (op01);
2702 /* Find out the number of elements in each operand. */
2703 if (VECTOR_MODE_P (mode00))
2705 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2706 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2711 if (VECTOR_MODE_P (mode01))
2713 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2714 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2719 gcc_assert (n_elts == n_elts00 + n_elts01);
2721 /* Select the correct operand of the VEC_CONCAT
2722 and adjust the selector. */
2723 if (elem < n_elts00)
2734 vec = rtvec_alloc (1);
2735 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2737 tmp = gen_rtx_fmt_ee (code, mode,
2738 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2744 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2745 gcc_assert (GET_MODE_INNER (mode)
2746 == GET_MODE_INNER (GET_MODE (trueop0)));
2747 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2749 if (GET_CODE (trueop0) == CONST_VECTOR)
2751 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2752 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2753 rtvec v = rtvec_alloc (n_elts);
2756 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2757 for (i = 0; i < n_elts; i++)
2759 rtx x = XVECEXP (trueop1, 0, i);
2761 gcc_assert (GET_CODE (x) == CONST_INT);
2762 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2766 return gen_rtx_CONST_VECTOR (mode, v);
2770 if (XVECLEN (trueop1, 0) == 1
2771 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2772 && GET_CODE (trueop0) == VEC_CONCAT)
2775 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2777 /* Try to find the element in the VEC_CONCAT. */
2778 while (GET_MODE (vec) != mode
2779 && GET_CODE (vec) == VEC_CONCAT)
2781 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2782 if (offset < vec_size)
2783 vec = XEXP (vec, 0);
2787 vec = XEXP (vec, 1);
2789 vec = avoid_constant_pool_reference (vec);
2792 if (GET_MODE (vec) == mode)
2799 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2800 ? GET_MODE (trueop0)
2801 : GET_MODE_INNER (mode));
2802 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2803 ? GET_MODE (trueop1)
2804 : GET_MODE_INNER (mode));
2806 gcc_assert (VECTOR_MODE_P (mode));
2807 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2808 == GET_MODE_SIZE (mode));
2810 if (VECTOR_MODE_P (op0_mode))
2811 gcc_assert (GET_MODE_INNER (mode)
2812 == GET_MODE_INNER (op0_mode));
2814 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2816 if (VECTOR_MODE_P (op1_mode))
2817 gcc_assert (GET_MODE_INNER (mode)
2818 == GET_MODE_INNER (op1_mode));
2820 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2822 if ((GET_CODE (trueop0) == CONST_VECTOR
2823 || GET_CODE (trueop0) == CONST_INT
2824 || GET_CODE (trueop0) == CONST_DOUBLE)
2825 && (GET_CODE (trueop1) == CONST_VECTOR
2826 || GET_CODE (trueop1) == CONST_INT
2827 || GET_CODE (trueop1) == CONST_DOUBLE))
2829 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2830 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2831 rtvec v = rtvec_alloc (n_elts);
2833 unsigned in_n_elts = 1;
2835 if (VECTOR_MODE_P (op0_mode))
2836 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2837 for (i = 0; i < n_elts; i++)
2841 if (!VECTOR_MODE_P (op0_mode))
2842 RTVEC_ELT (v, i) = trueop0;
2844 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2848 if (!VECTOR_MODE_P (op1_mode))
2849 RTVEC_ELT (v, i) = trueop1;
2851 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2856 return gen_rtx_CONST_VECTOR (mode, v);
2869 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2872 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2874 unsigned int width = GET_MODE_BITSIZE (mode);
2876 if (VECTOR_MODE_P (mode)
2877 && code != VEC_CONCAT
2878 && GET_CODE (op0) == CONST_VECTOR
2879 && GET_CODE (op1) == CONST_VECTOR)
2881 unsigned n_elts = GET_MODE_NUNITS (mode);
2882 enum machine_mode op0mode = GET_MODE (op0);
2883 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2884 enum machine_mode op1mode = GET_MODE (op1);
2885 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2886 rtvec v = rtvec_alloc (n_elts);
2889 gcc_assert (op0_n_elts == n_elts);
2890 gcc_assert (op1_n_elts == n_elts);
2891 for (i = 0; i < n_elts; i++)
2893 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2894 CONST_VECTOR_ELT (op0, i),
2895 CONST_VECTOR_ELT (op1, i));
2898 RTVEC_ELT (v, i) = x;
2901 return gen_rtx_CONST_VECTOR (mode, v);
2904 if (VECTOR_MODE_P (mode)
2905 && code == VEC_CONCAT
2906 && CONSTANT_P (op0) && CONSTANT_P (op1))
2908 unsigned n_elts = GET_MODE_NUNITS (mode);
2909 rtvec v = rtvec_alloc (n_elts);
2911 gcc_assert (n_elts >= 2);
2914 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2915 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2917 RTVEC_ELT (v, 0) = op0;
2918 RTVEC_ELT (v, 1) = op1;
2922 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2923 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2926 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2927 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2928 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2930 for (i = 0; i < op0_n_elts; ++i)
2931 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2932 for (i = 0; i < op1_n_elts; ++i)
2933 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2936 return gen_rtx_CONST_VECTOR (mode, v);
2939 if (SCALAR_FLOAT_MODE_P (mode)
2940 && GET_CODE (op0) == CONST_DOUBLE
2941 && GET_CODE (op1) == CONST_DOUBLE
2942 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2953 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2955 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2957 for (i = 0; i < 4; i++)
2974 real_from_target (&r, tmp0, mode);
2975 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2979 REAL_VALUE_TYPE f0, f1, value, result;
2982 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2983 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2984 real_convert (&f0, mode, &f0);
2985 real_convert (&f1, mode, &f1);
2987 if (HONOR_SNANS (mode)
2988 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2992 && REAL_VALUES_EQUAL (f1, dconst0)
2993 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2996 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2997 && flag_trapping_math
2998 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3000 int s0 = REAL_VALUE_NEGATIVE (f0);
3001 int s1 = REAL_VALUE_NEGATIVE (f1);
3006 /* Inf + -Inf = NaN plus exception. */
3011 /* Inf - Inf = NaN plus exception. */
3016 /* Inf / Inf = NaN plus exception. */
3023 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3024 && flag_trapping_math
3025 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3026 || (REAL_VALUE_ISINF (f1)
3027 && REAL_VALUES_EQUAL (f0, dconst0))))
3028 /* Inf * 0 = NaN plus exception. */
3031 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3033 real_convert (&result, mode, &value);
3035 /* Don't constant fold this floating point operation if
3036 the result has overflowed and flag_trapping_math is set. */
3038 if (flag_trapping_math
3039 && MODE_HAS_INFINITIES (mode)
3040 && REAL_VALUE_ISINF (result)
3041 && !REAL_VALUE_ISINF (f0)
3042 && !REAL_VALUE_ISINF (f1))
3043 /* Overflow plus exception. */
3046 /* Don't constant fold this floating point operation if the
3047 result may depend upon the run-time rounding mode and
3048 flag_rounding_math is set, or if GCC's software emulation
3049 is unable to accurately represent the result. */
3051 if ((flag_rounding_math
3052 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
3053 && !flag_unsafe_math_optimizations))
3054 && (inexact || !real_identical (&result, &value)))
3057 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3061 /* We can fold some multi-word operations. */
3062 if (GET_MODE_CLASS (mode) == MODE_INT
3063 && width == HOST_BITS_PER_WIDE_INT * 2
3064 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
3065 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
3067 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3068 HOST_WIDE_INT h1, h2, hv, ht;
3070 if (GET_CODE (op0) == CONST_DOUBLE)
3071 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3073 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3075 if (GET_CODE (op1) == CONST_DOUBLE)
3076 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3078 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3083 /* A - B == A + (-B). */
3084 neg_double (l2, h2, &lv, &hv);
3087 /* Fall through.... */
3090 add_double (l1, h1, l2, h2, &lv, &hv);
3094 mul_double (l1, h1, l2, h2, &lv, &hv);
3098 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3099 &lv, &hv, <, &ht))
3104 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3105 <, &ht, &lv, &hv))
3110 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3111 &lv, &hv, <, &ht))
3116 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3117 <, &ht, &lv, &hv))
3122 lv = l1 & l2, hv = h1 & h2;
3126 lv = l1 | l2, hv = h1 | h2;
3130 lv = l1 ^ l2, hv = h1 ^ h2;
3136 && ((unsigned HOST_WIDE_INT) l1
3137 < (unsigned HOST_WIDE_INT) l2)))
3146 && ((unsigned HOST_WIDE_INT) l1
3147 > (unsigned HOST_WIDE_INT) l2)))
3154 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3156 && ((unsigned HOST_WIDE_INT) l1
3157 < (unsigned HOST_WIDE_INT) l2)))
3164 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3166 && ((unsigned HOST_WIDE_INT) l1
3167 > (unsigned HOST_WIDE_INT) l2)))
3173 case LSHIFTRT: case ASHIFTRT:
3175 case ROTATE: case ROTATERT:
3176 if (SHIFT_COUNT_TRUNCATED)
3177 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3179 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3182 if (code == LSHIFTRT || code == ASHIFTRT)
3183 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3185 else if (code == ASHIFT)
3186 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3187 else if (code == ROTATE)
3188 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3189 else /* code == ROTATERT */
3190 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3197 return immed_double_const (lv, hv, mode);
3200 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3201 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3203 /* Get the integer argument values in two forms:
3204 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3206 arg0 = INTVAL (op0);
3207 arg1 = INTVAL (op1);
3209 if (width < HOST_BITS_PER_WIDE_INT)
3211 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3212 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3215 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3216 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3219 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3220 arg1s |= ((HOST_WIDE_INT) (-1) << width);
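/* e.g. with width == 8, an argument of 0xff is zero-extended to
0xff in ARG0/ARG1 and sign-extended to -1 in ARG0S/ARG1S. */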
3228 /* Compute the value of the arithmetic. */
3233 val = arg0s + arg1s;
3237 val = arg0s - arg1s;
3241 val = arg0s * arg1s;
3246 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3249 val = arg0s / arg1s;
3254 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3257 val = arg0s % arg1s;
3262 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3265 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3270 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3273 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3291 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3292 the value is in range. We can't return any old value for
3293 out-of-range arguments because either the middle-end (via
3294 shift_truncation_mask) or the back-end might be relying on
3295 target-specific knowledge. Nor can we rely on
3296 shift_truncation_mask, since the shift might not be part of an
3297 ashlM3, lshrM3 or ashrM3 instruction. */
3298 if (SHIFT_COUNT_TRUNCATED)
3299 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3300 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3303 val = (code == ASHIFT
3304 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3305 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3307 /* Sign-extend the result for arithmetic right shifts. */
3308 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3309 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
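/* e.g. with width == 8, arg0 == 0x80 and arg1 == 4: the logical
shift gives 0x08 and the OR above restores the sign bits,
producing 0xf8 (-8). */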
3317 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3318 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3326 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3327 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
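/* e.g. with width == 8, rotating arg0 == 0x81 left by 1 gives
(0x81 << 1) | (0x81 >> 7) == 0x103, which gen_int_mode below
truncates to 0x03. */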
3331 /* Do nothing here. */
3335 val = arg0s <= arg1s ? arg0s : arg1s;
3339 val = ((unsigned HOST_WIDE_INT) arg0
3340 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3344 val = arg0s > arg1s ? arg0s : arg1s;
3348 val = ((unsigned HOST_WIDE_INT) arg0
3349 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3357 /* ??? There are simplifications that can be done. */
3364 return gen_int_mode (val, mode);
3372 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3375 Rather than test for specific cases, we do this by a brute-force method
3376 and do all possible simplifications until no more changes occur. Then
3377 we rebuild the operation. */
3379 struct simplify_plus_minus_op_data
3386 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3390 result = (commutative_operand_precedence (y)
3391 - commutative_operand_precedence (x));
3395 /* Group together equal REGs to do more simplification. */
3396 if (REG_P (x) && REG_P (y))
3397 return REGNO (x) > REGNO (y);
3403 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3406 struct simplify_plus_minus_op_data ops[8];
3408 int n_ops = 2, input_ops = 2;
3409 int changed, n_constants = 0, canonicalized = 0;
3412 memset (ops, 0, sizeof ops);
3414 /* Set up the two operands and then expand them until nothing has been
3415 changed. If we run out of room in our array, give up; this should
3416 almost never happen. */
3421 ops[1].neg = (code == MINUS);
3427 for (i = 0; i < n_ops; i++)
3429 rtx this_op = ops[i].op;
3430 int this_neg = ops[i].neg;
3431 enum rtx_code this_code = GET_CODE (this_op);
3440 ops[n_ops].op = XEXP (this_op, 1);
3441 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3444 ops[i].op = XEXP (this_op, 0);
3447 canonicalized |= this_neg;
3451 ops[i].op = XEXP (this_op, 0);
3452 ops[i].neg = ! this_neg;
3459 && GET_CODE (XEXP (this_op, 0)) == PLUS
3460 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3461 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3463 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3464 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3465 ops[n_ops].neg = this_neg;
3473 /* ~a -> (-a - 1) */
3476 ops[n_ops].op = constm1_rtx;
3477 ops[n_ops++].neg = this_neg;
3478 ops[i].op = XEXP (this_op, 0);
3479 ops[i].neg = !this_neg;
3489 ops[i].op = neg_const_int (mode, this_op);
3503 if (n_constants > 1)
3506 gcc_assert (n_ops >= 2);
3508 /* If we only have two operands, we can avoid the loops. */
3511 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3514 /* Get the two operands. Be careful with the order, especially for
3515 the cases where code == MINUS. */
3516 if (ops[0].neg && ops[1].neg)
3518 lhs = gen_rtx_NEG (mode, ops[0].op);
3521 else if (ops[0].neg)
3532 return simplify_const_binary_operation (code, mode, lhs, rhs);
3535 /* Now simplify each pair of operands until nothing changes. */
3538 /* Insertion sort is good enough for an eight-element array. */
3539 for (i = 1; i < n_ops; i++)
3541 struct simplify_plus_minus_op_data save;
3543 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3549 ops[j + 1] = ops[j];
3550 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3554 /* This is only useful the first time through. */
3559 for (i = n_ops - 1; i > 0; i--)
3560 for (j = i - 1; j >= 0; j--)
3562 rtx lhs = ops[j].op, rhs = ops[i].op;
3563 int lneg = ops[j].neg, rneg = ops[i].neg;
3565 if (lhs != 0 && rhs != 0)
3567 enum rtx_code ncode = PLUS;
3573 tem = lhs, lhs = rhs, rhs = tem;
3575 else if (swap_commutative_operands_p (lhs, rhs))
3576 tem = lhs, lhs = rhs, rhs = tem;
3578 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3579 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3581 rtx tem_lhs, tem_rhs;
3583 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3584 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3585 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3587 if (tem && !CONSTANT_P (tem))
3588 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3591 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3593 /* Reject "simplifications" that just wrap the two
3594 arguments in a CONST. Failure to do so can result
3595 in infinite recursion with simplify_binary_operation
3596 when it calls us to simplify CONST operations. */
3598 && ! (GET_CODE (tem) == CONST
3599 && GET_CODE (XEXP (tem, 0)) == ncode
3600 && XEXP (XEXP (tem, 0), 0) == lhs
3601 && XEXP (XEXP (tem, 0), 1) == rhs))
3604 if (GET_CODE (tem) == NEG)
3605 tem = XEXP (tem, 0), lneg = !lneg;
3606 if (GET_CODE (tem) == CONST_INT && lneg)
3607 tem = neg_const_int (mode, tem), lneg = 0;
3611 ops[j].op = NULL_RTX;
3617 /* Pack all the operands to the lower-numbered entries. */
3618 for (i = 0, j = 0; j < n_ops; j++)
3628 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3630 && GET_CODE (ops[1].op) == CONST_INT
3631 && CONSTANT_P (ops[0].op)
3633 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3635 /* We suppressed creation of trivial CONST expressions in the
3636 combination loop to avoid recursion. Create one manually now.
3637 The combination loop should have ensured that there is exactly
3638 one CONST_INT, and the sort will have ensured that it is last
3639 in the array and that any other constant will be next-to-last. */
3642 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3643 && CONSTANT_P (ops[n_ops - 2].op))
3645 rtx value = ops[n_ops - 1].op;
3646 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3647 value = neg_const_int (mode, value);
3648 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3652 /* Put a non-negated operand first, if possible. */
3654 for (i = 0; i < n_ops && ops[i].neg; i++)
3657 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3666 /* Now make the result by performing the requested operations. */
3668 for (i = 1; i < n_ops; i++)
3669 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3670 mode, result, ops[i].op);
3675 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3677 plus_minus_operand_p (const_rtx x)
3679 return GET_CODE (x) == PLUS
3680 || GET_CODE (x) == MINUS
3681 || (GET_CODE (x) == CONST
3682 && GET_CODE (XEXP (x, 0)) == PLUS
3683 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3684 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3687 /* Like simplify_binary_operation except used for relational operators.
3688 MODE is the mode of the result. If MODE is VOIDmode, the operands
3689 must not both be VOIDmode as well.
3691 CMP_MODE specifies the mode in which the comparison is done, so it is
3692 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3693 the operands or, if both are VOIDmode, the operands are compared in
3694 "infinite precision". */
3696 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3697 enum machine_mode cmp_mode, rtx op0, rtx op1)
3699 rtx tem, trueop0, trueop1;
3701 if (cmp_mode == VOIDmode)
3702 cmp_mode = GET_MODE (op0);
3703 if (cmp_mode == VOIDmode)
3704 cmp_mode = GET_MODE (op1);
3706 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3709 if (SCALAR_FLOAT_MODE_P (mode))
3711 if (tem == const0_rtx)
3712 return CONST0_RTX (mode);
3713 #ifdef FLOAT_STORE_FLAG_VALUE
3715 REAL_VALUE_TYPE val;
3716 val = FLOAT_STORE_FLAG_VALUE (mode);
3717 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3723 if (VECTOR_MODE_P (mode))
3725 if (tem == const0_rtx)
3726 return CONST0_RTX (mode);
3727 #ifdef VECTOR_STORE_FLAG_VALUE
3732 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3733 if (val == NULL_RTX)
3735 if (val == const1_rtx)
3736 return CONST1_RTX (mode);
3738 units = GET_MODE_NUNITS (mode);
3739 v = rtvec_alloc (units);
3740 for (i = 0; i < units; i++)
3741 RTVEC_ELT (v, i) = val;
3742 return gen_rtx_raw_CONST_VECTOR (mode, v);
3752 /* For the following tests, ensure const0_rtx is op1. */
3753 if (swap_commutative_operands_p (op0, op1)
3754 || (op0 == const0_rtx && op1 != const0_rtx))
3755 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3757 /* If op0 is a compare, extract the comparison arguments from it. */
3758 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3759 return simplify_relational_operation (code, mode, VOIDmode,
3760 XEXP (op0, 0), XEXP (op0, 1));
3762 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3766 trueop0 = avoid_constant_pool_reference (op0);
3767 trueop1 = avoid_constant_pool_reference (op1);
3768 return simplify_relational_operation_1 (code, mode, cmp_mode,
3772 /* This part of simplify_relational_operation is only used when CMP_MODE
3773 is not in class MODE_CC (i.e. it is a real comparison).
3775 MODE is the mode of the result, while CMP_MODE specifies the mode
3776 in which the comparison is done, so it is the mode of the operands. */
3779 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3780 enum machine_mode cmp_mode, rtx op0, rtx op1)
3782 enum rtx_code op0code = GET_CODE (op0);
3784 if (op1 == const0_rtx && COMPARISON_P (op0))
3786 /* If op0 is a comparison, extract the comparison arguments
3790 if (GET_MODE (op0) == mode)
3791 return simplify_rtx (op0);
3793 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3794 XEXP (op0, 0), XEXP (op0, 1));
3796 else if (code == EQ)
3798 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3799 if (new_code != UNKNOWN)
3800 return simplify_gen_relational (new_code, mode, VOIDmode,
3801 XEXP (op0, 0), XEXP (op0, 1));
3805 if (op1 == const0_rtx)
3807 /* Canonicalize (GTU x 0) as (NE x 0). */
3809 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3810 /* Canonicalize (LEU x 0) as (EQ x 0). */
3812 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3814 else if (op1 == const1_rtx)
3819 /* Canonicalize (GE x 1) as (GT x 0). */
3820 return simplify_gen_relational (GT, mode, cmp_mode,
3823 /* Canonicalize (GEU x 1) as (NE x 0). */
3824 return simplify_gen_relational (NE, mode, cmp_mode,
3827 /* Canonicalize (LT x 1) as (LE x 0). */
3828 return simplify_gen_relational (LE, mode, cmp_mode,
3831 /* Canonicalize (LTU x 1) as (EQ x 0). */
3832 return simplify_gen_relational (EQ, mode, cmp_mode,
3838 else if (op1 == constm1_rtx)
3840 /* Canonicalize (LE x -1) as (LT x 0). */
3842 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3843 /* Canonicalize (GT x -1) as (GE x 0). */
3845 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3848 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3849 if ((code == EQ || code == NE)
3850 && (op0code == PLUS || op0code == MINUS)
3852 && CONSTANT_P (XEXP (op0, 1))
3853 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3855 rtx x = XEXP (op0, 0);
3856 rtx c = XEXP (op0, 1);
3858 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3860 return simplify_gen_relational (code, mode, cmp_mode, x, c);
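/* e.g. (eq (plus X (const_int 3)) (const_int 10)) becomes
(eq X (const_int 7)). */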
3863 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3864 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3866 && op1 == const0_rtx
3867 && GET_MODE_CLASS (mode) == MODE_INT
3868 && cmp_mode != VOIDmode
3869 /* ??? Work-around BImode bugs in the ia64 backend. */
3871 && cmp_mode != BImode
3872 && nonzero_bits (op0, cmp_mode) == 1
3873 && STORE_FLAG_VALUE == 1)
3874 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3875 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3876 : lowpart_subreg (mode, op0, cmp_mode);
3878 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3879 if ((code == EQ || code == NE)
3880 && op1 == const0_rtx
3882 return simplify_gen_relational (code, mode, cmp_mode,
3883 XEXP (op0, 0), XEXP (op0, 1));
3885 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3886 if ((code == EQ || code == NE)
3888 && rtx_equal_p (XEXP (op0, 0), op1)
3889 && !side_effects_p (XEXP (op0, 0)))
3890 return simplify_gen_relational (code, mode, cmp_mode,
3891 XEXP (op0, 1), const0_rtx);
3893 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3894 if ((code == EQ || code == NE)
3896 && rtx_equal_p (XEXP (op0, 1), op1)
3897 && !side_effects_p (XEXP (op0, 1)))
3898 return simplify_gen_relational (code, mode, cmp_mode,
3899 XEXP (op0, 0), const0_rtx);
3901 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3902 if ((code == EQ || code == NE)
3904 && (GET_CODE (op1) == CONST_INT
3905 || GET_CODE (op1) == CONST_DOUBLE)
3906 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3907 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3908 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3909 simplify_gen_binary (XOR, cmp_mode,
3910 XEXP (op0, 1), op1));
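/* e.g. (eq (xor X (const_int 5)) (const_int 3)) becomes
(eq X (const_int 6)), since 5 ^ 3 == 6. */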
3912 if (op0code == POPCOUNT && op1 == const0_rtx)
3918 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3919 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3920 XEXP (op0, 0), const0_rtx);
3925 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3926 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3927 XEXP (op0, 0), const0_rtx);
3936 /* Check if the given comparison (done in the given MODE) is actually a
3937 tautology or a contradiction.
3938 If no simplification is possible, this function returns zero.
3939 Otherwise, it returns either const_true_rtx or const0_rtx. */
3942 simplify_const_relational_operation (enum rtx_code code,
3943 enum machine_mode mode,
3946 int equal, op0lt, op0ltu, op1lt, op1ltu;
3951 gcc_assert (mode != VOIDmode
3952 || (GET_MODE (op0) == VOIDmode
3953 && GET_MODE (op1) == VOIDmode));
3955 /* If op0 is a compare, extract the comparison arguments from it. */
3956 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3958 op1 = XEXP (op0, 1);
3959 op0 = XEXP (op0, 0);
3961 if (GET_MODE (op0) != VOIDmode)
3962 mode = GET_MODE (op0);
3963 else if (GET_MODE (op1) != VOIDmode)
3964 mode = GET_MODE (op1);
3969 /* We can't simplify MODE_CC values since we don't know what the
3970 actual comparison is. */
3971 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3974 /* Make sure the constant is second. */
3975 if (swap_commutative_operands_p (op0, op1))
3977 tem = op0, op0 = op1, op1 = tem;
3978 code = swap_condition (code);
3981 trueop0 = avoid_constant_pool_reference (op0);
3982 trueop1 = avoid_constant_pool_reference (op1);
3984 /* For integer comparisons of A and B, maybe we can simplify A - B
3985 and then simplify a comparison of that with zero. If A and B are both either
3986 a register or a CONST_INT, this can't help; testing for these cases will
3987 prevent infinite recursion here and speed things up.
3989 We can only do this for EQ and NE comparisons as otherwise we may
3990 lose or introduce overflow which we cannot disregard as undefined,
3991 since we do not know the signedness of the operation on either the
3992 left or the right hand side of the comparison. */
3994 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3995 && (code == EQ || code == NE)
3996 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3997 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3998 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3999 /* We cannot do this if tem is a nonzero address. */
4000 && ! nonzero_address_p (tem))
4001 return simplify_const_relational_operation (signed_condition (code),
4002 mode, tem, const0_rtx);
4004 if (! HONOR_NANS (mode) && code == ORDERED)
4005 return const_true_rtx;
4007 if (! HONOR_NANS (mode) && code == UNORDERED)
4010 /* For modes without NaNs, if the two operands are equal, we know the
4011 result except if they have side-effects. */
4012 if (! HONOR_NANS (GET_MODE (trueop0))
4013 && rtx_equal_p (trueop0, trueop1)
4014 && ! side_effects_p (trueop0))
4015 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
4017 /* If the operands are floating-point constants, see if we can fold
4019 else if (GET_CODE (trueop0) == CONST_DOUBLE
4020 && GET_CODE (trueop1) == CONST_DOUBLE
4021 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4023 REAL_VALUE_TYPE d0, d1;
4025 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4026 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4028 /* Comparisons are unordered iff at least one of the values is NaN. */
4029 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4039 return const_true_rtx;
4052 equal = REAL_VALUES_EQUAL (d0, d1);
4053 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
4054 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
4057 /* Otherwise, see if the operands are both integers. */
4058 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4059 && (GET_CODE (trueop0) == CONST_DOUBLE
4060 || GET_CODE (trueop0) == CONST_INT)
4061 && (GET_CODE (trueop1) == CONST_DOUBLE
4062 || GET_CODE (trueop1) == CONST_INT))
4064 int width = GET_MODE_BITSIZE (mode);
4065 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4066 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4068 /* Get the two words comprising each integer constant. */
4069 if (GET_CODE (trueop0) == CONST_DOUBLE)
4071 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4072 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4076 l0u = l0s = INTVAL (trueop0);
4077 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4080 if (GET_CODE (trueop1) == CONST_DOUBLE)
4082 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4083 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4087 l1u = l1s = INTVAL (trueop1);
4088 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4091 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4092 we have to sign or zero-extend the values. */
4093 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4095 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4096 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4098 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4099 l0s |= ((HOST_WIDE_INT) (-1) << width);
4101 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4102 l1s |= ((HOST_WIDE_INT) (-1) << width);
4104 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4105 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4107 equal = (h0u == h1u && l0u == l1u);
4108 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
4109 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
4110 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4111 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
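/* e.g. comparing (const_int -1) with (const_int 1) in SImode:
EQUAL == 0, OP0LT == 1 (signed -1 < 1), but OP0LTU == 0, since
-1 is 0xffffffff when treated as unsigned. */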
4114 /* Otherwise, there are some code-specific tests we can make. */
4117 /* Optimize comparisons with upper and lower bounds. */
4118 if (SCALAR_INT_MODE_P (mode)
4119 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4132 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
4139 /* x >= min is always true. */
4140 if (rtx_equal_p (trueop1, mmin))
4141 tem = const_true_rtx;
4147 /* x <= max is always true. */
4148 if (rtx_equal_p (trueop1, mmax))
4149 tem = const_true_rtx;
4154 /* x > max is always false. */
4155 if (rtx_equal_p (trueop1, mmax))
4161 /* x < min is always false. */
4162 if (rtx_equal_p (trueop1, mmin))
4169 if (tem == const0_rtx
4170 || tem == const_true_rtx)
4177 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4182 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4183 return const_true_rtx;
4187 /* Optimize abs(x) < 0.0. */
4188 if (trueop1 == CONST0_RTX (mode)
4189 && !HONOR_SNANS (mode)
4190 && (!INTEGRAL_MODE_P (mode)
4191 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4193 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4195 if (GET_CODE (tem) == ABS)
4197 if (INTEGRAL_MODE_P (mode)
4198 && (issue_strict_overflow_warning
4199 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4200 warning (OPT_Wstrict_overflow,
4201 ("assuming signed overflow does not occur when "
4202 "assuming abs (x) < 0 is false"));
4207 /* Optimize popcount (x) < 0. */
4208 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4209 return const_true_rtx;
4213 /* Optimize abs(x) >= 0.0. */
4214 if (trueop1 == CONST0_RTX (mode)
4215 && !HONOR_NANS (mode)
4216 && (!INTEGRAL_MODE_P (mode)
4217 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4219 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4221 if (GET_CODE (tem) == ABS)
4223 if (INTEGRAL_MODE_P (mode)
4224 && (issue_strict_overflow_warning
4225 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4226 warning (OPT_Wstrict_overflow,
4227 ("assuming signed overflow does not occur when "
4228 "assuming abs (x) >= 0 is true"));
4229 return const_true_rtx;
4233 /* Optimize popcount (x) >= 0. */
4234 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4235 return const_true_rtx;
4239 /* Optimize ! (abs(x) < 0.0). */
4240 if (trueop1 == CONST0_RTX (mode))
4242 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4244 if (GET_CODE (tem) == ABS)
4245 return const_true_rtx;
4256 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4262 return equal ? const_true_rtx : const0_rtx;
4265 return ! equal ? const_true_rtx : const0_rtx;
4268 return op0lt ? const_true_rtx : const0_rtx;
4271 return op1lt ? const_true_rtx : const0_rtx;
4273 return op0ltu ? const_true_rtx : const0_rtx;
4275 return op1ltu ? const_true_rtx : const0_rtx;
4278 return equal || op0lt ? const_true_rtx : const0_rtx;
4281 return equal || op1lt ? const_true_rtx : const0_rtx;
4283 return equal || op0ltu ? const_true_rtx : const0_rtx;
4285 return equal || op1ltu ? const_true_rtx : const0_rtx;
4287 return const_true_rtx;
4295 /* Simplify CODE, an operation with result mode MODE and three operands,
4296 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4297 a constant. Return 0 if no simplification is possible. */
4300 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4301 enum machine_mode op0_mode, rtx op0, rtx op1,
4304 unsigned int width = GET_MODE_BITSIZE (mode);
4306 /* VOIDmode means "infinite" precision. */
4308 width = HOST_BITS_PER_WIDE_INT;
4314 if (GET_CODE (op0) == CONST_INT
4315 && GET_CODE (op1) == CONST_INT
4316 && GET_CODE (op2) == CONST_INT
4317 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4318 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4320 /* Extracting a bit-field from a constant. */
4321 HOST_WIDE_INT val = INTVAL (op0);
4323 if (BITS_BIG_ENDIAN)
4324 val >>= (GET_MODE_BITSIZE (op0_mode)
4325 - INTVAL (op2) - INTVAL (op1));
4327 val >>= INTVAL (op2);
4329 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4331 /* First zero-extend. */
4332 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4333 /* If desired, propagate sign bit. */
4334 if (code == SIGN_EXTRACT
4335 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4336 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4339 /* Clear the bits that don't belong in our mode,
4340 unless they and our sign bit are all one.
4341 So we get either a reasonable negative value or a reasonable
4342 unsigned value for this mode. */
4343 if (width < HOST_BITS_PER_WIDE_INT
4344 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4345 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4346 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4348 return gen_int_mode (val, mode);
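/* Worked example (illustrative values, !BITS_BIG_ENDIAN):
extracting 4 bits at position 4 from op0 == 0xab shifts right
by 4 and masks, giving 0xa; SIGN_EXTRACT then propagates bit 3,
yielding -6. */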
4353 if (GET_CODE (op0) == CONST_INT)
4354 return op0 != const0_rtx ? op1 : op2;
4356 /* Convert c ? a : a into "a". */
4357 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4360 /* Convert a != b ? a : b into "a". */
4361 if (GET_CODE (op0) == NE
4362 && ! side_effects_p (op0)
4363 && ! HONOR_NANS (mode)
4364 && ! HONOR_SIGNED_ZEROS (mode)
4365 && ((rtx_equal_p (XEXP (op0, 0), op1)
4366 && rtx_equal_p (XEXP (op0, 1), op2))
4367 || (rtx_equal_p (XEXP (op0, 0), op2)
4368 && rtx_equal_p (XEXP (op0, 1), op1))))
4371 /* Convert a == b ? a : b into "b". */
4372 if (GET_CODE (op0) == EQ
4373 && ! side_effects_p (op0)
4374 && ! HONOR_NANS (mode)
4375 && ! HONOR_SIGNED_ZEROS (mode)
4376 && ((rtx_equal_p (XEXP (op0, 0), op1)
4377 && rtx_equal_p (XEXP (op0, 1), op2))
4378 || (rtx_equal_p (XEXP (op0, 0), op2)
4379 && rtx_equal_p (XEXP (op0, 1), op1))))
4382 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4384 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4385 ? GET_MODE (XEXP (op0, 1))
4386 : GET_MODE (XEXP (op0, 0)));
4389 /* Look for happy constants in op1 and op2. */
4390 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4392 HOST_WIDE_INT t = INTVAL (op1);
4393 HOST_WIDE_INT f = INTVAL (op2);
4395 if (t == STORE_FLAG_VALUE && f == 0)
4396 code = GET_CODE (op0);
4397 else if (t == 0 && f == STORE_FLAG_VALUE)
4400 tmp = reversed_comparison_code (op0, NULL_RTX);
4408 return simplify_gen_relational (code, mode, cmp_mode,
4409 XEXP (op0, 0), XEXP (op0, 1));
4412 if (cmp_mode == VOIDmode)
4413 cmp_mode = op0_mode;
4414 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4415 cmp_mode, XEXP (op0, 0),
4418 /* See if any simplifications were possible. */
4421 if (GET_CODE (temp) == CONST_INT)
4422 return temp == const0_rtx ? op2 : op1;
4424 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4430 gcc_assert (GET_MODE (op0) == mode);
4431 gcc_assert (GET_MODE (op1) == mode);
4432 gcc_assert (VECTOR_MODE_P (mode));
4433 op2 = avoid_constant_pool_reference (op2);
4434 if (GET_CODE (op2) == CONST_INT)
4436 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4437 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4438 int mask = (1 << n_elts) - 1;
4440 if (!(INTVAL (op2) & mask))
4442 if ((INTVAL (op2) & mask) == mask)
4445 op0 = avoid_constant_pool_reference (op0);
4446 op1 = avoid_constant_pool_reference (op1);
4447 if (GET_CODE (op0) == CONST_VECTOR
4448 && GET_CODE (op1) == CONST_VECTOR)
4450 rtvec v = rtvec_alloc (n_elts);
4453 for (i = 0; i < n_elts; i++)
4454 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4455 ? CONST_VECTOR_ELT (op0, i)
4456 : CONST_VECTOR_ELT (op1, i));
4457 return gen_rtx_CONST_VECTOR (mode, v);
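/* e.g. for a two-element vector with op2 == (const_int 1), the set
bit 0 selects element 0 from op0 and the clear bit 1 selects
element 1 from op1. */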
4469 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4470 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4472 Works by unpacking OP into a collection of 8-bit values
4473 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4474 and then repacking them for OUTERMODE. */
4477 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4478 enum machine_mode innermode, unsigned int byte)
4480 /* We support up to 512-bit values (for V8DFmode). */
4484 value_mask = (1 << value_bit) - 1
4486 unsigned char value[max_bitsize / value_bit];
4495 rtvec result_v = NULL;
4496 enum mode_class outer_class;
4497 enum machine_mode outer_submode;
4499 /* Some ports misuse CCmode. */
4500 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4503 /* We have no way to represent a complex constant at the rtl level. */
4504 if (COMPLEX_MODE_P (outermode))
4507 /* Unpack the value. */
4509 if (GET_CODE (op) == CONST_VECTOR)
4511 num_elem = CONST_VECTOR_NUNITS (op);
4512 elems = &CONST_VECTOR_ELT (op, 0);
4513 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4519 elem_bitsize = max_bitsize;
4521 /* If this asserts, it is too complicated; reducing value_bit may help. */
4522 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4523 /* I don't know how to handle endianness of sub-units. */
4524 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4526 for (elem = 0; elem < num_elem; elem++)
4529 rtx el = elems[elem];
4531 /* Vectors are kept in target memory order. (This is probably
4534 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4535 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4537 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4538 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4539 unsigned bytele = (subword_byte % UNITS_PER_WORD
4540 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4541 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4544 switch (GET_CODE (el))
4548 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4550 *vp++ = INTVAL (el) >> i;
4551 /* CONST_INTs are always logically sign-extended. */
4552 for (; i < elem_bitsize; i += value_bit)
4553 *vp++ = INTVAL (el) < 0 ? -1 : 0;
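/* e.g. with value_bit == 8, a CONST_INT of -2 unpacks to
0xfe, 0xff, 0xff, ...: the low chunk is 0xfe and the sign
extension supplies the trailing 0xff chunks. */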
4557 if (GET_MODE (el) == VOIDmode)
4559 /* If this triggers, someone should have generated a
4560 CONST_INT instead. */
4561 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4563 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4564 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4565 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4568 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4571 /* It shouldn't matter what's done here, so fill it with
4573 for (; i < elem_bitsize; i += value_bit)
4578 long tmp[max_bitsize / 32];
4579 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4581 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4582 gcc_assert (bitsize <= elem_bitsize);
4583 gcc_assert (bitsize % value_bit == 0);
4585 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4588 /* real_to_target produces its result in words affected by
4589 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4590 and use WORDS_BIG_ENDIAN instead; see the documentation
4591 of SUBREG in rtl.texi. */
4592 for (i = 0; i < bitsize; i += value_bit)
4595 if (WORDS_BIG_ENDIAN)
4596 ibase = bitsize - 1 - i;
4599 *vp++ = tmp[ibase / 32] >> i % 32;
4602 /* It shouldn't matter what's done here, so fill it with
4604 for (; i < elem_bitsize; i += value_bit)
4614 /* Now, pick the right byte to start with. */
4615 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4616 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4617 will already have offset 0. */
4618 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4620 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4622 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4623 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4624 byte = (subword_byte % UNITS_PER_WORD
4625 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
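/* e.g. on a big-endian target (assuming UNITS_PER_WORD == 4), the
SImode SUBREG at byte 0 of a DImode value is its most significant
half; ibyte == 4 redirects the read to offset 4 of the
little-endian VALUE array, where those bytes live. */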
  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));
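	    /* At this point LO and HI hold the element read back in
	       little-endian chunk order; e.g. (illustrative) chunks
	       0xfe, 0xff for a 16-bit element give lo == 0xfffe, which
	       gen_int_mode below canonicalizes to (const_int -2).  */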
	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
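/* Worked example of the routine above (illustrative, little-endian):

     simplify_immed_subreg (QImode, GEN_INT (0x1234), HImode, 0)

   unpacks 0x1234 into the chunks { 0x34, 0x12, ... }, renumbers BYTE 0
   to the least-significant chunk, and re-packs it as (const_int 0x34).  */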
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* A little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));
  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);
      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  An irritating exception is the paradoxical subreg,
	 whose SUBREG_BYTE we define to be 0.  On big endian machines, the
	 true value would be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
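      /* E.g. (illustrative) for the adjustment above: with OUTERMODE
	 DImode and INNERMODE SImode on a big-endian, 32-bit-word target,
	 DIFFERENCE is -4 and FINAL_OFFSET is adjusted by -4, recovering
	 the negative offset that SUBREG_BYTE == 0 papers over.  */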
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case the resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
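  /* Illustrative example of the SUBREG-of-SUBREG case above:
     (subreg:QI (subreg:HI (reg:SI x) 2) 0) collapses to
     (subreg:QI (reg:SI x) 2), assuming a little-endian target.  */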
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
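  /* E.g. (illustrative) (subreg:QI (truncate:HI (reg:SI x)) 0) on a
     little-endian target becomes (truncate:QI (reg:SI x)), folding the
     two truncations into one.  */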
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	     ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
	= regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (regno, innermode))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust the offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok a partial register anyway.  */
	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
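  /* Illustrative example of the hard register case above: on a target
     where adjacent SImode registers form DImode values,
     (subreg:SI (reg:DI 0) 4) can fold directly to (reg:SI 1), provided
     hard register 1 can hold SImode.  */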
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
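  /* E.g. (illustrative) (subreg:SI (mem:DI (reg:SI ptr)) 4) becomes
     (mem:SI (plus:SI (reg:SI ptr) (const_int 4))), as adjust_address_nv
     folds the byte offset into the address instead of keeping the
     SUBREG.  */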
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
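  /* E.g. (illustrative, with 4-byte SFmode)
     (subreg:SF (concat:SC (reg:SF re) (reg:SF im)) 4) selects the
     imaginary part and simplifies to (reg:SF im).  */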
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source provides.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
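  /* Illustrative examples of the above:
     (subreg:HI (zero_extend:SI (reg:HI x)) 0) on a little-endian target
     is just (reg:HI x), while (subreg:HI (zero_extend:SI (reg:HI x)) 2)
     reads only the extension's zero bits and folds to (const_int 0).  */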
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
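/* Usage sketch for the routine above (illustrative only):

     rtx lo = simplify_gen_subreg (SImode, op, DImode,
				   subreg_lowpart_offset (SImode, DImode));

   returns the folded low part when OP is something simplify_subreg can
   handle, a fresh (subreg:SI ...) when one is valid, or NULL_RTX.  */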
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))