/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"

/* Simplification and canonicalization of RTL.  */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
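
/* For instance, when HOST_BITS_PER_WIDE_INT is 64, HWI_SIGN_EXTEND
   of a low word with its top bit set is (HOST_WIDE_INT) -1, and 0
   otherwise; that is exactly the high word which sign extending the
   low word would produce.  */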
static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
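
/* For example, in SImode this accepts only a constant whose low 32
   bits are 0x80000000 (bit 31, the sign bit, alone); constants wider
   than HOST_BITS_PER_WIDE_INT are checked via the two halves of a
   CONST_DOUBLE.  */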
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
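
/* For example, given a non-constant register X, simplify_gen_binary
   (PLUS, SImode, const1_rtx, X) fails to fold, swaps the operands,
   and returns (plus X (const_int 1)), the canonical order with the
   constant second.  */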
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
	 originally stored, attempt to fix that up via subreg simplifications.
	 If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
	{
	  rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
	  if (tem && CONSTANT_P (tem))
	    return tem;
	}
      else
	return c;
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (rtx_equal_p (x, old_rtx))
	    return new_rtx;
	}
      break;

    default:
      break;
    }
  return x;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}
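
      /* E.g. (not (ashift (const_int 1) X)) is all ones except for a
	 single zero bit at position X, and rotating ~1 (all ones with
	 bit 0 clear) left by X positions produces exactly that.  */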
      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}
      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;
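
      /* Concretely, (not (and X Y)) becomes (ior (not X) (not Y)) and
	 (not (ior X Y)) becomes (and (not X) (not Y)); on machines with
	 nand, nor or and-not insns the rewritten form is more likely to
	 match a single pattern.  */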
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (GET_CODE (XEXP (op, 1)) == CONST_INT
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult (neg A) B).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}
      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));
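
      /* Both rewrites exploit the fact that only the sign bit survives
	 a shift right by width-1 bits: ASHIFTRT gives 0 or -1 and
	 LSHIFTRT gives 0 or 1, and negation maps each result onto the
	 other.  */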
      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx)
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_BITSIZE (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;
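
      /* Rationale: (lt x 0) just tests the sign bit of x, so with
	 STORE_FLAG_VALUE == 1 its negation is 0 or -1, which is what
	 (ashiftrt x (width-1)) computes directly; the -1 case is the
	 mirror image.  */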
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
	 (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
				   GET_MODE (XEXP (SUBREG_REG (op), 0)));
      /* If we know that the value is already truncated, we can
	 replace the TRUNCATE with a SUBREG.  Note that this is also
	 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
	 modes we just have to apply a different definition for
	 truncation.  But don't do this for an (LSHIFTRT (MULT ...))
	 since this will cause problems with the umulXi3_highpart
	 patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				  GET_MODE_BITSIZE (GET_MODE (op)))
	   ? (num_sign_bit_copies (op, GET_MODE (op))
	      > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
				- GET_MODE_BITSIZE (mode)))
	   : truncated_to_mode (mode, op))
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  This is like the previous test,
	 but it works even if the comparison is done in a mode larger
	 than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && COMPARISON_P (op)
	  && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
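
      /* For instance, with STORE_FLAG_VALUE == 1 a comparison result
	 is always 0 or 1, so narrowing it cannot lose significant
	 bits and the TRUNCATE can become a plain lowpart SUBREG.  */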
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
	 = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

	 (float_truncate:SF (float_extend:XF foo:DF))
	 = (float_truncate:SF foo:DF).

	 (float_truncate:DF (float_extend:XF foo:SF))
	 = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || ((unsigned) significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_extend (float_extend x)) is (float_extend x)

	 (float_extend (float x)) is (float x) assuming that double
	 rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && ((unsigned) significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
	 do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || ((GET_MODE_BITSIZE (GET_MODE (op))
	       <= HOST_BITS_PER_WIDE_INT)
	      && ((nonzero_bits (op, GET_MODE (op))
		   & ((HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
		  == 0)))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
	return gen_rtx_NEG (mode, op);
      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
	{
	case BSWAP:
	case ZERO_EXTEND:
	  /* (popcount (zero_extend <X>)) = (popcount <X>) */
	  return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect popcount.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;
    case PARITY:
      switch (GET_CODE (op))
	{
	case NOT:
	case BSWAP:
	case ZERO_EXTEND:
	case SIGN_EXTEND:
	  return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));

	case ROTATE:
	case ROTATERT:
	  /* Rotations don't affect parity.  */
	  if (!side_effects_p (XEXP (op, 1)))
	    return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
				       GET_MODE (XEXP (op, 0)));
	  break;

	default:
	  break;
	}
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
	return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
	}
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    val = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;
		byte = (arg0 >> s) & 0xff;
		val |= byte << d;
	      }
	  }
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }
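
  /* The bit tricks used above are the usual ones: ARG0 & -ARG0
     isolates the lowest set bit (whose index exact_log2 then
     reports), and ARG0 &= ARG0 - 1 clears the lowest set bit, so the
     POPCOUNT and PARITY loops iterate once per set bit.  */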
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case BSWAP:
	  {
	    unsigned int s;

	    hv = 0;
	    lv = 0;
	    for (s = 0; s < width; s += 8)
	      {
		unsigned int d = width - s - 8;
		unsigned HOST_WIDE_INT byte;

		if (s < HOST_BITS_PER_WIDE_INT)
		  byte = (l1 >> s) & 0xff;
		else
		  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

		if (d < HOST_BITS_PER_WIDE_INT)
		  lv |= byte << d;
		else
		  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
	      }
	  }
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2 * HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
	return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
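
/* Worked example: for a commutative-associative CODE, an input of the
   shape ((x op c1) op (y op c2)) is first linearized to
   (((x op c1) op y) op c2); the "(a op b) op c" rules can then bring
   c1 and c2 together so that the two constants fold into one.  */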
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	  && GET_CODE (op1) == CONST_INT)
	return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
	       && GET_CODE (op0) == CONST_INT)
	return plus_constant (op1, INTVAL (op0));
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
	  unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
	  rtx lhs = op0, rhs = op1;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0l = -1;
	      coeff0h = -1;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
	    {
	      coeff0l = INTVAL (XEXP (lhs, 1));
	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
	      coeff0h = 0;
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1l = -1;
	      coeff1h = -1;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
	      coeff1l = INTVAL (XEXP (rhs, 1));
	      coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
	      coeff1h = 0;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      unsigned HOST_WIDE_INT l;
	      HOST_WIDE_INT h;

	      add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
	      coeff = immed_double_const (l, h, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		? tem : 0;
	    }
	}
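
      /* Thus (plus (mult x 3) x) can fold to (mult x 4), and
	 (plus (ashift x 2) x) to (mult x 5); the rtx_cost test above
	 keeps the result only if it is no more expensive than the
	 original expression.  */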
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
	   || GET_CODE (op1) == CONST_DOUBLE)
	  && GET_CODE (op0) == XOR
	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);
      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case COMPARE:
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	 using cc0, in which case we want to leave it as a COMPARE
	 so we can distinguish it from a register-register-copy.

	 In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	   || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
	    return xop00;
	}
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -funsafe-math-optimizations and
	 -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (! FLOAT_MODE_P (mode)
	      || (flag_unsafe_math_optimizations
		  && !HONOR_NANS (mode)
		  && !HONOR_INFINITIES (mode))))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;
      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
	  unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
	  rtx lhs = op0, rhs = op1;

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0l = -1;
	      coeff0h = -1;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
	    {
	      coeff0l = INTVAL (XEXP (lhs, 1));
	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
	      coeff0h = 0;
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1l = 1;
	      negcoeff1h = 0;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
	    {
	      negcoeff1l = -INTVAL (XEXP (rhs, 1));
	      negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
	    {
	      negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
	      negcoeff1h = -1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      unsigned HOST_WIDE_INT l;
	      HOST_WIDE_INT h;

	      add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
	      coeff = immed_double_const (l, h, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
		? tem : 0;
	    }
	}
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (GET_CODE (op1) == CONST_INT
	      || GET_CODE (op1) == CONST_DOUBLE))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}
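
      /* The identity holds bit by bit: (x & y) only has bits set where
	 x does, so the subtraction never borrows and simply clears the
	 bits selected by y, which is what ANDing with ~y does.  */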
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}
1955 simplify this by the associative law. This will, for example,
1956 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1957 Don't use the associative law for floating point.
1958 The inaccuracy makes it nonassociative,
1959 and subtle programs can break if operations are associated. */
1961 if (INTEGRAL_MODE_P (mode)
1962 && (plus_minus_operand_p (op0)
1963 || plus_minus_operand_p (op1))
1964 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift unless
	 we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
	  && (val = exact_log2 (INTVAL (trueop1))) >= 0
	  /* If the mode is larger than the host word size, and the
	     uppermost bit is set, then this isn't a power of two due
	     to implicit sign extension.  */
	  && (width <= HOST_BITS_PER_WIDE_INT
	      || val != HOST_BITS_PER_WIDE_INT - 1))
	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
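
      /* E.g. (mult x (const_int 8)) becomes (ashift x (const_int 3));
	 exact_log2 returns -1 for any multiplier that is not a positive
	 power of two, so such multipliers are rejected here.  */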
      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && (GET_MODE (trueop1) == VOIDmode
	      || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
	  && GET_MODE (op0) == mode
	  && CONST_DOUBLE_LOW (trueop1) == 0
	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
	return simplify_gen_binary (ASHIFT, mode, op0,
				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  REAL_VALUE_TYPE d;
	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	  if (REAL_VALUES_EQUAL (d, dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && REAL_VALUES_EQUAL (d, dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}
      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == const0_rtx)
	return op0;
      if (GET_CODE (trueop1) == CONST_INT
	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
	      == GET_MODE_MASK (mode)))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (GET_CODE (op1) == CONST_INT
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
	return op1;
      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && GET_CODE (trueop1) == CONST_INT
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT)
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && GET_CODE (XEXP (opleft, 1)) == CONST_INT
	  && GET_CODE (XEXP (opright, 1)) == CONST_INT
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
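
      /* E.g. in SImode, (ior (ashift a (const_int 24))
	 (lshiftrt a (const_int 8))) matches with 24 + 8 == 32 and
	 becomes (rotate a (const_int 24)): the two shifts deposit
	 disjoint copies of A's bits into the two parts of the word.  */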
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
	  && GET_CODE (XEXP (opright, 1)) == CONST_INT
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_BITSIZE (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));
      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
					  (AND, mode, XEXP (op0, 0),
					   GEN_INT (INTVAL (XEXP (op0, 1))
						    & ~INTVAL (op1))),
				    op1);
      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (XEXP (op0, 0), mask),
					XEXP (op0, 1));
	}

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
2195 if (trueop1 == const0_rtx)
2197 if (GET_CODE (trueop1) == CONST_INT
2198 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2199 == GET_MODE_MASK (mode)))
2200 return simplify_gen_unary (NOT, mode, op0, mode);
2201 if (rtx_equal_p (trueop0, trueop1)
2202 && ! side_effects_p (op0)
2203 && GET_MODE_CLASS (mode) != MODE_CC)
2204 return CONST0_RTX (mode);
2206 /* Canonicalize XOR of the most significant bit to PLUS. */
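/* E.g. in QImode, (xor X (const_int -128)) and (plus X (const_int
   -128)) agree: adding the sign bit flips it, and the carry out of
   the top bit falls outside the mode.  */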
2207 if ((GET_CODE (op1) == CONST_INT
2208 || GET_CODE (op1) == CONST_DOUBLE)
2209 && mode_signbit_p (mode, op1))
2210 return simplify_gen_binary (PLUS, mode, op0, op1);
2211 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2212 if ((GET_CODE (op1) == CONST_INT
2213 || GET_CODE (op1) == CONST_DOUBLE)
2214 && GET_CODE (op0) == PLUS
2215 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2216 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2217 && mode_signbit_p (mode, XEXP (op0, 1)))
2218 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2219 simplify_gen_binary (XOR, mode, op1,
/* If we are XORing two things that have no bits in common,
   convert them into an IOR.  This helps to detect rotations
   encoded with XOR instead of IOR, and enables other
   simplifications.  */
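/* This is safe because x ^ y and x | y differ only in bit positions
   where both inputs are 1, and the nonzero_bits test rules such
   positions out.  */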
2226 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2227 && (nonzero_bits (op0, mode)
2228 & nonzero_bits (op1, mode)) == 0)
2229 return (simplify_gen_binary (IOR, mode, op0, op1));
/* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
   Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
   (NOT y).  */
2235 int num_negated = 0;
2237 if (GET_CODE (op0) == NOT)
2238 num_negated++, op0 = XEXP (op0, 0);
2239 if (GET_CODE (op1) == NOT)
2240 num_negated++, op1 = XEXP (op1, 0);
2242 if (num_negated == 2)
2243 return simplify_gen_binary (XOR, mode, op0, op1);
2244 else if (num_negated == 1)
2245 return simplify_gen_unary (NOT, mode,
2246 simplify_gen_binary (XOR, mode, op0, op1),
2250 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2251 correspond to a machine insn or result in further simplifications
2252 if B is a constant. */
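/* Check bitwise: where B is 0, both (A & B) ^ B and (~A) & B are 0;
   where B is 1, the former is A ^ 1 == ~A, matching the latter.  */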
2254 if (GET_CODE (op0) == AND
2255 && rtx_equal_p (XEXP (op0, 1), op1)
2256 && ! side_effects_p (op1))
2257 return simplify_gen_binary (AND, mode,
2258 simplify_gen_unary (NOT, mode,
2259 XEXP (op0, 0), mode),
2262 else if (GET_CODE (op0) == AND
2263 && rtx_equal_p (XEXP (op0, 0), op1)
2264 && ! side_effects_p (op1))
2265 return simplify_gen_binary (AND, mode,
2266 simplify_gen_unary (NOT, mode,
2267 XEXP (op0, 1), mode),
2270 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2271 comparison if STORE_FLAG_VALUE is 1. */
2272 if (STORE_FLAG_VALUE == 1
2273 && trueop1 == const1_rtx
2274 && COMPARISON_P (op0)
2275 && (reversed = reversed_comparison (op0, mode)))
2278 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2279 is (lt foo (const_int 0)), so we can perform the above
2280 simplification if STORE_FLAG_VALUE is 1. */
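/* E.g. with 32-bit MODE, (xor (lshiftrt FOO (const_int 31))
   (const_int 1)) becomes (ge FOO (const_int 0)).  */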
2282 if (STORE_FLAG_VALUE == 1
2283 && trueop1 == const1_rtx
2284 && GET_CODE (op0) == LSHIFTRT
2285 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2286 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2287 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
/* (xor (comparison foo bar) (const_int sign-bit)) can likewise
   become the reversed comparison when STORE_FLAG_VALUE is the
   sign bit.  */
2291 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2292 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2293 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2294 && trueop1 == const_true_rtx
2295 && COMPARISON_P (op0)
2296 && (reversed = reversed_comparison (op0, mode)))
2301 tem = simplify_associative_operation (code, mode, op0, op1);
2307 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
/* If we are turning off bits already known off in OP0, we need
   not do an AND.  */
2311 if (GET_CODE (trueop1) == CONST_INT
2312 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2313 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2315 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2316 && GET_MODE_CLASS (mode) != MODE_CC)
2319 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2320 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2321 && ! side_effects_p (op0)
2322 && GET_MODE_CLASS (mode) != MODE_CC)
2323 return CONST0_RTX (mode);
2325 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2326 there are no nonzero bits of C outside of X's mode. */
2327 if ((GET_CODE (op0) == SIGN_EXTEND
2328 || GET_CODE (op0) == ZERO_EXTEND)
2329 && GET_CODE (trueop1) == CONST_INT
2330 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2331 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2332 & INTVAL (trueop1)) == 0)
2334 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2335 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2336 gen_int_mode (INTVAL (trueop1),
2338 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2341 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2342 if (GET_CODE (op0) == IOR
2343 && GET_CODE (trueop1) == CONST_INT
2344 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2346 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2347 return simplify_gen_binary (IOR, mode,
2348 simplify_gen_binary (AND, mode,
2349 XEXP (op0, 0), op1),
2350 gen_int_mode (tmp, mode));
2353 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2354 insn (and may simplify more). */
2355 if (GET_CODE (op0) == XOR
2356 && rtx_equal_p (XEXP (op0, 0), op1)
2357 && ! side_effects_p (op1))
2358 return simplify_gen_binary (AND, mode,
2359 simplify_gen_unary (NOT, mode,
2360 XEXP (op0, 1), mode),
2363 if (GET_CODE (op0) == XOR
2364 && rtx_equal_p (XEXP (op0, 1), op1)
2365 && ! side_effects_p (op1))
2366 return simplify_gen_binary (AND, mode,
2367 simplify_gen_unary (NOT, mode,
2368 XEXP (op0, 0), mode),
2371 /* Similarly for (~(A ^ B)) & A. */
2372 if (GET_CODE (op0) == NOT
2373 && GET_CODE (XEXP (op0, 0)) == XOR
2374 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2375 && ! side_effects_p (op1))
2376 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2378 if (GET_CODE (op0) == NOT
2379 && GET_CODE (XEXP (op0, 0)) == XOR
2380 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2381 && ! side_effects_p (op1))
2382 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2384 /* Convert (A | B) & A to A. */
2385 if (GET_CODE (op0) == IOR
2386 && (rtx_equal_p (XEXP (op0, 0), op1)
2387 || rtx_equal_p (XEXP (op0, 1), op1))
2388 && ! side_effects_p (XEXP (op0, 0))
2389 && ! side_effects_p (XEXP (op0, 1)))
2392 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2393 ((A & N) + B) & M -> (A + B) & M
2394 Similarly if (N & M) == 0,
2395 ((A | N) + B) & M -> (A + B) & M
2396 and for - instead of + and/or ^ instead of |. */
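/* E.g. with M == 0x0f: carries in an addition propagate only upward,
   so bits of A above bit 3 cannot influence the masked sum.  Hence
   ((A & 0xff) + B) & 0x0f folds to (A + B) & 0x0f, and
   ((A | 0xf0) + B) & 0x0f likewise drops the inner IOR.  */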
2397 if (GET_CODE (trueop1) == CONST_INT
2398 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2399 && ~INTVAL (trueop1)
2400 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2401 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2406 pmop[0] = XEXP (op0, 0);
2407 pmop[1] = XEXP (op0, 1);
2409 for (which = 0; which < 2; which++)
2412 switch (GET_CODE (tem))
2415 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2416 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2417 == INTVAL (trueop1))
2418 pmop[which] = XEXP (tem, 0);
2422 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2423 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2424 pmop[which] = XEXP (tem, 0);
2431 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2433 tem = simplify_gen_binary (GET_CODE (op0), mode,
2435 return simplify_gen_binary (code, mode, tem, op1);
2438 tem = simplify_associative_operation (code, mode, op0, op1);
2444 /* 0/x is 0 (or x&0 if x has side-effects). */
2445 if (trueop0 == CONST0_RTX (mode))
2447 if (side_effects_p (op1))
2448 return simplify_gen_binary (AND, mode, op1, trueop0);
2452 if (trueop1 == CONST1_RTX (mode))
2453 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2454 /* Convert divide by power of two into shift. */
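/* E.g. (udiv X (const_int 8)) becomes (lshiftrt X (const_int 3)).  */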
2455 if (GET_CODE (trueop1) == CONST_INT
2456 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2457 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2461 /* Handle floating point and integers separately. */
2462 if (SCALAR_FLOAT_MODE_P (mode))
2464 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2465 safe for modes with NaNs, since 0.0 / 0.0 will then be
2466 NaN rather than 0.0. Nor is it safe for modes with signed
zeros, since dividing 0 by a negative number gives -0.0, not 0.0.  */
2468 if (trueop0 == CONST0_RTX (mode)
2469 && !HONOR_NANS (mode)
2470 && !HONOR_SIGNED_ZEROS (mode)
2471 && ! side_effects_p (op1))
2474 if (trueop1 == CONST1_RTX (mode)
2475 && !HONOR_SNANS (mode))
2478 if (GET_CODE (trueop1) == CONST_DOUBLE
2479 && trueop1 != CONST0_RTX (mode))
2482 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2485 if (REAL_VALUES_EQUAL (d, dconstm1)
2486 && !HONOR_SNANS (mode))
2487 return simplify_gen_unary (NEG, mode, op0, mode);
2489 /* Change FP division by a constant into multiplication.
2490 Only do this with -funsafe-math-optimizations. */
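/* E.g. x / 4.0 becomes x * 0.25.  The reciprocal is not exactly
   representable for every divisor (consider 1.0 / 3.0), which is
   why the transformation is guarded as unsafe.  */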
2491 if (flag_unsafe_math_optimizations
2492 && !REAL_VALUES_EQUAL (d, dconst0))
2494 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2495 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2496 return simplify_gen_binary (MULT, mode, op0, tem);
2502 /* 0/x is 0 (or x&0 if x has side-effects). */
2503 if (trueop0 == CONST0_RTX (mode))
2505 if (side_effects_p (op1))
2506 return simplify_gen_binary (AND, mode, op1, trueop0);
2510 if (trueop1 == CONST1_RTX (mode))
2511 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2513 if (trueop1 == constm1_rtx)
2515 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2516 return simplify_gen_unary (NEG, mode, x, mode);
2522 /* 0%x is 0 (or x&0 if x has side-effects). */
2523 if (trueop0 == CONST0_RTX (mode))
2525 if (side_effects_p (op1))
2526 return simplify_gen_binary (AND, mode, op1, trueop0);
/* x%1 is 0 (or x&0 if x has side-effects).  */
2530 if (trueop1 == CONST1_RTX (mode))
2532 if (side_effects_p (op0))
2533 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2534 return CONST0_RTX (mode);
2536 /* Implement modulus by power of two as AND. */
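/* E.g. (umod X (const_int 16)) becomes (and X (const_int 15)).  */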
2537 if (GET_CODE (trueop1) == CONST_INT
2538 && exact_log2 (INTVAL (trueop1)) > 0)
2539 return simplify_gen_binary (AND, mode, op0,
2540 GEN_INT (INTVAL (op1) - 1));
2544 /* 0%x is 0 (or x&0 if x has side-effects). */
2545 if (trueop0 == CONST0_RTX (mode))
2547 if (side_effects_p (op1))
2548 return simplify_gen_binary (AND, mode, op1, trueop0);
/* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
2552 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2554 if (side_effects_p (op0))
2555 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2556 return CONST0_RTX (mode);
2563 if (trueop1 == CONST0_RTX (mode))
2565 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2567 /* Rotating ~0 always results in ~0. */
2568 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2569 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2570 && ! side_effects_p (op1))
2576 if (trueop1 == CONST0_RTX (mode))
2578 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2583 if (trueop1 == CONST0_RTX (mode))
2585 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2587 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
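/* E.g. for a 32-bit operand whose CLZ is defined to be 32 at zero,
   the CLZ result lies in [0, 32] and only the value 32 survives a
   right shift by 5, so the shift is 1 exactly when X is zero.  */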
2588 if (GET_CODE (op0) == CLZ
2589 && GET_CODE (trueop1) == CONST_INT
2590 && STORE_FLAG_VALUE == 1
&& INTVAL (trueop1) < (HOST_WIDE_INT) width)
2593 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2594 unsigned HOST_WIDE_INT zero_val = 0;
2596 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2597 && zero_val == GET_MODE_BITSIZE (imode)
2598 && INTVAL (trueop1) == exact_log2 (zero_val))
2599 return simplify_gen_relational (EQ, mode, imode,
2600 XEXP (op0, 0), const0_rtx);
2605 if (width <= HOST_BITS_PER_WIDE_INT
2606 && GET_CODE (trueop1) == CONST_INT
&& INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2608 && ! side_effects_p (op0))
2610 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2612 tem = simplify_associative_operation (code, mode, op0, op1);
2618 if (width <= HOST_BITS_PER_WIDE_INT
2619 && GET_CODE (trueop1) == CONST_INT
2620 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2621 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2622 && ! side_effects_p (op0))
2624 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2626 tem = simplify_associative_operation (code, mode, op0, op1);
2632 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2634 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2636 tem = simplify_associative_operation (code, mode, op0, op1);
2642 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2644 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2646 tem = simplify_associative_operation (code, mode, op0, op1);
2655 /* ??? There are simplifications that can be done. */
2659 if (!VECTOR_MODE_P (mode))
2661 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2662 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2663 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2664 gcc_assert (XVECLEN (trueop1, 0) == 1);
2665 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2667 if (GET_CODE (trueop0) == CONST_VECTOR)
2668 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2673 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2674 gcc_assert (GET_MODE_INNER (mode)
2675 == GET_MODE_INNER (GET_MODE (trueop0)));
2676 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2678 if (GET_CODE (trueop0) == CONST_VECTOR)
2680 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2681 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2682 rtvec v = rtvec_alloc (n_elts);
2685 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2686 for (i = 0; i < n_elts; i++)
2688 rtx x = XVECEXP (trueop1, 0, i);
2690 gcc_assert (GET_CODE (x) == CONST_INT);
2691 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2695 return gen_rtx_CONST_VECTOR (mode, v);
2699 if (XVECLEN (trueop1, 0) == 1
2700 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2701 && GET_CODE (trueop0) == VEC_CONCAT)
2704 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2706 /* Try to find the element in the VEC_CONCAT. */
2707 while (GET_MODE (vec) != mode
2708 && GET_CODE (vec) == VEC_CONCAT)
2710 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2711 if (offset < vec_size)
2712 vec = XEXP (vec, 0);
2716 vec = XEXP (vec, 1);
2718 vec = avoid_constant_pool_reference (vec);
2721 if (GET_MODE (vec) == mode)
2728 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2729 ? GET_MODE (trueop0)
2730 : GET_MODE_INNER (mode));
2731 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2732 ? GET_MODE (trueop1)
2733 : GET_MODE_INNER (mode));
2735 gcc_assert (VECTOR_MODE_P (mode));
2736 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2737 == GET_MODE_SIZE (mode));
2739 if (VECTOR_MODE_P (op0_mode))
2740 gcc_assert (GET_MODE_INNER (mode)
2741 == GET_MODE_INNER (op0_mode));
2743 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2745 if (VECTOR_MODE_P (op1_mode))
2746 gcc_assert (GET_MODE_INNER (mode)
2747 == GET_MODE_INNER (op1_mode));
2749 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2751 if ((GET_CODE (trueop0) == CONST_VECTOR
2752 || GET_CODE (trueop0) == CONST_INT
2753 || GET_CODE (trueop0) == CONST_DOUBLE)
2754 && (GET_CODE (trueop1) == CONST_VECTOR
2755 || GET_CODE (trueop1) == CONST_INT
2756 || GET_CODE (trueop1) == CONST_DOUBLE))
2758 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2759 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2760 rtvec v = rtvec_alloc (n_elts);
2762 unsigned in_n_elts = 1;
2764 if (VECTOR_MODE_P (op0_mode))
2765 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2766 for (i = 0; i < n_elts; i++)
2770 if (!VECTOR_MODE_P (op0_mode))
2771 RTVEC_ELT (v, i) = trueop0;
2773 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2777 if (!VECTOR_MODE_P (op1_mode))
2778 RTVEC_ELT (v, i) = trueop1;
2780 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2785 return gen_rtx_CONST_VECTOR (mode, v);
2798 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2801 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2803 unsigned int width = GET_MODE_BITSIZE (mode);
2805 if (VECTOR_MODE_P (mode)
2806 && code != VEC_CONCAT
2807 && GET_CODE (op0) == CONST_VECTOR
2808 && GET_CODE (op1) == CONST_VECTOR)
2810 unsigned n_elts = GET_MODE_NUNITS (mode);
2811 enum machine_mode op0mode = GET_MODE (op0);
2812 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2813 enum machine_mode op1mode = GET_MODE (op1);
2814 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2815 rtvec v = rtvec_alloc (n_elts);
2818 gcc_assert (op0_n_elts == n_elts);
2819 gcc_assert (op1_n_elts == n_elts);
2820 for (i = 0; i < n_elts; i++)
2822 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2823 CONST_VECTOR_ELT (op0, i),
2824 CONST_VECTOR_ELT (op1, i));
2827 RTVEC_ELT (v, i) = x;
2830 return gen_rtx_CONST_VECTOR (mode, v);
2833 if (VECTOR_MODE_P (mode)
2834 && code == VEC_CONCAT
2835 && CONSTANT_P (op0) && CONSTANT_P (op1))
2837 unsigned n_elts = GET_MODE_NUNITS (mode);
2838 rtvec v = rtvec_alloc (n_elts);
2840 gcc_assert (n_elts >= 2);
2843 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2844 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2846 RTVEC_ELT (v, 0) = op0;
2847 RTVEC_ELT (v, 1) = op1;
2851 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2852 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2855 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2856 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2857 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2859 for (i = 0; i < op0_n_elts; ++i)
2860 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2861 for (i = 0; i < op1_n_elts; ++i)
RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
2865 return gen_rtx_CONST_VECTOR (mode, v);
2868 if (SCALAR_FLOAT_MODE_P (mode)
2869 && GET_CODE (op0) == CONST_DOUBLE
2870 && GET_CODE (op1) == CONST_DOUBLE
2871 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2882 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2884 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2886 for (i = 0; i < 4; i++)
2903 real_from_target (&r, tmp0, mode);
2904 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2908 REAL_VALUE_TYPE f0, f1, value, result;
2911 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2912 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2913 real_convert (&f0, mode, &f0);
2914 real_convert (&f1, mode, &f1);
2916 if (HONOR_SNANS (mode)
2917 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2921 && REAL_VALUES_EQUAL (f1, dconst0)
2922 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2925 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2926 && flag_trapping_math
2927 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2929 int s0 = REAL_VALUE_NEGATIVE (f0);
2930 int s1 = REAL_VALUE_NEGATIVE (f1);
2935 /* Inf + -Inf = NaN plus exception. */
2940 /* Inf - Inf = NaN plus exception. */
2945 /* Inf / Inf = NaN plus exception. */
2952 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2953 && flag_trapping_math
2954 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2955 || (REAL_VALUE_ISINF (f1)
2956 && REAL_VALUES_EQUAL (f0, dconst0))))
2957 /* Inf * 0 = NaN plus exception. */
2960 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2962 real_convert (&result, mode, &value);
2964 /* Don't constant fold this floating point operation if
2965 the result has overflowed and flag_trapping_math. */
2967 if (flag_trapping_math
2968 && MODE_HAS_INFINITIES (mode)
2969 && REAL_VALUE_ISINF (result)
2970 && !REAL_VALUE_ISINF (f0)
2971 && !REAL_VALUE_ISINF (f1))
2972 /* Overflow plus exception. */
/* Don't constant fold this floating point operation if the
   result may depend upon the run-time rounding mode and
   flag_rounding_math is set, or if GCC's software emulation
   is unable to accurately represent the result.  */
2980 if ((flag_rounding_math
2981 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2982 && !flag_unsafe_math_optimizations))
2983 && (inexact || !real_identical (&result, &value)))
2986 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2990 /* We can fold some multi-word operations. */
2991 if (GET_MODE_CLASS (mode) == MODE_INT
2992 && width == HOST_BITS_PER_WIDE_INT * 2
2993 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2994 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2996 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2997 HOST_WIDE_INT h1, h2, hv, ht;
2999 if (GET_CODE (op0) == CONST_DOUBLE)
3000 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3002 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3004 if (GET_CODE (op1) == CONST_DOUBLE)
3005 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3007 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3012 /* A - B == A + (-B). */
3013 neg_double (l2, h2, &lv, &hv);
/* Fall through.  */
3019 add_double (l1, h1, l2, h2, &lv, &hv);
3023 mul_double (l1, h1, l2, h2, &lv, &hv);
3027 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3028 &lv, &hv, <, &ht))
3033 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3034 <, &ht, &lv, &hv))
3039 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3040 &lv, &hv, <, &ht))
3045 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3046 <, &ht, &lv, &hv))
3051 lv = l1 & l2, hv = h1 & h2;
3055 lv = l1 | l2, hv = h1 | h2;
3059 lv = l1 ^ l2, hv = h1 ^ h2;
3065 && ((unsigned HOST_WIDE_INT) l1
3066 < (unsigned HOST_WIDE_INT) l2)))
3075 && ((unsigned HOST_WIDE_INT) l1
3076 > (unsigned HOST_WIDE_INT) l2)))
3083 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3085 && ((unsigned HOST_WIDE_INT) l1
3086 < (unsigned HOST_WIDE_INT) l2)))
3093 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3095 && ((unsigned HOST_WIDE_INT) l1
3096 > (unsigned HOST_WIDE_INT) l2)))
3102 case LSHIFTRT: case ASHIFTRT:
3104 case ROTATE: case ROTATERT:
3105 if (SHIFT_COUNT_TRUNCATED)
3106 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3108 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3111 if (code == LSHIFTRT || code == ASHIFTRT)
3112 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3114 else if (code == ASHIFT)
3115 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3116 else if (code == ROTATE)
3117 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3118 else /* code == ROTATERT */
3119 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3126 return immed_double_const (lv, hv, mode);
3129 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3130 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3132 /* Get the integer argument values in two forms:
3133 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3135 arg0 = INTVAL (op0);
3136 arg1 = INTVAL (op1);
3138 if (width < HOST_BITS_PER_WIDE_INT)
3140 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3141 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3144 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3145 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3148 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3149 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3157 /* Compute the value of the arithmetic. */
3162 val = arg0s + arg1s;
3166 val = arg0s - arg1s;
3170 val = arg0s * arg1s;
3175 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3178 val = arg0s / arg1s;
3183 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3186 val = arg0s % arg1s;
3191 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3194 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3199 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3202 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3220 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3221 the value is in range. We can't return any old value for
3222 out-of-range arguments because either the middle-end (via
3223 shift_truncation_mask) or the back-end might be relying on
3224 target-specific knowledge. Nor can we rely on
3225 shift_truncation_mask, since the shift might not be part of an
3226 ashlM3, lshrM3 or ashrM3 instruction. */
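/* E.g. for a 32-bit mode with SHIFT_COUNT_TRUNCATED, a count of 33
   is reduced to 1; without it, an out-of-range count is simply not
   folded here.  */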
3227 if (SHIFT_COUNT_TRUNCATED)
3228 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3229 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3232 val = (code == ASHIFT
3233 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3234 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3236 /* Sign-extend the result for arithmetic right shifts. */
3237 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3238 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3246 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3247 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3255 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3256 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3260 /* Do nothing here. */
3264 val = arg0s <= arg1s ? arg0s : arg1s;
3268 val = ((unsigned HOST_WIDE_INT) arg0
3269 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3273 val = arg0s > arg1s ? arg0s : arg1s;
3277 val = ((unsigned HOST_WIDE_INT) arg0
3278 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3286 /* ??? There are simplifications that can be done. */
3293 return gen_int_mode (val, mode);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than testing for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
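/* For example, (minus (plus A B) (minus C D)) is flattened into the
   operand list A, B, -C, D, which is then sorted, simplified
   pairwise, and rebuilt.  */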
3308 struct simplify_plus_minus_op_data
3315 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3317 const struct simplify_plus_minus_op_data *d1 = p1;
3318 const struct simplify_plus_minus_op_data *d2 = p2;
3321 result = (commutative_operand_precedence (d2->op)
3322 - commutative_operand_precedence (d1->op));
3326 /* Group together equal REGs to do more simplification. */
3327 if (REG_P (d1->op) && REG_P (d2->op))
3328 return REGNO (d1->op) - REGNO (d2->op);
3334 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3337 struct simplify_plus_minus_op_data ops[8];
3339 int n_ops = 2, input_ops = 2;
3340 int changed, n_constants = 0, canonicalized = 0;
3343 memset (ops, 0, sizeof ops);
3345 /* Set up the two operands and then expand them until nothing has been
3346 changed. If we run out of room in our array, give up; this should
3347 almost never happen. */
3352 ops[1].neg = (code == MINUS);
3358 for (i = 0; i < n_ops; i++)
3360 rtx this_op = ops[i].op;
3361 int this_neg = ops[i].neg;
3362 enum rtx_code this_code = GET_CODE (this_op);
3371 ops[n_ops].op = XEXP (this_op, 1);
3372 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3375 ops[i].op = XEXP (this_op, 0);
3378 canonicalized |= this_neg;
3382 ops[i].op = XEXP (this_op, 0);
3383 ops[i].neg = ! this_neg;
3390 && GET_CODE (XEXP (this_op, 0)) == PLUS
3391 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3392 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3394 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3395 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3396 ops[n_ops].neg = this_neg;
3404 /* ~a -> (-a - 1) */
3407 ops[n_ops].op = constm1_rtx;
3408 ops[n_ops++].neg = this_neg;
3409 ops[i].op = XEXP (this_op, 0);
3410 ops[i].neg = !this_neg;
3420 ops[i].op = neg_const_int (mode, this_op);
3434 if (n_constants > 1)
3437 gcc_assert (n_ops >= 2);
3439 /* If we only have two operands, we can avoid the loops. */
3442 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3445 /* Get the two operands. Be careful with the order, especially for
3446 the cases where code == MINUS. */
3447 if (ops[0].neg && ops[1].neg)
3449 lhs = gen_rtx_NEG (mode, ops[0].op);
3452 else if (ops[0].neg)
3463 return simplify_const_binary_operation (code, mode, lhs, rhs);
3466 /* Now simplify each pair of operands until nothing changes. */
3469 /* Insertion sort is good enough for an eight-element array. */
3470 for (i = 1; i < n_ops; i++)
3472 struct simplify_plus_minus_op_data save;
3474 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3480 ops[j + 1] = ops[j];
3481 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3485 /* This is only useful the first time through. */
3490 for (i = n_ops - 1; i > 0; i--)
3491 for (j = i - 1; j >= 0; j--)
3493 rtx lhs = ops[j].op, rhs = ops[i].op;
3494 int lneg = ops[j].neg, rneg = ops[i].neg;
3496 if (lhs != 0 && rhs != 0)
3498 enum rtx_code ncode = PLUS;
3504 tem = lhs, lhs = rhs, rhs = tem;
3506 else if (swap_commutative_operands_p (lhs, rhs))
3507 tem = lhs, lhs = rhs, rhs = tem;
3509 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3510 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3512 rtx tem_lhs, tem_rhs;
3514 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3515 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3516 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3518 if (tem && !CONSTANT_P (tem))
3519 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3522 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3524 /* Reject "simplifications" that just wrap the two
3525 arguments in a CONST. Failure to do so can result
3526 in infinite recursion with simplify_binary_operation
3527 when it calls us to simplify CONST operations. */
3529 && ! (GET_CODE (tem) == CONST
3530 && GET_CODE (XEXP (tem, 0)) == ncode
3531 && XEXP (XEXP (tem, 0), 0) == lhs
3532 && XEXP (XEXP (tem, 0), 1) == rhs))
3535 if (GET_CODE (tem) == NEG)
3536 tem = XEXP (tem, 0), lneg = !lneg;
3537 if (GET_CODE (tem) == CONST_INT && lneg)
3538 tem = neg_const_int (mode, tem), lneg = 0;
3542 ops[j].op = NULL_RTX;
3548 /* Pack all the operands to the lower-numbered entries. */
3549 for (i = 0, j = 0; j < n_ops; j++)
3559 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3561 && GET_CODE (ops[1].op) == CONST_INT
3562 && CONSTANT_P (ops[0].op)
3564 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3566 /* We suppressed creation of trivial CONST expressions in the
3567 combination loop to avoid recursion. Create one manually now.
3568 The combination loop should have ensured that there is exactly
3569 one CONST_INT, and the sort will have ensured that it is last
3570 in the array and that any other constant will be next-to-last. */
3573 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3574 && CONSTANT_P (ops[n_ops - 2].op))
3576 rtx value = ops[n_ops - 1].op;
3577 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3578 value = neg_const_int (mode, value);
3579 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3583 /* Put a non-negated operand first, if possible. */
3585 for (i = 0; i < n_ops && ops[i].neg; i++)
3588 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3597 /* Now make the result by performing the requested operations. */
3599 for (i = 1; i < n_ops; i++)
3600 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3601 mode, result, ops[i].op);
3606 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3608 plus_minus_operand_p (rtx x)
3610 return GET_CODE (x) == PLUS
3611 || GET_CODE (x) == MINUS
3612 || (GET_CODE (x) == CONST
3613 && GET_CODE (XEXP (x, 0)) == PLUS
3614 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3615 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands
   must not both be VOIDmode as well.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
3627 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3628 enum machine_mode cmp_mode, rtx op0, rtx op1)
3630 rtx tem, trueop0, trueop1;
3632 if (cmp_mode == VOIDmode)
3633 cmp_mode = GET_MODE (op0);
3634 if (cmp_mode == VOIDmode)
3635 cmp_mode = GET_MODE (op1);
3637 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3640 if (SCALAR_FLOAT_MODE_P (mode))
3642 if (tem == const0_rtx)
3643 return CONST0_RTX (mode);
3644 #ifdef FLOAT_STORE_FLAG_VALUE
3646 REAL_VALUE_TYPE val;
3647 val = FLOAT_STORE_FLAG_VALUE (mode);
3648 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3654 if (VECTOR_MODE_P (mode))
3656 if (tem == const0_rtx)
3657 return CONST0_RTX (mode);
3658 #ifdef VECTOR_STORE_FLAG_VALUE
3663 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3664 if (val == NULL_RTX)
3666 if (val == const1_rtx)
3667 return CONST1_RTX (mode);
3669 units = GET_MODE_NUNITS (mode);
3670 v = rtvec_alloc (units);
3671 for (i = 0; i < units; i++)
3672 RTVEC_ELT (v, i) = val;
3673 return gen_rtx_raw_CONST_VECTOR (mode, v);
3683 /* For the following tests, ensure const0_rtx is op1. */
3684 if (swap_commutative_operands_p (op0, op1)
3685 || (op0 == const0_rtx && op1 != const0_rtx))
3686 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3688 /* If op0 is a compare, extract the comparison arguments from it. */
3689 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3690 return simplify_relational_operation (code, mode, VOIDmode,
3691 XEXP (op0, 0), XEXP (op0, 1));
3693 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3697 trueop0 = avoid_constant_pool_reference (op0);
3698 trueop1 = avoid_constant_pool_reference (op1);
3699 return simplify_relational_operation_1 (code, mode, cmp_mode,
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode in
   which the comparison is done, so it is the mode of the operands.  */
3710 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3711 enum machine_mode cmp_mode, rtx op0, rtx op1)
3713 enum rtx_code op0code = GET_CODE (op0);
3715 if (op1 == const0_rtx && COMPARISON_P (op0))
/* If op0 is a comparison, extract the comparison arguments
   from it.  */
3721 if (GET_MODE (op0) == mode)
3722 return simplify_rtx (op0);
3724 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3725 XEXP (op0, 0), XEXP (op0, 1));
3727 else if (code == EQ)
3729 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3730 if (new_code != UNKNOWN)
3731 return simplify_gen_relational (new_code, mode, VOIDmode,
3732 XEXP (op0, 0), XEXP (op0, 1));
3736 if (op1 == const0_rtx)
3738 /* Canonicalize (GTU x 0) as (NE x 0). */
3740 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3741 /* Canonicalize (LEU x 0) as (EQ x 0). */
3743 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3745 else if (op1 == const1_rtx)
3750 /* Canonicalize (GE x 1) as (GT x 0). */
3751 return simplify_gen_relational (GT, mode, cmp_mode,
3754 /* Canonicalize (GEU x 1) as (NE x 0). */
3755 return simplify_gen_relational (NE, mode, cmp_mode,
3758 /* Canonicalize (LT x 1) as (LE x 0). */
3759 return simplify_gen_relational (LE, mode, cmp_mode,
3762 /* Canonicalize (LTU x 1) as (EQ x 0). */
3763 return simplify_gen_relational (EQ, mode, cmp_mode,
3769 else if (op1 == constm1_rtx)
3771 /* Canonicalize (LE x -1) as (LT x 0). */
3773 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3774 /* Canonicalize (GT x -1) as (GE x 0). */
3776 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3779 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
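/* E.g. (eq (plus X (const_int 3)) (const_int 7)) becomes
   (eq X (const_int 4)).  */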
3780 if ((code == EQ || code == NE)
3781 && (op0code == PLUS || op0code == MINUS)
3783 && CONSTANT_P (XEXP (op0, 1))
3784 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3786 rtx x = XEXP (op0, 0);
3787 rtx c = XEXP (op0, 1);
3789 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3791 return simplify_gen_relational (code, mode, cmp_mode, x, c);
/* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
   the same as (zero_extract:SI FOO (const_int 1) BAR).  */
3797 && op1 == const0_rtx
3798 && GET_MODE_CLASS (mode) == MODE_INT
3799 && cmp_mode != VOIDmode
3800 /* ??? Work-around BImode bugs in the ia64 backend. */
3802 && cmp_mode != BImode
3803 && nonzero_bits (op0, cmp_mode) == 1
3804 && STORE_FLAG_VALUE == 1)
3805 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3806 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3807 : lowpart_subreg (mode, op0, cmp_mode);
3809 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3810 if ((code == EQ || code == NE)
3811 && op1 == const0_rtx
3813 return simplify_gen_relational (code, mode, cmp_mode,
3814 XEXP (op0, 0), XEXP (op0, 1));
3816 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3817 if ((code == EQ || code == NE)
3819 && rtx_equal_p (XEXP (op0, 0), op1)
3820 && !side_effects_p (XEXP (op0, 0)))
3821 return simplify_gen_relational (code, mode, cmp_mode,
3822 XEXP (op0, 1), const0_rtx);
3824 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3825 if ((code == EQ || code == NE)
3827 && rtx_equal_p (XEXP (op0, 1), op1)
3828 && !side_effects_p (XEXP (op0, 1)))
3829 return simplify_gen_relational (code, mode, cmp_mode,
3830 XEXP (op0, 0), const0_rtx);
3832 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3833 if ((code == EQ || code == NE)
3835 && (GET_CODE (op1) == CONST_INT
3836 || GET_CODE (op1) == CONST_DOUBLE)
3837 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3838 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3839 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3840 simplify_gen_binary (XOR, cmp_mode,
3841 XEXP (op0, 1), op1));
3843 if (op0code == POPCOUNT && op1 == const0_rtx)
3849 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3850 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3851 XEXP (op0, 0), const0_rtx);
3856 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3857 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3858 XEXP (op0, 0), const0_rtx);
3867 /* Check if the given comparison (done in the given MODE) is actually a
3868 tautology or a contradiction.
3869 If no simplification is possible, this function returns zero.
3870 Otherwise, it returns either const_true_rtx or const0_rtx. */
3873 simplify_const_relational_operation (enum rtx_code code,
3874 enum machine_mode mode,
3877 int equal, op0lt, op0ltu, op1lt, op1ltu;
3882 gcc_assert (mode != VOIDmode
3883 || (GET_MODE (op0) == VOIDmode
3884 && GET_MODE (op1) == VOIDmode));
3886 /* If op0 is a compare, extract the comparison arguments from it. */
3887 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3889 op1 = XEXP (op0, 1);
3890 op0 = XEXP (op0, 0);
3892 if (GET_MODE (op0) != VOIDmode)
3893 mode = GET_MODE (op0);
3894 else if (GET_MODE (op1) != VOIDmode)
3895 mode = GET_MODE (op1);
3900 /* We can't simplify MODE_CC values since we don't know what the
3901 actual comparison is. */
3902 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3905 /* Make sure the constant is second. */
3906 if (swap_commutative_operands_p (op0, op1))
3908 tem = op0, op0 = op1, op1 = tem;
3909 code = swap_condition (code);
3912 trueop0 = avoid_constant_pool_reference (op0);
3913 trueop1 = avoid_constant_pool_reference (op1);
3915 /* For integer comparisons of A and B maybe we can simplify A - B and can
3916 then simplify a comparison of that with zero. If A and B are both either
3917 a register or a CONST_INT, this can't help; testing for these cases will
3918 prevent infinite recursion here and speed things up.
We can only do this for EQ and NE comparisons, as otherwise we may
   lose or introduce overflow, which we cannot disregard as undefined
   since we do not know the signedness of the operation on either the
   left or the right hand side of the comparison.  */
3925 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3926 && (code == EQ || code == NE)
3927 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3928 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3929 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3930 /* We cannot do this if tem is a nonzero address. */
3931 && ! nonzero_address_p (tem))
3932 return simplify_const_relational_operation (signed_condition (code),
3933 mode, tem, const0_rtx);
3935 if (! HONOR_NANS (mode) && code == ORDERED)
3936 return const_true_rtx;
3938 if (! HONOR_NANS (mode) && code == UNORDERED)
/* For modes without NaNs, if the two operands are equal, we know the
   result unless they have side-effects.  */
3943 if (! HONOR_NANS (GET_MODE (trueop0))
3944 && rtx_equal_p (trueop0, trueop1)
3945 && ! side_effects_p (trueop0))
3946 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
/* If the operands are floating-point constants, see if we can fold
   the result.  */
3950 else if (GET_CODE (trueop0) == CONST_DOUBLE
3951 && GET_CODE (trueop1) == CONST_DOUBLE
3952 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3954 REAL_VALUE_TYPE d0, d1;
3956 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3957 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3959 /* Comparisons are unordered iff at least one of the values is NaN. */
3960 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3970 return const_true_rtx;
3983 equal = REAL_VALUES_EQUAL (d0, d1);
3984 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3985 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3988 /* Otherwise, see if the operands are both integers. */
3989 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3990 && (GET_CODE (trueop0) == CONST_DOUBLE
3991 || GET_CODE (trueop0) == CONST_INT)
3992 && (GET_CODE (trueop1) == CONST_DOUBLE
3993 || GET_CODE (trueop1) == CONST_INT))
3995 int width = GET_MODE_BITSIZE (mode);
3996 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3997 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3999 /* Get the two words comprising each integer constant. */
4000 if (GET_CODE (trueop0) == CONST_DOUBLE)
4002 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4003 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4007 l0u = l0s = INTVAL (trueop0);
4008 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4011 if (GET_CODE (trueop1) == CONST_DOUBLE)
4013 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4014 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4018 l1u = l1s = INTVAL (trueop1);
4019 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4022 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4023 we have to sign or zero-extend the values. */
4024 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4026 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4027 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4029 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4030 l0s |= ((HOST_WIDE_INT) (-1) << width);
4032 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4033 l1s |= ((HOST_WIDE_INT) (-1) << width);
4035 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4036 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4038 equal = (h0u == h1u && l0u == l1u);
4039 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
4040 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
4041 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4042 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4045 /* Otherwise, there are some code-specific tests we can make. */
4048 /* Optimize comparisons with upper and lower bounds. */
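/* E.g. (geu X (const_int 0)) is always true and (ltu X (const_int 0))
   is always false, since no unsigned value lies below zero.  */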
4049 if (SCALAR_INT_MODE_P (mode)
4050 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4063 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
4070 /* x >= min is always true. */
4071 if (rtx_equal_p (trueop1, mmin))
4072 tem = const_true_rtx;
4078 /* x <= max is always true. */
4079 if (rtx_equal_p (trueop1, mmax))
4080 tem = const_true_rtx;
4085 /* x > max is always false. */
4086 if (rtx_equal_p (trueop1, mmax))
4092 /* x < min is always false. */
4093 if (rtx_equal_p (trueop1, mmin))
4100 if (tem == const0_rtx
4101 || tem == const_true_rtx)
4108 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4113 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4114 return const_true_rtx;
4118 /* Optimize abs(x) < 0.0. */
4119 if (trueop1 == CONST0_RTX (mode)
4120 && !HONOR_SNANS (mode)
4121 && (!INTEGRAL_MODE_P (mode)
4122 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4124 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4126 if (GET_CODE (tem) == ABS)
4128 if (INTEGRAL_MODE_P (mode)
4129 && (issue_strict_overflow_warning
4130 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4131 warning (OPT_Wstrict_overflow,
4132 ("assuming signed overflow does not occur when "
4133 "assuming abs (x) < 0 is false"));
4138 /* Optimize popcount (x) < 0. */
4139 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4140 return const_true_rtx;
4144 /* Optimize abs(x) >= 0.0. */
4145 if (trueop1 == CONST0_RTX (mode)
4146 && !HONOR_NANS (mode)
4147 && (!INTEGRAL_MODE_P (mode)
4148 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4150 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4152 if (GET_CODE (tem) == ABS)
4154 if (INTEGRAL_MODE_P (mode)
4155 && (issue_strict_overflow_warning
4156 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4157 warning (OPT_Wstrict_overflow,
4158 ("assuming signed overflow does not occur when "
4159 "assuming abs (x) >= 0 is true"));
4160 return const_true_rtx;
4164 /* Optimize popcount (x) >= 0. */
4165 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4166 return const_true_rtx;
4170 /* Optimize ! (abs(x) < 0.0). */
4171 if (trueop1 == CONST0_RTX (mode))
4173 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4175 if (GET_CODE (tem) == ABS)
4176 return const_true_rtx;
/* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
   correctly.  */
4193 return equal ? const_true_rtx : const0_rtx;
4196 return ! equal ? const_true_rtx : const0_rtx;
4199 return op0lt ? const_true_rtx : const0_rtx;
4202 return op1lt ? const_true_rtx : const0_rtx;
4204 return op0ltu ? const_true_rtx : const0_rtx;
4206 return op1ltu ? const_true_rtx : const0_rtx;
4209 return equal || op0lt ? const_true_rtx : const0_rtx;
4212 return equal || op1lt ? const_true_rtx : const0_rtx;
4214 return equal || op0ltu ? const_true_rtx : const0_rtx;
4216 return equal || op1ltu ? const_true_rtx : const0_rtx;
4218 return const_true_rtx;
4226 /* Simplify CODE, an operation with result mode MODE and three operands,
4227 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
a constant.  Return 0 if no simplification is possible.  */
4231 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4232 enum machine_mode op0_mode, rtx op0, rtx op1,
4235 unsigned int width = GET_MODE_BITSIZE (mode);
4237 /* VOIDmode means "infinite" precision. */
4239 width = HOST_BITS_PER_WIDE_INT;
4245 if (GET_CODE (op0) == CONST_INT
4246 && GET_CODE (op1) == CONST_INT
4247 && GET_CODE (op2) == CONST_INT
4248 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4249 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
/* Extracting a bit-field from a constant.  */
4252 HOST_WIDE_INT val = INTVAL (op0);
4254 if (BITS_BIG_ENDIAN)
4255 val >>= (GET_MODE_BITSIZE (op0_mode)
4256 - INTVAL (op2) - INTVAL (op1));
4258 val >>= INTVAL (op2);
4260 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4262 /* First zero-extend. */
4263 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4264 /* If desired, propagate sign bit. */
4265 if (code == SIGN_EXTRACT
4266 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4267 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4270 /* Clear the bits that don't belong in our mode,
4271 unless they and our sign bit are all one.
4272 So we get either a reasonable negative value or a reasonable
4273 unsigned value for this mode. */
4274 if (width < HOST_BITS_PER_WIDE_INT
4275 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4276 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4277 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4279 return gen_int_mode (val, mode);
4284 if (GET_CODE (op0) == CONST_INT)
4285 return op0 != const0_rtx ? op1 : op2;
4287 /* Convert c ? a : a into "a". */
4288 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4291 /* Convert a != b ? a : b into "a". */
4292 if (GET_CODE (op0) == NE
4293 && ! side_effects_p (op0)
4294 && ! HONOR_NANS (mode)
4295 && ! HONOR_SIGNED_ZEROS (mode)
4296 && ((rtx_equal_p (XEXP (op0, 0), op1)
4297 && rtx_equal_p (XEXP (op0, 1), op2))
4298 || (rtx_equal_p (XEXP (op0, 0), op2)
4299 && rtx_equal_p (XEXP (op0, 1), op1))))
4302 /* Convert a == b ? a : b into "b". */
4303 if (GET_CODE (op0) == EQ
4304 && ! side_effects_p (op0)
4305 && ! HONOR_NANS (mode)
4306 && ! HONOR_SIGNED_ZEROS (mode)
4307 && ((rtx_equal_p (XEXP (op0, 0), op1)
4308 && rtx_equal_p (XEXP (op0, 1), op2))
4309 || (rtx_equal_p (XEXP (op0, 0), op2)
4310 && rtx_equal_p (XEXP (op0, 1), op1))))
4313 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4315 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4316 ? GET_MODE (XEXP (op0, 1))
4317 : GET_MODE (XEXP (op0, 0)));
4320 /* Look for happy constants in op1 and op2. */
4321 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4323 HOST_WIDE_INT t = INTVAL (op1);
4324 HOST_WIDE_INT f = INTVAL (op2);
4326 if (t == STORE_FLAG_VALUE && f == 0)
4327 code = GET_CODE (op0);
4328 else if (t == 0 && f == STORE_FLAG_VALUE)
4331 tmp = reversed_comparison_code (op0, NULL_RTX);
4339 return simplify_gen_relational (code, mode, cmp_mode,
4340 XEXP (op0, 0), XEXP (op0, 1));
4343 if (cmp_mode == VOIDmode)
4344 cmp_mode = op0_mode;
4345 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4346 cmp_mode, XEXP (op0, 0),
4349 /* See if any simplifications were possible. */
4352 if (GET_CODE (temp) == CONST_INT)
4353 return temp == const0_rtx ? op2 : op1;
4355 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4361 gcc_assert (GET_MODE (op0) == mode);
4362 gcc_assert (GET_MODE (op1) == mode);
4363 gcc_assert (VECTOR_MODE_P (mode));
4364 op2 = avoid_constant_pool_reference (op2);
4365 if (GET_CODE (op2) == CONST_INT)
4367 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4368 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4369 int mask = (1 << n_elts) - 1;
4371 if (!(INTVAL (op2) & mask))
4373 if ((INTVAL (op2) & mask) == mask)
4376 op0 = avoid_constant_pool_reference (op0);
4377 op1 = avoid_constant_pool_reference (op1);
4378 if (GET_CODE (op0) == CONST_VECTOR
4379 && GET_CODE (op1) == CONST_VECTOR)
4381 rtvec v = rtvec_alloc (n_elts);
4384 for (i = 0; i < n_elts; i++)
4385 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4386 ? CONST_VECTOR_ELT (op0, i)
4387 : CONST_VECTOR_ELT (op1, i));
4388 return gen_rtx_CONST_VECTOR (mode, v);
4400 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4401 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4403 Works by unpacking OP into a collection of 8-bit values
4404 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4405 and then repacking them again for OUTERMODE. */
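/* For example, taking the HImode subreg at byte 2 of an SImode
   CONST_INT unpacks the constant into four byte-sized chunks,
   selects two of them according to the target byte order, and
   repacks them as a new CONST_INT.  */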
4408 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4409 enum machine_mode innermode, unsigned int byte)
4411 /* We support up to 512-bit values (for V8DFmode). */
4415 value_mask = (1 << value_bit) - 1
4417 unsigned char value[max_bitsize / value_bit];
4426 rtvec result_v = NULL;
4427 enum mode_class outer_class;
4428 enum machine_mode outer_submode;
4430 /* Some ports misuse CCmode. */
4431 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4434 /* We have no way to represent a complex constant at the rtl level. */
4435 if (COMPLEX_MODE_P (outermode))
4438 /* Unpack the value. */
4440 if (GET_CODE (op) == CONST_VECTOR)
4442 num_elem = CONST_VECTOR_NUNITS (op);
4443 elems = &CONST_VECTOR_ELT (op, 0);
4444 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4450 elem_bitsize = max_bitsize;
4452 /* If this asserts, it is too complicated; reducing value_bit may help. */
4453 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4454 /* I don't know how to handle endianness of sub-units. */
4455 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4457 for (elem = 0; elem < num_elem; elem++)
4460 rtx el = elems[elem];
/* Vectors are kept in target memory order.  (This is probably
   a mistake.)  */
4465 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4466 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4468 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4469 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4470 unsigned bytele = (subword_byte % UNITS_PER_WORD
4471 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4472 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4475 switch (GET_CODE (el))
4479 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4481 *vp++ = INTVAL (el) >> i;
4482 /* CONST_INTs are always logically sign-extended. */
4483 for (; i < elem_bitsize; i += value_bit)
4484 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4488 if (GET_MODE (el) == VOIDmode)
4490 /* If this triggers, someone should have generated a
4491 CONST_INT instead. */
4492 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4494 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4495 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4496 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4499 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
/* It shouldn't matter what's done here, so fill it with
   zeroes.  */
4504 for (; i < elem_bitsize; i += value_bit)
4509 long tmp[max_bitsize / 32];
4510 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4512 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4513 gcc_assert (bitsize <= elem_bitsize);
4514 gcc_assert (bitsize % value_bit == 0);
4516 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4519 /* real_to_target produces its result in words affected by
4520 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4521 and use WORDS_BIG_ENDIAN instead; see the documentation
4522 of SUBREG in rtl.texi. */
4523 for (i = 0; i < bitsize; i += value_bit)
4526 if (WORDS_BIG_ENDIAN)
4527 ibase = bitsize - 1 - i;
4530 *vp++ = tmp[ibase / 32] >> i % 32;
/* It shouldn't matter what's done here, so fill it with
   zeroes.  */
4535 for (; i < elem_bitsize; i += value_bit)
4545 /* Now, pick the right byte to start with. */
4546 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4547 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4548 will already have offset 0. */
4549 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4551 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4553 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4554 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4555 byte = (subword_byte % UNITS_PER_WORD
4556 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4559 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4560 so if it's become negative it will instead be very large.) */
4561 gcc_assert (byte < GET_MODE_SIZE (innermode));
4563 /* Convert from bytes to chunks of size value_bit. */
4564 value_start = byte * (BITS_PER_UNIT / value_bit);
4566 /* Re-pack the value. */
4568 if (VECTOR_MODE_P (outermode))
4570 num_elem = GET_MODE_NUNITS (outermode);
4571 result_v = rtvec_alloc (num_elem);
4572 elems = &RTVEC_ELT (result_v, 0);
4573 outer_submode = GET_MODE_INNER (outermode);
4579 outer_submode = outermode;
4582 outer_class = GET_MODE_CLASS (outer_submode);
4583 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4585 gcc_assert (elem_bitsize % value_bit == 0);
4586 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4588 for (elem = 0; elem < num_elem; elem++)
/* Vectors are stored in target memory order.  (This is probably
   a mistake.)  */
4595 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4596 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4598 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4599 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4600 unsigned bytele = (subword_byte % UNITS_PER_WORD
4601 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4602 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
                     << (i - HOST_BITS_PER_WIDE_INT));

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
            else
              return NULL_RTX;
          }
          break;
        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));
  if (outermode == innermode && !byte)
    return op;
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);
      /* The SUBREG_BYTE represents the offset, as if the value were stored
         in memory.  The irritating exception is the paradoxical subreg,
         where we define SUBREG_BYTE to be 0; on big endian machines the
         value would otherwise be negative.  For a moment, undo this
         exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
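      /* For instance, assuming a target with both endianness macros set
         and UNITS_PER_WORD == 4: for a paradoxical (subreg:DI (x:SI) 0),
         difference == 4 - 8 == -4, so final_offset drops by 4,
         recreating the negative offset that the SUBREG_BYTE == 0
         convention hides.  */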
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
              ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
                                        byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
        = regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (regno, innermode))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate the original regno.  We don't have any way to specify
             the offset inside the original regno, so do so only for the
             lowpart.  The information is used only by alias analysis, which
             cannot grok a partial register anyway.  */
          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
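  /* A small illustration, with an assumed address register P: on this
     path, (subreg:QI (mem:SI (reg:SI P)) 3) becomes
     (mem:QI (plus:SI (reg:SI P) (const_int 3))) via adjust_address_nv,
     provided the address is not mode-dependent.  */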
  /* Handle complex values represented as CONCAT
     of real and imaginary parts.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }
      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
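  /* Example, assuming 4-byte SFmode: for a complex value
     (concat:SC R:SF I:SF), (subreg:SF ... 0) selects the real part R
     and (subreg:SF ... 4) selects the imaginary part I, each with a
     final_offset of 0 within its part.  */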
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }
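      /* Concrete illustration, assuming the usual 16/32/64/128-bit
         HI/SI/DI/TImodes: with OP == (zero_extend:TI (x:SI)), the
         lowpart subreg in SImode returns x itself, in HImode it becomes
         the plain truncation (subreg:HI (x:SI) ...), and in DImode it
         re-extends as (zero_extend:DI (x:SI)).  */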
      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
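  /* For example, on a little-endian target,
     (subreg:SI (zero_extend:DI (x:QI)) 4) reads bits 32..63 of the
     zero-extended value; those lie entirely above x's 8 source bits,
     so the subreg folds to (const_int 0).  */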
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;
  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;
  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);
  return NULL_RTX;
}
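/* Usage sketch, assuming VAL is some DImode rtx: callers generally
   prefer this entry point to building a SUBREG directly, e.g.

     rtx lowpart = simplify_gen_subreg (SImode, val, DImode,
                                        subreg_lowpart_offset (SImode,
                                                               DImode));

   which folds VAL to a constant or register when possible and yields
   NULL_RTX when no valid SUBREG can be formed.  */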
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */
    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode,
                                        XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;