/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
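
/* For example, on a host where HOST_WIDE_INT is 64 bits,
   HWI_SIGN_EXTEND ((HOST_WIDE_INT) -2) yields (HOST_WIDE_INT) -1 for the
   high half of the (low, high) pair, while HWI_SIGN_EXTEND (2) yields 0.  */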
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int, unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
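
/* For example, in 32-bit SImode only the constant with just bit 31 set
   (0x80000000) satisfies mode_signbit_p; several transformations below
   rely on the fact that adding and XORing such a constant agree.  */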
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
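
/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds
   to X outright, while (plus (const_int 4) x) is merely reordered to
   (plus x (const_int 4)) so the constant ends up second.  */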
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
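
/* For example, a MEM:SF whose address is a CONSTANT_POOL_ADDRESS_P
   SYMBOL_REF for the pooled value 1.0 comes back as a CONST_DOUBLE, so
   callers can fold arithmetic against it as if it were immediate.  */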
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return new_rtx;
        }
      break;

    default:
      break;
    }
  return x;
}
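
/* For example, substituting NEW_RTX for OLD_RTX in
   (plus (mult OLD_RTX OLD_RTX) (const_int 4)) rebuilds and re-simplifies
   both nested operations, folding them when NEW_RTX is a constant.  */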
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */

static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
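
      /* The De Morgan rewrite above turns e.g. (not (and X Y)) into
         (ior (not X) (not Y)), which matches and-not/nand patterns.  */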
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx)
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
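
      /* For example, with STORE_FLAG_VALUE == 1,
         (neg:SI (lt:SI x (const_int 0))) becomes
         (ashiftrt:SI x (const_int 31)): both are -1 when X is negative
         and 0 otherwise.  */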
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned) significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /*  (float_extend (float_extend x)) is (float_extend x)

          (float_extend (float x)) is (float x) assuming that double
          rounding can't happen.
          */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned) significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
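
/* For example, simplify_unary_operation_1 (NOT, SImode, (not:SI x))
   returns X, and NEG of (mult x y) comes back as (mult (neg x) y).  */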
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */

rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
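
  /* For example, NEG of (const_int 5) folds to (const_int -5) here, and
     POPCOUNT of (const_int 7) folds to (const_int 3) via the
     bit-clearing loop above.  */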
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
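
/* For example, given (and (and x (const_int 3)) (const_int 1)), the
   "(a op b) op c" rule above folds the two constants and returns
   (and x (const_int 1)).  */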
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
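
/* For example, simplify_binary_operation (MULT, SImode, x, (const_int 8))
   is rewritten as (ashift x (const_int 3)) by the MULT case below, while
   two constant operands are folded outright by
   simplify_const_binary_operation.  */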
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
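
      /* For example, (plus (mult x (const_int 3)) x) matches the
         coefficient code above with coefficients 3 and 1 and is rewritten
         as (mult x (const_int 4)) when that is no more expensive.  */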
    case COMPARE:
#ifdef HAVE_cc0
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;
#endif

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -funsafe-math-optimizations and
         -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode)
              || (flag_unsafe_math_optimizations
                  && !HONOR_NANS (mode)
                  && !HONOR_INFINITIES (mode))))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
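
      /* For example, (minus (plus x y) x) is handed to
         simplify_plus_minus above and comes back as just Y.  */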
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
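
      /* For example, (mult x (const_int 8)) becomes
         (ashift x (const_int 3)) under the power-of-two test above.  */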
    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (GET_CODE (op1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && GET_CODE (trueop1) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT)
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && GET_CODE (XEXP (opleft, 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
          && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                          (AND, mode, XEXP (op0, 0),
                                           GEN_INT (INTVAL (XEXP (op0, 1))
                                                    & ~INTVAL (op1))),
                                    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
2192 if (trueop1 == const0_rtx)
2194 if (GET_CODE (trueop1) == CONST_INT
2195 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2196 == GET_MODE_MASK (mode)))
2197 return simplify_gen_unary (NOT, mode, op0, mode);
2198 if (rtx_equal_p (trueop0, trueop1)
2199 && ! side_effects_p (op0)
2200 && GET_MODE_CLASS (mode) != MODE_CC)
2201 return CONST0_RTX (mode);
2203 /* Canonicalize XOR of the most significant bit to PLUS. */
2204 if ((GET_CODE (op1) == CONST_INT
2205 || GET_CODE (op1) == CONST_DOUBLE)
2206 && mode_signbit_p (mode, op1))
2207 return simplify_gen_binary (PLUS, mode, op0, op1);
2208 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2209 if ((GET_CODE (op1) == CONST_INT
2210 || GET_CODE (op1) == CONST_DOUBLE)
2211 && GET_CODE (op0) == PLUS
2212 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2213 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2214 && mode_signbit_p (mode, XEXP (op0, 1)))
2215 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2216 simplify_gen_binary (XOR, mode, op1,
2219 /* If we are XORing two things that have no bits in common,
2220 convert them into an IOR. This helps to detect rotation encoded
2221 using those methods and possibly other simplifications. */
2223 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2224 && (nonzero_bits (op0, mode)
2225 & nonzero_bits (op1, mode)) == 0)
2226 return (simplify_gen_binary (IOR, mode, op0, op1));
2228 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2229 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2232 int num_negated = 0;
2234 if (GET_CODE (op0) == NOT)
2235 num_negated++, op0 = XEXP (op0, 0);
2236 if (GET_CODE (op1) == NOT)
2237 num_negated++, op1 = XEXP (op1, 0);
2239 if (num_negated == 2)
2240 return simplify_gen_binary (XOR, mode, op0, op1);
2241 else if (num_negated == 1)
2242 return simplify_gen_unary (NOT, mode,
2243 simplify_gen_binary (XOR, mode, op0, op1),
2247 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2248 correspond to a machine insn or result in further simplifications
2249 if B is a constant. */
2251 if (GET_CODE (op0) == AND
2252 && rtx_equal_p (XEXP (op0, 1), op1)
2253 && ! side_effects_p (op1))
2254 return simplify_gen_binary (AND, mode,
2255 simplify_gen_unary (NOT, mode,
2256 XEXP (op0, 0), mode),
2259 else if (GET_CODE (op0) == AND
2260 && rtx_equal_p (XEXP (op0, 0), op1)
2261 && ! side_effects_p (op1))
2262 return simplify_gen_binary (AND, mode,
2263 simplify_gen_unary (NOT, mode,
2264 XEXP (op0, 1), mode),
2267 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2268 comparison if STORE_FLAG_VALUE is 1. */
2269 if (STORE_FLAG_VALUE == 1
2270 && trueop1 == const1_rtx
2271 && COMPARISON_P (op0)
2272 && (reversed = reversed_comparison (op0, mode)))
2275 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2276 is (lt foo (const_int 0)), so we can perform the above
2277 simplification if STORE_FLAG_VALUE is 1. */
2279 if (STORE_FLAG_VALUE == 1
2280 && trueop1 == const1_rtx
2281 && GET_CODE (op0) == LSHIFTRT
2282 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2283 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2284 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2286 /* (xor (comparison foo bar) (const_int sign-bit))
2287 when STORE_FLAG_VALUE is the sign bit. */
2288 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2289 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2290 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2291 && trueop1 == const_true_rtx
2292 && COMPARISON_P (op0)
2293 && (reversed = reversed_comparison (op0, mode)))
2298 tem = simplify_associative_operation (code, mode, op0, op1);
2304 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2306 /* If we are turning off bits already known off in OP0, we need
2308 if (GET_CODE (trueop1) == CONST_INT
2309 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2310 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2312 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2313 && GET_MODE_CLASS (mode) != MODE_CC)
2316 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2317 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2318 && ! side_effects_p (op0)
2319 && GET_MODE_CLASS (mode) != MODE_CC)
2320 return CONST0_RTX (mode);
2322 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2323 there are no nonzero bits of C outside of X's mode. */
2324 if ((GET_CODE (op0) == SIGN_EXTEND
2325 || GET_CODE (op0) == ZERO_EXTEND)
2326 && GET_CODE (trueop1) == CONST_INT
2327 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2328 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2329 & INTVAL (trueop1)) == 0)
2331 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2332 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2333 gen_int_mode (INTVAL (trueop1),
2335 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2338 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2339 if (GET_CODE (op0) == IOR
2340 && GET_CODE (trueop1) == CONST_INT
2341 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2343 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2344 return simplify_gen_binary (IOR, mode,
2345 simplify_gen_binary (AND, mode,
2346 XEXP (op0, 0), op1),
2347 gen_int_mode (tmp, mode));
2350 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2351 insn (and may simplify more). */
2352 if (GET_CODE (op0) == XOR
2353 && rtx_equal_p (XEXP (op0, 0), op1)
2354 && ! side_effects_p (op1))
2355 return simplify_gen_binary (AND, mode,
2356 simplify_gen_unary (NOT, mode,
2357 XEXP (op0, 1), mode),
2360 if (GET_CODE (op0) == XOR
2361 && rtx_equal_p (XEXP (op0, 1), op1)
2362 && ! side_effects_p (op1))
2363 return simplify_gen_binary (AND, mode,
2364 simplify_gen_unary (NOT, mode,
2365 XEXP (op0, 0), mode),
2368 /* Similarly for (~(A ^ B)) & A. */
2369 if (GET_CODE (op0) == NOT
2370 && GET_CODE (XEXP (op0, 0)) == XOR
2371 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2372 && ! side_effects_p (op1))
2373 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2375 if (GET_CODE (op0) == NOT
2376 && GET_CODE (XEXP (op0, 0)) == XOR
2377 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2378 && ! side_effects_p (op1))
2379 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2381 /* Convert (A | B) & A to A. */
2382 if (GET_CODE (op0) == IOR
2383 && (rtx_equal_p (XEXP (op0, 0), op1)
2384 || rtx_equal_p (XEXP (op0, 1), op1))
2385 && ! side_effects_p (XEXP (op0, 0))
2386 && ! side_effects_p (XEXP (op0, 1)))
2389 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2390 ((A & N) + B) & M -> (A + B) & M
2391 Similarly if (N & M) == 0,
2392 ((A | N) + B) & M -> (A + B) & M
2393 and for - instead of + and/or ^ instead of |. */
2394 if (GET_CODE (trueop1) == CONST_INT
2395 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2396 && ~INTVAL (trueop1)
2397 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2398 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2403 pmop[0] = XEXP (op0, 0);
2404 pmop[1] = XEXP (op0, 1);
2406 for (which = 0; which < 2; which++)
2409 switch (GET_CODE (tem))
2412 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2413 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2414 == INTVAL (trueop1))
2415 pmop[which] = XEXP (tem, 0);
2419 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2420 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2421 pmop[which] = XEXP (tem, 0);
2428 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2430 tem = simplify_gen_binary (GET_CODE (op0), mode,
2432 return simplify_gen_binary (code, mode, tem, op1);
2435 tem = simplify_associative_operation (code, mode, op0, op1);
2441 /* 0/x is 0 (or x&0 if x has side-effects). */
2442 if (trueop0 == CONST0_RTX (mode))
2444 if (side_effects_p (op1))
2445 return simplify_gen_binary (AND, mode, op1, trueop0);
2449 if (trueop1 == CONST1_RTX (mode))
2450 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2451 /* Convert divide by power of two into shift. */
2452 if (GET_CODE (trueop1) == CONST_INT
2453 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2454 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2458 /* Handle floating point and integers separately. */
2459 if (SCALAR_FLOAT_MODE_P (mode))
2461 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2462 safe for modes with NaNs, since 0.0 / 0.0 will then be
2463 NaN rather than 0.0. Nor is it safe for modes with signed
2464 zeros, since dividing 0 by a negative number gives -0.0 */
2465 if (trueop0 == CONST0_RTX (mode)
2466 && !HONOR_NANS (mode)
2467 && !HONOR_SIGNED_ZEROS (mode)
2468 && ! side_effects_p (op1))
2471 if (trueop1 == CONST1_RTX (mode)
2472 && !HONOR_SNANS (mode))
2475 if (GET_CODE (trueop1) == CONST_DOUBLE
2476 && trueop1 != CONST0_RTX (mode))
2479 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2482 if (REAL_VALUES_EQUAL (d, dconstm1)
2483 && !HONOR_SNANS (mode))
2484 return simplify_gen_unary (NEG, mode, op0, mode);
2486 /* Change FP division by a constant into multiplication.
2487 Only do this with -funsafe-math-optimizations. */
2488 if (flag_unsafe_math_optimizations
2489 && !REAL_VALUES_EQUAL (d, dconst0))
2491 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2492 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2493 return simplify_gen_binary (MULT, mode, op0, tem);
2499 /* 0/x is 0 (or x&0 if x has side-effects). */
2500 if (trueop0 == CONST0_RTX (mode))
2502 if (side_effects_p (op1))
2503 return simplify_gen_binary (AND, mode, op1, trueop0);
2507 if (trueop1 == CONST1_RTX (mode))
2508 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2510 if (trueop1 == constm1_rtx)
2512 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2513 return simplify_gen_unary (NEG, mode, x, mode);
2519 /* 0%x is 0 (or x&0 if x has side-effects). */
2520 if (trueop0 == CONST0_RTX (mode))
2522 if (side_effects_p (op1))
2523 return simplify_gen_binary (AND, mode, op1, trueop0);
2526 /* x%1 is 0 (of x&0 if x has side-effects). */
2527 if (trueop1 == CONST1_RTX (mode))
2529 if (side_effects_p (op0))
2530 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2531 return CONST0_RTX (mode);
2533 /* Implement modulus by power of two as AND. */
2534 if (GET_CODE (trueop1) == CONST_INT
2535 && exact_log2 (INTVAL (trueop1)) > 0)
2536 return simplify_gen_binary (AND, mode, op0,
2537 GEN_INT (INTVAL (op1) - 1));
2541 /* 0%x is 0 (or x&0 if x has side-effects). */
2542 if (trueop0 == CONST0_RTX (mode))
2544 if (side_effects_p (op1))
2545 return simplify_gen_binary (AND, mode, op1, trueop0);
2548 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2549 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2551 if (side_effects_p (op0))
2552 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2553 return CONST0_RTX (mode);
2560 if (trueop1 == CONST0_RTX (mode))
2562 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2564 /* Rotating ~0 always results in ~0. */
2565 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2566 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2567 && ! side_effects_p (op1))
2573 if (trueop1 == CONST0_RTX (mode))
2575 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2580 if (trueop1 == CONST0_RTX (mode))
2582 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2584 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2585 if (GET_CODE (op0) == CLZ
2586 && GET_CODE (trueop1) == CONST_INT
2587 && STORE_FLAG_VALUE == 1
2588 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2590 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2591 unsigned HOST_WIDE_INT zero_val = 0;
2593 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2594 && zero_val == GET_MODE_BITSIZE (imode)
2595 && INTVAL (trueop1) == exact_log2 (zero_val))
2596 return simplify_gen_relational (EQ, mode, imode,
2597 XEXP (op0, 0), const0_rtx);
2602 if (width <= HOST_BITS_PER_WIDE_INT
2603 && GET_CODE (trueop1) == CONST_INT
2604 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2605 && ! side_effects_p (op0))
2607 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2609 tem = simplify_associative_operation (code, mode, op0, op1);
2615 if (width <= HOST_BITS_PER_WIDE_INT
2616 && GET_CODE (trueop1) == CONST_INT
2617 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2618 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2619 && ! side_effects_p (op0))
2621 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2623 tem = simplify_associative_operation (code, mode, op0, op1);
2629 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2631 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2633 tem = simplify_associative_operation (code, mode, op0, op1);
2639 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2641 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2643 tem = simplify_associative_operation (code, mode, op0, op1);
2652 /* ??? There are simplifications that can be done. */
2656 if (!VECTOR_MODE_P (mode))
2658 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2659 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2660 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2661 gcc_assert (XVECLEN (trueop1, 0) == 1);
2662 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2664 if (GET_CODE (trueop0) == CONST_VECTOR)
2665 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2670 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2671 gcc_assert (GET_MODE_INNER (mode)
2672 == GET_MODE_INNER (GET_MODE (trueop0)));
2673 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2675 if (GET_CODE (trueop0) == CONST_VECTOR)
2677 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2678 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2679 rtvec v = rtvec_alloc (n_elts);
2682 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2683 for (i = 0; i < n_elts; i++)
2685 rtx x = XVECEXP (trueop1, 0, i);
2687 gcc_assert (GET_CODE (x) == CONST_INT);
2688 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2692 return gen_rtx_CONST_VECTOR (mode, v);
2696 if (XVECLEN (trueop1, 0) == 1
2697 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2698 && GET_CODE (trueop0) == VEC_CONCAT)
2701 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2703 /* Try to find the element in the VEC_CONCAT. */
2704 while (GET_MODE (vec) != mode
2705 && GET_CODE (vec) == VEC_CONCAT)
2707 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2708 if (offset < vec_size)
2709 vec = XEXP (vec, 0);
2713 vec = XEXP (vec, 1);
2715 vec = avoid_constant_pool_reference (vec);
2718 if (GET_MODE (vec) == mode)
2725 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2726 ? GET_MODE (trueop0)
2727 : GET_MODE_INNER (mode));
2728 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2729 ? GET_MODE (trueop1)
2730 : GET_MODE_INNER (mode));
2732 gcc_assert (VECTOR_MODE_P (mode));
2733 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2734 == GET_MODE_SIZE (mode));
2736 if (VECTOR_MODE_P (op0_mode))
2737 gcc_assert (GET_MODE_INNER (mode)
2738 == GET_MODE_INNER (op0_mode));
2740 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2742 if (VECTOR_MODE_P (op1_mode))
2743 gcc_assert (GET_MODE_INNER (mode)
2744 == GET_MODE_INNER (op1_mode));
2746 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2748 if ((GET_CODE (trueop0) == CONST_VECTOR
2749 || GET_CODE (trueop0) == CONST_INT
2750 || GET_CODE (trueop0) == CONST_DOUBLE)
2751 && (GET_CODE (trueop1) == CONST_VECTOR
2752 || GET_CODE (trueop1) == CONST_INT
2753 || GET_CODE (trueop1) == CONST_DOUBLE))
2755 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2756 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2757 rtvec v = rtvec_alloc (n_elts);
2759 unsigned in_n_elts = 1;
2761 if (VECTOR_MODE_P (op0_mode))
2762 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2763 for (i = 0; i < n_elts; i++)
2767 if (!VECTOR_MODE_P (op0_mode))
2768 RTVEC_ELT (v, i) = trueop0;
2770 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2774 if (!VECTOR_MODE_P (op1_mode))
2775 RTVEC_ELT (v, i) = trueop1;
2777 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2782 return gen_rtx_CONST_VECTOR (mode, v);
2795 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2798 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2800 unsigned int width = GET_MODE_BITSIZE (mode);
2802 if (VECTOR_MODE_P (mode)
2803 && code != VEC_CONCAT
2804 && GET_CODE (op0) == CONST_VECTOR
2805 && GET_CODE (op1) == CONST_VECTOR)
2807 unsigned n_elts = GET_MODE_NUNITS (mode);
2808 enum machine_mode op0mode = GET_MODE (op0);
2809 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2810 enum machine_mode op1mode = GET_MODE (op1);
2811 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2812 rtvec v = rtvec_alloc (n_elts);
2815 gcc_assert (op0_n_elts == n_elts);
2816 gcc_assert (op1_n_elts == n_elts);
2817 for (i = 0; i < n_elts; i++)
2819 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2820 CONST_VECTOR_ELT (op0, i),
2821 CONST_VECTOR_ELT (op1, i));
2824 RTVEC_ELT (v, i) = x;
2827 return gen_rtx_CONST_VECTOR (mode, v);
2830 if (VECTOR_MODE_P (mode)
2831 && code == VEC_CONCAT
2832 && CONSTANT_P (op0) && CONSTANT_P (op1))
2834 unsigned n_elts = GET_MODE_NUNITS (mode);
2835 rtvec v = rtvec_alloc (n_elts);
2837 gcc_assert (n_elts >= 2);
2840 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2841 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2843 RTVEC_ELT (v, 0) = op0;
2844 RTVEC_ELT (v, 1) = op1;
2848 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2849 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2852 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2853 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2854 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2856 for (i = 0; i < op0_n_elts; ++i)
2857 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2858 for (i = 0; i < op1_n_elts; ++i)
2859 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2862 return gen_rtx_CONST_VECTOR (mode, v);
2865 if (SCALAR_FLOAT_MODE_P (mode)
2866 && GET_CODE (op0) == CONST_DOUBLE
2867 && GET_CODE (op1) == CONST_DOUBLE
2868 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2879 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2881 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2883 for (i = 0; i < 4; i++)
2900 real_from_target (&r, tmp0, mode);
2901 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2905 REAL_VALUE_TYPE f0, f1, value, result;
2908 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2909 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2910 real_convert (&f0, mode, &f0);
2911 real_convert (&f1, mode, &f1);
2913 if (HONOR_SNANS (mode)
2914 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2918 && REAL_VALUES_EQUAL (f1, dconst0)
2919 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2922 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2923 && flag_trapping_math
2924 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2926 int s0 = REAL_VALUE_NEGATIVE (f0);
2927 int s1 = REAL_VALUE_NEGATIVE (f1);
2932 /* Inf + -Inf = NaN plus exception. */
2937 /* Inf - Inf = NaN plus exception. */
2942 /* Inf / Inf = NaN plus exception. */
2949 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2950 && flag_trapping_math
2951 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2952 || (REAL_VALUE_ISINF (f1)
2953 && REAL_VALUES_EQUAL (f0, dconst0))))
2954 /* Inf * 0 = NaN plus exception. */
2957 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2959 real_convert (&result, mode, &value);
2961 /* Don't constant fold this floating point operation if
2962 the result has overflowed and flag_trapping_math. */
2964 if (flag_trapping_math
2965 && MODE_HAS_INFINITIES (mode)
2966 && REAL_VALUE_ISINF (result)
2967 && !REAL_VALUE_ISINF (f0)
2968 && !REAL_VALUE_ISINF (f1))
2969 /* Overflow plus exception. */
2972 /* Don't constant fold this floating point operation if the
2973 result may dependent upon the run-time rounding mode and
2974 flag_rounding_math is set, or if GCC's software emulation
2975 is unable to accurately represent the result. */
2977 if ((flag_rounding_math
2978 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2979 && !flag_unsafe_math_optimizations))
2980 && (inexact || !real_identical (&result, &value)))
2983 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2987 /* We can fold some multi-word operations. */
2988 if (GET_MODE_CLASS (mode) == MODE_INT
2989 && width == HOST_BITS_PER_WIDE_INT * 2
2990 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2991 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2993 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2994 HOST_WIDE_INT h1, h2, hv, ht;
2996 if (GET_CODE (op0) == CONST_DOUBLE)
2997 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2999 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3001 if (GET_CODE (op1) == CONST_DOUBLE)
3002 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3004 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3009 /* A - B == A + (-B). */
3010 neg_double (l2, h2, &lv, &hv);
3013 /* Fall through.... */
3016 add_double (l1, h1, l2, h2, &lv, &hv);
3020 mul_double (l1, h1, l2, h2, &lv, &hv);
3024 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3025 &lv, &hv, <, &ht))
3030 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3031 <, &ht, &lv, &hv))
3036 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3037 &lv, &hv, <, &ht))
3042 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3043 <, &ht, &lv, &hv))
3048 lv = l1 & l2, hv = h1 & h2;
3052 lv = l1 | l2, hv = h1 | h2;
3056 lv = l1 ^ l2, hv = h1 ^ h2;
3062 && ((unsigned HOST_WIDE_INT) l1
3063 < (unsigned HOST_WIDE_INT) l2)))
3072 && ((unsigned HOST_WIDE_INT) l1
3073 > (unsigned HOST_WIDE_INT) l2)))
3080 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3082 && ((unsigned HOST_WIDE_INT) l1
3083 < (unsigned HOST_WIDE_INT) l2)))
3090 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3092 && ((unsigned HOST_WIDE_INT) l1
3093 > (unsigned HOST_WIDE_INT) l2)))
3099 case LSHIFTRT: case ASHIFTRT:
3101 case ROTATE: case ROTATERT:
3102 if (SHIFT_COUNT_TRUNCATED)
3103 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3105 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3108 if (code == LSHIFTRT || code == ASHIFTRT)
3109 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3111 else if (code == ASHIFT)
3112 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3113 else if (code == ROTATE)
3114 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3115 else /* code == ROTATERT */
3116 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3123 return immed_double_const (lv, hv, mode);
3126 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3127 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3129 /* Get the integer argument values in two forms:
3130 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3132 arg0 = INTVAL (op0);
3133 arg1 = INTVAL (op1);
3135 if (width < HOST_BITS_PER_WIDE_INT)
3137 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3138 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3141 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3142 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3145 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3146 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3154 /* Compute the value of the arithmetic. */
3159 val = arg0s + arg1s;
3163 val = arg0s - arg1s;
3167 val = arg0s * arg1s;
3172 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3175 val = arg0s / arg1s;
3180 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3183 val = arg0s % arg1s;
3188 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3191 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3196 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3199 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3217 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3218 the value is in range. We can't return any old value for
3219 out-of-range arguments because either the middle-end (via
3220 shift_truncation_mask) or the back-end might be relying on
3221 target-specific knowledge. Nor can we rely on
3222 shift_truncation_mask, since the shift might not be part of an
3223 ashlM3, lshrM3 or ashrM3 instruction. */
3224 if (SHIFT_COUNT_TRUNCATED)
3225 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3226 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3229 val = (code == ASHIFT
3230 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3231 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3233 /* Sign-extend the result for arithmetic right shifts. */
3234 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3235 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3243 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3244 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3252 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3253 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3257 /* Do nothing here. */
3261 val = arg0s <= arg1s ? arg0s : arg1s;
3265 val = ((unsigned HOST_WIDE_INT) arg0
3266 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3270 val = arg0s > arg1s ? arg0s : arg1s;
3274 val = ((unsigned HOST_WIDE_INT) arg0
3275 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3283 /* ??? There are simplifications that can be done. */
3290 return gen_int_mode (val, mode);
3298 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3301 Rather than test for specific case, we do this by a brute-force method
3302 and do all possible simplifications until no more changes occur. Then
3303 we rebuild the operation. */
3305 struct simplify_plus_minus_op_data
3312 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3316 result = (commutative_operand_precedence (y)
3317 - commutative_operand_precedence (x));
3321 /* Group together equal REGs to do more simplification. */
3322 if (REG_P (x) && REG_P (y))
3323 return REGNO (x) > REGNO (y);
3329 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3332 struct simplify_plus_minus_op_data ops[8];
3334 int n_ops = 2, input_ops = 2;
3335 int changed, n_constants = 0, canonicalized = 0;
3338 memset (ops, 0, sizeof ops);
3340 /* Set up the two operands and then expand them until nothing has been
3341 changed. If we run out of room in our array, give up; this should
3342 almost never happen. */
3347 ops[1].neg = (code == MINUS);
3353 for (i = 0; i < n_ops; i++)
3355 rtx this_op = ops[i].op;
3356 int this_neg = ops[i].neg;
3357 enum rtx_code this_code = GET_CODE (this_op);
3366 ops[n_ops].op = XEXP (this_op, 1);
3367 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3370 ops[i].op = XEXP (this_op, 0);
3373 canonicalized |= this_neg;
3377 ops[i].op = XEXP (this_op, 0);
3378 ops[i].neg = ! this_neg;
3385 && GET_CODE (XEXP (this_op, 0)) == PLUS
3386 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3387 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3389 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3390 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3391 ops[n_ops].neg = this_neg;
3399 /* ~a -> (-a - 1) */
3402 ops[n_ops].op = constm1_rtx;
3403 ops[n_ops++].neg = this_neg;
3404 ops[i].op = XEXP (this_op, 0);
3405 ops[i].neg = !this_neg;
3415 ops[i].op = neg_const_int (mode, this_op);
3429 if (n_constants > 1)
3432 gcc_assert (n_ops >= 2);
3434 /* If we only have two operands, we can avoid the loops. */
3437 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3440 /* Get the two operands. Be careful with the order, especially for
3441 the cases where code == MINUS. */
3442 if (ops[0].neg && ops[1].neg)
3444 lhs = gen_rtx_NEG (mode, ops[0].op);
3447 else if (ops[0].neg)
3458 return simplify_const_binary_operation (code, mode, lhs, rhs);
3461 /* Now simplify each pair of operands until nothing changes. */
3464 /* Insertion sort is good enough for an eight-element array. */
3465 for (i = 1; i < n_ops; i++)
3467 struct simplify_plus_minus_op_data save;
3469 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3475 ops[j + 1] = ops[j];
3476 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3480 /* This is only useful the first time through. */
3485 for (i = n_ops - 1; i > 0; i--)
3486 for (j = i - 1; j >= 0; j--)
3488 rtx lhs = ops[j].op, rhs = ops[i].op;
3489 int lneg = ops[j].neg, rneg = ops[i].neg;
3491 if (lhs != 0 && rhs != 0)
3493 enum rtx_code ncode = PLUS;
3499 tem = lhs, lhs = rhs, rhs = tem;
3501 else if (swap_commutative_operands_p (lhs, rhs))
3502 tem = lhs, lhs = rhs, rhs = tem;
3504 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3505 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3507 rtx tem_lhs, tem_rhs;
3509 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3510 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3511 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3513 if (tem && !CONSTANT_P (tem))
3514 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3517 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3519 /* Reject "simplifications" that just wrap the two
3520 arguments in a CONST. Failure to do so can result
3521 in infinite recursion with simplify_binary_operation
3522 when it calls us to simplify CONST operations. */
3524 && ! (GET_CODE (tem) == CONST
3525 && GET_CODE (XEXP (tem, 0)) == ncode
3526 && XEXP (XEXP (tem, 0), 0) == lhs
3527 && XEXP (XEXP (tem, 0), 1) == rhs))
3530 if (GET_CODE (tem) == NEG)
3531 tem = XEXP (tem, 0), lneg = !lneg;
3532 if (GET_CODE (tem) == CONST_INT && lneg)
3533 tem = neg_const_int (mode, tem), lneg = 0;
3537 ops[j].op = NULL_RTX;
3543 /* Pack all the operands to the lower-numbered entries. */
3544 for (i = 0, j = 0; j < n_ops; j++)
3554 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3556 && GET_CODE (ops[1].op) == CONST_INT
3557 && CONSTANT_P (ops[0].op)
3559 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3561 /* We suppressed creation of trivial CONST expressions in the
3562 combination loop to avoid recursion. Create one manually now.
3563 The combination loop should have ensured that there is exactly
3564 one CONST_INT, and the sort will have ensured that it is last
3565 in the array and that any other constant will be next-to-last. */
3568 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3569 && CONSTANT_P (ops[n_ops - 2].op))
3571 rtx value = ops[n_ops - 1].op;
3572 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3573 value = neg_const_int (mode, value);
3574 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3578 /* Put a non-negated operand first, if possible. */
3580 for (i = 0; i < n_ops && ops[i].neg; i++)
3583 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3592 /* Now make the result by performing the requested operations. */
3594 for (i = 1; i < n_ops; i++)
3595 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3596 mode, result, ops[i].op);
3601 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3603 plus_minus_operand_p (const_rtx x)
3605 return GET_CODE (x) == PLUS
3606 || GET_CODE (x) == MINUS
3607 || (GET_CODE (x) == CONST
3608 && GET_CODE (XEXP (x, 0)) == PLUS
3609 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3610 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3613 /* Like simplify_binary_operation except used for relational operators.
3614 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3615 not also be VOIDmode.
3617 CMP_MODE specifies in which mode the comparison is done in, so it is
3618 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3619 the operands or, if both are VOIDmode, the operands are compared in
3620 "infinite precision". */
3622 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3623 enum machine_mode cmp_mode, rtx op0, rtx op1)
3625 rtx tem, trueop0, trueop1;
3627 if (cmp_mode == VOIDmode)
3628 cmp_mode = GET_MODE (op0);
3629 if (cmp_mode == VOIDmode)
3630 cmp_mode = GET_MODE (op1);
3632 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3635 if (SCALAR_FLOAT_MODE_P (mode))
3637 if (tem == const0_rtx)
3638 return CONST0_RTX (mode);
3639 #ifdef FLOAT_STORE_FLAG_VALUE
3641 REAL_VALUE_TYPE val;
3642 val = FLOAT_STORE_FLAG_VALUE (mode);
3643 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3649 if (VECTOR_MODE_P (mode))
3651 if (tem == const0_rtx)
3652 return CONST0_RTX (mode);
3653 #ifdef VECTOR_STORE_FLAG_VALUE
3658 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3659 if (val == NULL_RTX)
3661 if (val == const1_rtx)
3662 return CONST1_RTX (mode);
3664 units = GET_MODE_NUNITS (mode);
3665 v = rtvec_alloc (units);
3666 for (i = 0; i < units; i++)
3667 RTVEC_ELT (v, i) = val;
3668 return gen_rtx_raw_CONST_VECTOR (mode, v);
3678 /* For the following tests, ensure const0_rtx is op1. */
3679 if (swap_commutative_operands_p (op0, op1)
3680 || (op0 == const0_rtx && op1 != const0_rtx))
3681 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3683 /* If op0 is a compare, extract the comparison arguments from it. */
3684 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3685 return simplify_relational_operation (code, mode, VOIDmode,
3686 XEXP (op0, 0), XEXP (op0, 1));
3688 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3692 trueop0 = avoid_constant_pool_reference (op0);
3693 trueop1 = avoid_constant_pool_reference (op1);
3694 return simplify_relational_operation_1 (code, mode, cmp_mode,
3698 /* This part of simplify_relational_operation is only used when CMP_MODE
3699 is not in class MODE_CC (i.e. it is a real comparison).
3701 MODE is the mode of the result, while CMP_MODE specifies in which
3702 mode the comparison is done in, so it is the mode of the operands. */
3705 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3706 enum machine_mode cmp_mode, rtx op0, rtx op1)
3708 enum rtx_code op0code = GET_CODE (op0);
3710 if (op1 == const0_rtx && COMPARISON_P (op0))
3712 /* If op0 is a comparison, extract the comparison arguments
3716 if (GET_MODE (op0) == mode)
3717 return simplify_rtx (op0);
3719 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3720 XEXP (op0, 0), XEXP (op0, 1));
3722 else if (code == EQ)
3724 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3725 if (new_code != UNKNOWN)
3726 return simplify_gen_relational (new_code, mode, VOIDmode,
3727 XEXP (op0, 0), XEXP (op0, 1));
3731 if (op1 == const0_rtx)
3733 /* Canonicalize (GTU x 0) as (NE x 0). */
3735 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3736 /* Canonicalize (LEU x 0) as (EQ x 0). */
3738 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3740 else if (op1 == const1_rtx)
3745 /* Canonicalize (GE x 1) as (GT x 0). */
3746 return simplify_gen_relational (GT, mode, cmp_mode,
3749 /* Canonicalize (GEU x 1) as (NE x 0). */
3750 return simplify_gen_relational (NE, mode, cmp_mode,
3753 /* Canonicalize (LT x 1) as (LE x 0). */
3754 return simplify_gen_relational (LE, mode, cmp_mode,
3757 /* Canonicalize (LTU x 1) as (EQ x 0). */
3758 return simplify_gen_relational (EQ, mode, cmp_mode,
3764 else if (op1 == constm1_rtx)
3766 /* Canonicalize (LE x -1) as (LT x 0). */
3768 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3769 /* Canonicalize (GT x -1) as (GE x 0). */
3771 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3774 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3775 if ((code == EQ || code == NE)
3776 && (op0code == PLUS || op0code == MINUS)
3778 && CONSTANT_P (XEXP (op0, 1))
3779 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3781 rtx x = XEXP (op0, 0);
3782 rtx c = XEXP (op0, 1);
3784 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3786 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3789 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3790 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3792 && op1 == const0_rtx
3793 && GET_MODE_CLASS (mode) == MODE_INT
3794 && cmp_mode != VOIDmode
3795 /* ??? Work-around BImode bugs in the ia64 backend. */
3797 && cmp_mode != BImode
3798 && nonzero_bits (op0, cmp_mode) == 1
3799 && STORE_FLAG_VALUE == 1)
3800 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3801 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3802 : lowpart_subreg (mode, op0, cmp_mode);
3804 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3805 if ((code == EQ || code == NE)
3806 && op1 == const0_rtx
3808 return simplify_gen_relational (code, mode, cmp_mode,
3809 XEXP (op0, 0), XEXP (op0, 1));
3811 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3812 if ((code == EQ || code == NE)
3814 && rtx_equal_p (XEXP (op0, 0), op1)
3815 && !side_effects_p (XEXP (op0, 0)))
3816 return simplify_gen_relational (code, mode, cmp_mode,
3817 XEXP (op0, 1), const0_rtx);
3819 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3820 if ((code == EQ || code == NE)
3822 && rtx_equal_p (XEXP (op0, 1), op1)
3823 && !side_effects_p (XEXP (op0, 1)))
3824 return simplify_gen_relational (code, mode, cmp_mode,
3825 XEXP (op0, 0), const0_rtx);
3827 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3828 if ((code == EQ || code == NE)
3830 && (GET_CODE (op1) == CONST_INT
3831 || GET_CODE (op1) == CONST_DOUBLE)
3832 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3833 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3834 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3835 simplify_gen_binary (XOR, cmp_mode,
3836 XEXP (op0, 1), op1));
3838 if (op0code == POPCOUNT && op1 == const0_rtx)
3844 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3845 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3846 XEXP (op0, 0), const0_rtx);
3851 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3852 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3853 XEXP (op0, 0), const0_rtx);
3862 /* Check if the given comparison (done in the given MODE) is actually a
3863 tautology or a contradiction.
3864 If no simplification is possible, this function returns zero.
3865 Otherwise, it returns either const_true_rtx or const0_rtx. */
3868 simplify_const_relational_operation (enum rtx_code code,
3869 enum machine_mode mode,
3872 int equal, op0lt, op0ltu, op1lt, op1ltu;
3877 gcc_assert (mode != VOIDmode
3878 || (GET_MODE (op0) == VOIDmode
3879 && GET_MODE (op1) == VOIDmode));
3881 /* If op0 is a compare, extract the comparison arguments from it. */
3882 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3884 op1 = XEXP (op0, 1);
3885 op0 = XEXP (op0, 0);
3887 if (GET_MODE (op0) != VOIDmode)
3888 mode = GET_MODE (op0);
3889 else if (GET_MODE (op1) != VOIDmode)
3890 mode = GET_MODE (op1);
3895 /* We can't simplify MODE_CC values since we don't know what the
3896 actual comparison is. */
3897 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3900 /* Make sure the constant is second. */
3901 if (swap_commutative_operands_p (op0, op1))
3903 tem = op0, op0 = op1, op1 = tem;
3904 code = swap_condition (code);
3907 trueop0 = avoid_constant_pool_reference (op0);
3908 trueop1 = avoid_constant_pool_reference (op1);
3910 /* For integer comparisons of A and B maybe we can simplify A - B and can
3911 then simplify a comparison of that with zero. If A and B are both either
3912 a register or a CONST_INT, this can't help; testing for these cases will
3913 prevent infinite recursion here and speed things up.
3915 We can only do this for EQ and NE comparisons as otherwise we may
3916 lose or introduce overflow which we cannot disregard as undefined as
3917 we do not know the signedness of the operation on either the left or
3918 the right hand side of the comparison. */
3920 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3921 && (code == EQ || code == NE)
3922 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3923 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3924 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3925 /* We cannot do this if tem is a nonzero address. */
3926 && ! nonzero_address_p (tem))
3927 return simplify_const_relational_operation (signed_condition (code),
3928 mode, tem, const0_rtx);
3930 if (! HONOR_NANS (mode) && code == ORDERED)
3931 return const_true_rtx;
3933 if (! HONOR_NANS (mode) && code == UNORDERED)
3936 /* For modes without NaNs, if the two operands are equal, we know the
3937 result except if they have side-effects. */
3938 if (! HONOR_NANS (GET_MODE (trueop0))
3939 && rtx_equal_p (trueop0, trueop1)
3940 && ! side_effects_p (trueop0))
3941 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3943 /* If the operands are floating-point constants, see if we can fold
3945 else if (GET_CODE (trueop0) == CONST_DOUBLE
3946 && GET_CODE (trueop1) == CONST_DOUBLE
3947 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3949 REAL_VALUE_TYPE d0, d1;
3951 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3952 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3954 /* Comparisons are unordered iff at least one of the values is NaN. */
3955 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3965 return const_true_rtx;
3978 equal = REAL_VALUES_EQUAL (d0, d1);
3979 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3980 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3983 /* Otherwise, see if the operands are both integers. */
3984 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3985 && (GET_CODE (trueop0) == CONST_DOUBLE
3986 || GET_CODE (trueop0) == CONST_INT)
3987 && (GET_CODE (trueop1) == CONST_DOUBLE
3988 || GET_CODE (trueop1) == CONST_INT))
3990 int width = GET_MODE_BITSIZE (mode);
3991 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3992 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3994 /* Get the two words comprising each integer constant. */
3995 if (GET_CODE (trueop0) == CONST_DOUBLE)
3997 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3998 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4002 l0u = l0s = INTVAL (trueop0);
4003 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4006 if (GET_CODE (trueop1) == CONST_DOUBLE)
4008 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4009 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4013 l1u = l1s = INTVAL (trueop1);
4014 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4017 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4018 we have to sign or zero-extend the values. */
4019 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4021 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4022 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4024 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4025 l0s |= ((HOST_WIDE_INT) (-1) << width);
4027 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4028 l1s |= ((HOST_WIDE_INT) (-1) << width);
4030 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4031 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4033 equal = (h0u == h1u && l0u == l1u);
4034 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
4035 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
4036 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4037 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
4040 /* Otherwise, there are some code-specific tests we can make. */
4043 /* Optimize comparisons with upper and lower bounds. */
4044 if (SCALAR_INT_MODE_P (mode)
4045 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4058 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
4065 /* x >= min is always true. */
4066 if (rtx_equal_p (trueop1, mmin))
4067 tem = const_true_rtx;
4073 /* x <= max is always true. */
4074 if (rtx_equal_p (trueop1, mmax))
4075 tem = const_true_rtx;
4080 /* x > max is always false. */
4081 if (rtx_equal_p (trueop1, mmax))
4087 /* x < min is always false. */
4088 if (rtx_equal_p (trueop1, mmin))
4095 if (tem == const0_rtx
4096 || tem == const_true_rtx)
4103 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4108 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4109 return const_true_rtx;
4113 /* Optimize abs(x) < 0.0. */
4114 if (trueop1 == CONST0_RTX (mode)
4115 && !HONOR_SNANS (mode)
4116 && (!INTEGRAL_MODE_P (mode)
4117 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4119 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4121 if (GET_CODE (tem) == ABS)
4123 if (INTEGRAL_MODE_P (mode)
4124 && (issue_strict_overflow_warning
4125 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4126 warning (OPT_Wstrict_overflow,
4127 ("assuming signed overflow does not occur when "
4128 "assuming abs (x) < 0 is false"));
4133 /* Optimize popcount (x) < 0. */
4134 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4135 return const_true_rtx;
4139 /* Optimize abs(x) >= 0.0. */
4140 if (trueop1 == CONST0_RTX (mode)
4141 && !HONOR_NANS (mode)
4142 && (!INTEGRAL_MODE_P (mode)
4143 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4145 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4147 if (GET_CODE (tem) == ABS)
4149 if (INTEGRAL_MODE_P (mode)
4150 && (issue_strict_overflow_warning
4151 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4152 warning (OPT_Wstrict_overflow,
4153 ("assuming signed overflow does not occur when "
4154 "assuming abs (x) >= 0 is true"));
4155 return const_true_rtx;
4159 /* Optimize popcount (x) >= 0. */
4160 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4161 return const_true_rtx;
4165 /* Optimize ! (abs(x) < 0.0). */
4166 if (trueop1 == CONST0_RTX (mode))
4168 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4170 if (GET_CODE (tem) == ABS)
4171 return const_true_rtx;
4182 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4188 return equal ? const_true_rtx : const0_rtx;
4191 return ! equal ? const_true_rtx : const0_rtx;
4194 return op0lt ? const_true_rtx : const0_rtx;
4197 return op1lt ? const_true_rtx : const0_rtx;
4199 return op0ltu ? const_true_rtx : const0_rtx;
4201 return op1ltu ? const_true_rtx : const0_rtx;
4204 return equal || op0lt ? const_true_rtx : const0_rtx;
4207 return equal || op1lt ? const_true_rtx : const0_rtx;
4209 return equal || op0ltu ? const_true_rtx : const0_rtx;
4211 return equal || op1ltu ? const_true_rtx : const0_rtx;
4213 return const_true_rtx;
4221 /* Simplify CODE, an operation with result mode MODE and three operands,
4222 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4223 a constant. Return 0 if no simplifications is possible. */
4226 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4227 enum machine_mode op0_mode, rtx op0, rtx op1,
4230 unsigned int width = GET_MODE_BITSIZE (mode);
4232 /* VOIDmode means "infinite" precision. */
4234 width = HOST_BITS_PER_WIDE_INT;
4240 if (GET_CODE (op0) == CONST_INT
4241 && GET_CODE (op1) == CONST_INT
4242 && GET_CODE (op2) == CONST_INT
4243 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4244 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4246 /* Extracting a bit-field from a constant */
4247 HOST_WIDE_INT val = INTVAL (op0);
4249 if (BITS_BIG_ENDIAN)
4250 val >>= (GET_MODE_BITSIZE (op0_mode)
4251 - INTVAL (op2) - INTVAL (op1));
4253 val >>= INTVAL (op2);
4255 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4257 /* First zero-extend. */
4258 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4259 /* If desired, propagate sign bit. */
4260 if (code == SIGN_EXTRACT
4261 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4262 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4265 /* Clear the bits that don't belong in our mode,
4266 unless they and our sign bit are all one.
4267 So we get either a reasonable negative value or a reasonable
4268 unsigned value for this mode. */
4269 if (width < HOST_BITS_PER_WIDE_INT
4270 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4271 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4272 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4274 return gen_int_mode (val, mode);
4279 if (GET_CODE (op0) == CONST_INT)
4280 return op0 != const0_rtx ? op1 : op2;
4282 /* Convert c ? a : a into "a". */
4283 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4286 /* Convert a != b ? a : b into "a". */
4287 if (GET_CODE (op0) == NE
4288 && ! side_effects_p (op0)
4289 && ! HONOR_NANS (mode)
4290 && ! HONOR_SIGNED_ZEROS (mode)
4291 && ((rtx_equal_p (XEXP (op0, 0), op1)
4292 && rtx_equal_p (XEXP (op0, 1), op2))
4293 || (rtx_equal_p (XEXP (op0, 0), op2)
4294 && rtx_equal_p (XEXP (op0, 1), op1))))
4297 /* Convert a == b ? a : b into "b". */
4298 if (GET_CODE (op0) == EQ
4299 && ! side_effects_p (op0)
4300 && ! HONOR_NANS (mode)
4301 && ! HONOR_SIGNED_ZEROS (mode)
4302 && ((rtx_equal_p (XEXP (op0, 0), op1)
4303 && rtx_equal_p (XEXP (op0, 1), op2))
4304 || (rtx_equal_p (XEXP (op0, 0), op2)
4305 && rtx_equal_p (XEXP (op0, 1), op1))))
4308 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4310 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4311 ? GET_MODE (XEXP (op0, 1))
4312 : GET_MODE (XEXP (op0, 0)));
4315 /* Look for happy constants in op1 and op2. */
4316 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4318 HOST_WIDE_INT t = INTVAL (op1);
4319 HOST_WIDE_INT f = INTVAL (op2);
4321 if (t == STORE_FLAG_VALUE && f == 0)
4322 code = GET_CODE (op0);
4323 else if (t == 0 && f == STORE_FLAG_VALUE)
4326 tmp = reversed_comparison_code (op0, NULL_RTX);
4334 return simplify_gen_relational (code, mode, cmp_mode,
4335 XEXP (op0, 0), XEXP (op0, 1));
4338 if (cmp_mode == VOIDmode)
4339 cmp_mode = op0_mode;
4340 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4341 cmp_mode, XEXP (op0, 0),
4344 /* See if any simplifications were possible. */
4347 if (GET_CODE (temp) == CONST_INT)
4348 return temp == const0_rtx ? op2 : op1;
4350 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4356 gcc_assert (GET_MODE (op0) == mode);
4357 gcc_assert (GET_MODE (op1) == mode);
4358 gcc_assert (VECTOR_MODE_P (mode));
4359 op2 = avoid_constant_pool_reference (op2);
4360 if (GET_CODE (op2) == CONST_INT)
4362 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4363 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4364 int mask = (1 << n_elts) - 1;
4366 if (!(INTVAL (op2) & mask))
4368 if ((INTVAL (op2) & mask) == mask)
4371 op0 = avoid_constant_pool_reference (op0);
4372 op1 = avoid_constant_pool_reference (op1);
4373 if (GET_CODE (op0) == CONST_VECTOR
4374 && GET_CODE (op1) == CONST_VECTOR)
4376 rtvec v = rtvec_alloc (n_elts);
4379 for (i = 0; i < n_elts; i++)
4380 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4381 ? CONST_VECTOR_ELT (op0, i)
4382 : CONST_VECTOR_ELT (op1, i));
4383 return gen_rtx_CONST_VECTOR (mode, v);
4395 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4396 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4398 Works by unpacking OP into a collection of 8-bit values
4399 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4400 and then repacking them again for OUTERMODE. */
4403 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4404 enum machine_mode innermode, unsigned int byte)
4406 /* We support up to 512-bit values (for V8DFmode). */
4410 value_mask = (1 << value_bit) - 1
4412 unsigned char value[max_bitsize / value_bit];
4421 rtvec result_v = NULL;
4422 enum mode_class outer_class;
4423 enum machine_mode outer_submode;
4425 /* Some ports misuse CCmode. */
4426 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4429 /* We have no way to represent a complex constant at the rtl level. */
4430 if (COMPLEX_MODE_P (outermode))
4433 /* Unpack the value. */
4435 if (GET_CODE (op) == CONST_VECTOR)
4437 num_elem = CONST_VECTOR_NUNITS (op);
4438 elems = &CONST_VECTOR_ELT (op, 0);
4439 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4445 elem_bitsize = max_bitsize;
4447 /* If this asserts, it is too complicated; reducing value_bit may help. */
4448 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4449 /* I don't know how to handle endianness of sub-units. */
4450 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4452 for (elem = 0; elem < num_elem; elem++)
4455 rtx el = elems[elem];
4457 /* Vectors are kept in target memory order. (This is probably
4460 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4461 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4463 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4464 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4465 unsigned bytele = (subword_byte % UNITS_PER_WORD
4466 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4467 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4470 switch (GET_CODE (el))
4474 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4476 *vp++ = INTVAL (el) >> i;
4477 /* CONST_INTs are always logically sign-extended. */
4478 for (; i < elem_bitsize; i += value_bit)
4479 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4483 if (GET_MODE (el) == VOIDmode)
4485 /* If this triggers, someone should have generated a
4486 CONST_INT instead. */
4487 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4489 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4490 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4491 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4494 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4497 /* It shouldn't matter what's done here, so fill it with
4499 for (; i < elem_bitsize; i += value_bit)
4504 long tmp[max_bitsize / 32];
4505 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4507 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4508 gcc_assert (bitsize <= elem_bitsize);
4509 gcc_assert (bitsize % value_bit == 0);
4511 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4514 /* real_to_target produces its result in words affected by
4515 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4516 and use WORDS_BIG_ENDIAN instead; see the documentation
4517 of SUBREG in rtl.texi. */
4518 for (i = 0; i < bitsize; i += value_bit)
4521 if (WORDS_BIG_ENDIAN)
4522 ibase = bitsize - 1 - i;
4525 *vp++ = tmp[ibase / 32] >> i % 32;
4528 /* It shouldn't matter what's done here, so fill it with
4530 for (; i < elem_bitsize; i += value_bit)
4540 /* Now, pick the right byte to start with. */
4541 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4542 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4543 will already have offset 0. */
4544 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4546 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4548 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4549 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4550 byte = (subword_byte % UNITS_PER_WORD
4551 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4554 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4555 so if it's become negative it will instead be very large.) */
4556 gcc_assert (byte < GET_MODE_SIZE (innermode));
4558 /* Convert from bytes to chunks of size value_bit. */
4559 value_start = byte * (BITS_PER_UNIT / value_bit);
4561 /* Re-pack the value. */
4563 if (VECTOR_MODE_P (outermode))
4565 num_elem = GET_MODE_NUNITS (outermode);
4566 result_v = rtvec_alloc (num_elem);
4567 elems = &RTVEC_ELT (result_v, 0);
4568 outer_submode = GET_MODE_INNER (outermode);
4574 outer_submode = outermode;
4577 outer_class = GET_MODE_CLASS (outer_submode);
4578 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4580 gcc_assert (elem_bitsize % value_bit == 0);
4581 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4583 for (elem = 0; elem < num_elem; elem++)
4587 /* Vectors are stored in target memory order. (This is probably
4590 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4591 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4593 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4594 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4595 unsigned bytele = (subword_byte % UNITS_PER_WORD
4596 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4597 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return NULL_RTX if no simplification is possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* A little sanity checking first.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));
  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
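  /* For example, on a little-endian target, (subreg:QI (const_int 0x1234) 0)
     with HImode as INNERMODE folds here to (const_int 0x34), the low
     byte of the constant.  */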
  /* Changing the mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  An irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
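      /* For instance, if OUTERMODE is DImode and OP has SImode on a
	 big-endian target with 4-byte words, difference is 4 - 8 = -4,
	 so final_offset is adjusted by -4: the paradoxical lowpart
	 notionally begins 4 bytes before OP's own storage.  */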
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case the resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
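  /* For example, for a pseudo register, (subreg:QI (subreg:HI (reg:SI 1) 0) 0)
     becomes (subreg:QI (reg:SI 1) 0) by this rule: the two mode changes
     collapse into a single subreg of the innermost register.  */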
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
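  /* E.g. on a little-endian target, (subreg:QI (truncate:HI (reg:SI 1)) 0)
     becomes (truncate:QI (reg:SI 1)); the explicit truncation and the
     truncating subreg collapse into one truncation.  */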
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
	= regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (regno, innermode))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust the offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok a partial register anyway.  */
	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
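  /* As an illustration (hypothetical register numbering): on a
     little-endian target where a DImode value occupies hard registers
     0 and 1, this rule folds (subreg:SI (reg:DI 0) 4) to (reg:SI 1).  */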
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as a CONCAT
     of real and imaginary parts.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
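  /* For example, with (concat:SC (reg:SF 1) (reg:SF 2)), part_size is 4,
     so (subreg:SF ... 0) selects the real part (reg:SF 1) and
     (subreg:SF ... 4) selects the imaginary part (reg:SF 2).  */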
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source provides.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
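  /* For instance, on a little-endian target,
     (subreg:SI (zero_extend:DI (reg:QI 1)) 4) reads bits 32..63 of the
     extension, which are all zero, so it folds to (const_int 0).  */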
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as OUTERMODE, to
	 avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
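  /* For example, with BYTE chosen so that subreg_lsb is 0 (byte 0 on a
     little-endian target), (subreg:QI (lshiftrt:SI (sign_extend:SI
     (reg:QI 1)) (const_int 2)) 0) becomes (ashiftrt:QI (reg:QI 1)
     (const_int 2)): the low eight bits of both forms agree.  */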
  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
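/* As an illustration (assuming a little-endian target):
   simplify_gen_subreg (SImode, GEN_INT (0x12345678), DImode, 0) folds the
   constant to (const_int 0x12345678), whereas for an unfoldable operand
   such as a pseudo register it builds a fresh (subreg:SI (reg:DI N) 0).  */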
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))