/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
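
/* Illustrative example (not part of the upstream source): assuming a
   64-bit HOST_WIDE_INT, a low half whose top bit is set extends to an
   all-ones high half, otherwise to zero:

     HWI_SIGN_EXTEND (0x8000000000000000)  ==>  (HOST_WIDE_INT) -1
     HWI_SIGN_EXTEND (0x0123456789abcdef)  ==>  (HOST_WIDE_INT) 0  */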
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
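
/* For example (illustrative, assuming a 64-bit HOST_WIDE_INT host):
   the SImode sign bit is written as (const_int -2147483648); after
   masking to 32 bits the value is 0x80000000, exactly the top bit of
   the mode, so mode_signbit_p returns true.  For (const_int 0x40000000)
   it returns false.  */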
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
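
/* Usage sketch (illustrative, not from the original file): given a
   pseudo REG in SImode,

     simplify_gen_binary (PLUS, SImode, reg, const0_rtx)

   folds the addition away and returns REG itself, while

     simplify_gen_binary (PLUS, SImode, const1_rtx, reg)

   does not fold but is canonicalized to (plus (reg) (const_int 1)),
   with the constant second.  */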
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
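
/* Illustrative example (hypothetical pool label): if X is
   (mem/u:DF (symbol_ref ("*.LC0"))) and the pool entry for .LC0 is the
   DFmode constant 1.0, the function returns (const_double:DF 1.0),
   letting later folding see the actual value instead of the load.  */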
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (rtx_equal_p (x, old_rtx))
    return copy_rtx (new_rtx);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (rtx_equal_p (x, old_rtx))
            return copy_rtx (new_rtx);
        }
      break;

    default:
      break;
    }
  return x;
}
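
/* Usage sketch (illustrative; register number assumed): substituting a
   constant for a register and letting the result fold.  With
   x = (plus:SI (reg:SI 100) (const_int 4)),

     simplify_replace_rtx (x, gen_rtx_REG (SImode, 100), GEN_INT (8))

   replaces the register recursively and the PLUS of two constants
   folds, so the call returns (const_int 12).  */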
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
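
      /* Concrete instances of the De Morgan rewrite above (illustrative):
         (not (ior X Y)) becomes (and (not X) (not Y)), and
         (not (and X (const_int 5))) becomes (ior (not X) (const_int -6)),
         where the NOT of the constant has already been folded.  */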
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (GET_CODE (XEXP (op, 1)) == CONST_INT
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 1)) == CONST_INT
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx)
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
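
      /* Worked instance of the (neg (lt ...)) rule above (illustrative):
         with STORE_FLAG_VALUE == 1 and X in SImode,
         (neg:SI (lt:SI X (const_int 0))) becomes
         (ashiftrt:SI X (const_int 31)), i.e. the sign bit copied into
         every position, which is 0 or -1 exactly as required.  */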
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x) */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.
          */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>)  */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
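
      /* For instance (illustrative): (parity:SI (rotate:SI X (reg:SI N)))
         has the same parity as X itself, since a rotation only permutes
         bits; the rule above therefore rewrites it to (parity:SI X)
         whenever the rotate count has no side effects.  The same argument
         applies to POPCOUNT.  */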
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
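
/* Saturation example for the code above (illustrative values): folding
   (fix:SI (const_double:DF 3.0e9)) exceeds the SImode signed upper bound
   and clamps to 0x7fffffff, matching the middle-end's constant-folding
   rule, while (unsigned_fix:SI (const_double:DF 3.0e9)) fits below
   2^32 - 1 and yields the bit pattern of 3000000000 interpreted as an
   unsigned 32-bit value.  */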
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
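
/* For instance (illustrative): with X a register,
   (plus (plus X (const_int 2)) (const_int 3)) reaches the
   "(a op b) op c -> a op (b op c)" attempt above, the two constants
   fold, and the result is (plus X (const_int 5)).  */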
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
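
/* QImode example for the assertion above (illustrative): 128 does not
   fit in QImode, so a full-word comparison of 128 with -128 would report
   inequality even though both denote the same QImode bit pattern 0x80.
   That is why comparisons must go through simplify_relational_operation,
   which is told the operand mode explicitly.  */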
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
          && GET_CODE (op1) == CONST_INT)
        return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
               && GET_CODE (op0) == CONST_INT)
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((GET_CODE (op1) == CONST_INT
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (GET_CODE (XEXP (op0, 1)) == CONST_INT
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies unsafe math optimizations.  */
      if (FLOAT_MODE_P (mode)
          && flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case COMPARE:
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
         using cc0, in which case we want to leave it as a COMPARE
         so we can distinguish it from a register-register-copy.

         In IEEE floating point, x-0 is not the same as x.  */

      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
           || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -funsafe-math-optimizations and
         -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (! FLOAT_MODE_P (mode)
              || (flag_unsafe_math_optimizations
                  && !HONOR_NANS (mode)
                  && !HONOR_INFINITIES (mode))))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (GET_CODE (op1) == CONST_INT
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (GET_CODE (trueop1) == CONST_INT
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
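
      /* Shift-conversion example (illustrative, assuming a 32-bit
         HOST_WIDE_INT): (mult X (const_int 8)) becomes
         (ashift X (const_int 3)); a DImode multiplier of 2**35 is the
         CONST_DOUBLE with low == 0 and high == 8, which the rule above
         turns into (ashift X (const_int 35)).  */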
2006 /* x*2 is x+x and x*(-1) is -x */
2007 if (GET_CODE (trueop1) == CONST_DOUBLE
2008 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2009 && GET_MODE (op0) == mode)
2012 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2014 if (REAL_VALUES_EQUAL (d, dconst2))
2015 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2017 if (!HONOR_SNANS (mode)
2018 && REAL_VALUES_EQUAL (d, dconstm1))
2019 return simplify_gen_unary (NEG, mode, op0, mode);
2022 /* Optimize -x * -x as x * x. */
2023 if (FLOAT_MODE_P (mode)
2024 && GET_CODE (op0) == NEG
2025 && GET_CODE (op1) == NEG
2026 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2027 && !side_effects_p (XEXP (op0, 0)))
2028 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2030 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2031 if (SCALAR_FLOAT_MODE_P (mode)
2032 && GET_CODE (op0) == ABS
2033 && GET_CODE (op1) == ABS
2034 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2035 && !side_effects_p (XEXP (op0, 0)))
2036 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2038 /* Reassociate multiplication, but for floating point MULTs
2039 only when the user specifies unsafe math optimizations. */
2040 if (! FLOAT_MODE_P (mode)
2041 || flag_unsafe_math_optimizations)
2043 tem = simplify_associative_operation (code, mode, op0, op1);
    case IOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (GET_CODE (trueop1) == CONST_INT
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (GET_CODE (op1) == CONST_INT
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && GET_CODE (trueop1) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT)
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }

      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && GET_CODE (XEXP (opleft, 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
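
      /* Rotate-detection example (illustrative): in SImode,
         (ior (ashift X (const_int 24)) (lshiftrt X (const_int 8)))
         has shift counts summing to 32, the mode width, so the pair is
         recognized above as (rotate X (const_int 24)).  */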
      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
          && GET_CODE (XEXP (opright, 1)) == CONST_INT
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and X C1) C2), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (GET_CODE (op1) == CONST_INT
          && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                          (AND, mode, XEXP (op0, 0),
                                           GEN_INT (INTVAL (XEXP (op0, 1))
                                                    & ~INTVAL (op1))),
                                    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
          && GET_CODE (XEXP (op0, 1)) == CONST_INT
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
2192 if (trueop1 == const0_rtx)
2194 if (GET_CODE (trueop1) == CONST_INT
2195 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2196 == GET_MODE_MASK (mode)))
2197 return simplify_gen_unary (NOT, mode, op0, mode);
2198 if (rtx_equal_p (trueop0, trueop1)
2199 && ! side_effects_p (op0)
2200 && GET_MODE_CLASS (mode) != MODE_CC)
2201 return CONST0_RTX (mode);
2203 /* Canonicalize XOR of the most significant bit to PLUS. */
2204 if ((GET_CODE (op1) == CONST_INT
2205 || GET_CODE (op1) == CONST_DOUBLE)
2206 && mode_signbit_p (mode, op1))
2207 return simplify_gen_binary (PLUS, mode, op0, op1);
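/* Illustrative note (not part of the original function): XOR with the
   sign bit and PLUS of the sign bit agree modulo 2**n, because adding
   the top bit either sets it (no carry comes in from lower bits) or
   clears it (the carry falls off the top).  For 8-bit values:

	for (unsigned x = 0; x < 256; x++)
	  assert (((x ^ 0x80) & 0xff) == ((x + 0x80) & 0xff));  */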
2208 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2209 if ((GET_CODE (op1) == CONST_INT
2210 || GET_CODE (op1) == CONST_DOUBLE)
2211 && GET_CODE (op0) == PLUS
2212 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2213 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2214 && mode_signbit_p (mode, XEXP (op0, 1)))
2215 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2216 simplify_gen_binary (XOR, mode, op1,
2219 /* If we are XORing two things that have no bits in common,
2220 convert them into an IOR. This helps to detect rotation encoded
2221 using those methods and possibly other simplifications. */
2223 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2224 && (nonzero_bits (op0, mode)
2225 & nonzero_bits (op1, mode)) == 0)
2226 return (simplify_gen_binary (IOR, mode, op0, op1));
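/* Illustrative check (a sketch, not part of the original function):
   when the operands cannot both have a bit set, XOR, IOR and even
   PLUS all agree:

	for (unsigned x = 0; x < 256; x++)
	  {
	    unsigned a = x & 0xf0, b = (x * 7) & 0x0f;
	    assert ((a ^ b) == (a | b) && (a | b) == a + b);
	  }  */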
2228 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2229 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for (NOT y). */
2232 int num_negated = 0;
2234 if (GET_CODE (op0) == NOT)
2235 num_negated++, op0 = XEXP (op0, 0);
2236 if (GET_CODE (op1) == NOT)
2237 num_negated++, op1 = XEXP (op1, 0);
2239 if (num_negated == 2)
2240 return simplify_gen_binary (XOR, mode, op0, op1);
2241 else if (num_negated == 1)
2242 return simplify_gen_unary (NOT, mode,
2243 simplify_gen_binary (XOR, mode, op0, op1),
2247 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2248 correspond to a machine insn or result in further simplifications
2249 if B is a constant. */
2251 if (GET_CODE (op0) == AND
2252 && rtx_equal_p (XEXP (op0, 1), op1)
2253 && ! side_effects_p (op1))
2254 return simplify_gen_binary (AND, mode,
2255 simplify_gen_unary (NOT, mode,
2256 XEXP (op0, 0), mode),
2259 else if (GET_CODE (op0) == AND
2260 && rtx_equal_p (XEXP (op0, 0), op1)
2261 && ! side_effects_p (op1))
2262 return simplify_gen_binary (AND, mode,
2263 simplify_gen_unary (NOT, mode,
2264 XEXP (op0, 1), mode),
2267 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2268 comparison if STORE_FLAG_VALUE is 1. */
2269 if (STORE_FLAG_VALUE == 1
2270 && trueop1 == const1_rtx
2271 && COMPARISON_P (op0)
2272 && (reversed = reversed_comparison (op0, mode)))
2275 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2276 is (lt foo (const_int 0)), so we can perform the above
2277 simplification if STORE_FLAG_VALUE is 1. */
2279 if (STORE_FLAG_VALUE == 1
2280 && trueop1 == const1_rtx
2281 && GET_CODE (op0) == LSHIFTRT
2282 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2283 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2284 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
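/* Illustrative note (not part of the original function), assuming
   32-bit ints for concreteness: (unsigned) x >> 31 is 1 exactly when
   x < 0, so XORing that flag with 1 computes x >= 0:

	int x = -5;
	assert ((((unsigned) x >> 31) ^ 1) == (x >= 0 ? 1 : 0));  */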
2286 /* (xor (comparison foo bar) (const_int sign-bit))
2287 when STORE_FLAG_VALUE is the sign bit. */
2288 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2289 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2290 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2291 && trueop1 == const_true_rtx
2292 && COMPARISON_P (op0)
2293 && (reversed = reversed_comparison (op0, mode)))
2296 tem = simplify_associative_operation (code, mode, op0, op1);
2302 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2304 /* If we are turning off bits already known off in OP0, we need not do an AND. */
2306 if (GET_CODE (trueop1) == CONST_INT
2307 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2308 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2310 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2311 && GET_MODE_CLASS (mode) != MODE_CC)
2314 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2315 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2316 && ! side_effects_p (op0)
2317 && GET_MODE_CLASS (mode) != MODE_CC)
2318 return CONST0_RTX (mode);
2320 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2321 there are no nonzero bits of C outside of X's mode. */
2322 if ((GET_CODE (op0) == SIGN_EXTEND
2323 || GET_CODE (op0) == ZERO_EXTEND)
2324 && GET_CODE (trueop1) == CONST_INT
2325 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2326 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2327 & INTVAL (trueop1)) == 0)
2329 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2330 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2331 gen_int_mode (INTVAL (trueop1),
2333 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2336 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2337 if (GET_CODE (op0) == IOR
2338 && GET_CODE (trueop1) == CONST_INT
2339 && GET_CODE (XEXP (op0, 1)) == CONST_INT)
2341 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2342 return simplify_gen_binary (IOR, mode,
2343 simplify_gen_binary (AND, mode,
2344 XEXP (op0, 0), op1),
2345 gen_int_mode (tmp, mode));
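/* Exhaustive check of the distribution above (an illustrative sketch
   with made-up 8-bit constants C1 = 0x0c, C2 = 0x0a, not part of the
   original function):

	for (unsigned a = 0; a < 256; a++)
	  assert (((a | 0x0c) & 0x0a) == ((a & 0x0a) | (0x0c & 0x0a)));  */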
2348 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2349 insn (and may simplify more). */
2350 if (GET_CODE (op0) == XOR
2351 && rtx_equal_p (XEXP (op0, 0), op1)
2352 && ! side_effects_p (op1))
2353 return simplify_gen_binary (AND, mode,
2354 simplify_gen_unary (NOT, mode,
2355 XEXP (op0, 1), mode),
2358 if (GET_CODE (op0) == XOR
2359 && rtx_equal_p (XEXP (op0, 1), op1)
2360 && ! side_effects_p (op1))
2361 return simplify_gen_binary (AND, mode,
2362 simplify_gen_unary (NOT, mode,
2363 XEXP (op0, 0), mode),
2366 /* Similarly for (~(A ^ B)) & A. */
2367 if (GET_CODE (op0) == NOT
2368 && GET_CODE (XEXP (op0, 0)) == XOR
2369 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2370 && ! side_effects_p (op1))
2371 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2373 if (GET_CODE (op0) == NOT
2374 && GET_CODE (XEXP (op0, 0)) == XOR
2375 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2376 && ! side_effects_p (op1))
2377 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2379 /* Convert (A | B) & A to A. */
2380 if (GET_CODE (op0) == IOR
2381 && (rtx_equal_p (XEXP (op0, 0), op1)
2382 || rtx_equal_p (XEXP (op0, 1), op1))
2383 && ! side_effects_p (XEXP (op0, 0))
2384 && ! side_effects_p (XEXP (op0, 1)))
2387 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2388 ((A & N) + B) & M -> (A + B) & M
2389 Similarly if (N & M) == 0,
2390 ((A | N) + B) & M -> (A + B) & M
2391 and for - instead of + and/or ^ instead of |. */
2392 if (GET_CODE (trueop1) == CONST_INT
2393 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2394 && ~INTVAL (trueop1)
2395 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2396 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2401 pmop[0] = XEXP (op0, 0);
2402 pmop[1] = XEXP (op0, 1);
2404 for (which = 0; which < 2; which++)
2407 switch (GET_CODE (tem))
2410 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2411 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2412 == INTVAL (trueop1))
2413 pmop[which] = XEXP (tem, 0);
2417 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2418 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2419 pmop[which] = XEXP (tem, 0);
2426 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2428 tem = simplify_gen_binary (GET_CODE (op0), mode,
2430 return simplify_gen_binary (code, mode, tem, op1);
2433 tem = simplify_associative_operation (code, mode, op0, op1);
2439 /* 0/x is 0 (or x&0 if x has side-effects). */
2440 if (trueop0 == CONST0_RTX (mode))
2442 if (side_effects_p (op1))
2443 return simplify_gen_binary (AND, mode, op1, trueop0);
2447 if (trueop1 == CONST1_RTX (mode))
2448 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2449 /* Convert divide by power of two into shift. */
2450 if (GET_CODE (trueop1) == CONST_INT
2451 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2452 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
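/* Illustrative note (not part of the original function): this is the
   unsigned-division case, where the shift is exact:

	unsigned x = 1000;
	assert (x / 8 == x >> 3);

   A signed division by a power of two rounds toward zero, so it cannot
   be replaced by a plain arithmetic shift.  */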
2456 /* Handle floating point and integers separately. */
2457 if (SCALAR_FLOAT_MODE_P (mode))
2459 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2460 safe for modes with NaNs, since 0.0 / 0.0 will then be
2461 NaN rather than 0.0. Nor is it safe for modes with signed
2462 zeros, since dividing 0 by a negative number gives -0.0. */
2463 if (trueop0 == CONST0_RTX (mode)
2464 && !HONOR_NANS (mode)
2465 && !HONOR_SIGNED_ZEROS (mode)
2466 && ! side_effects_p (op1))
2469 if (trueop1 == CONST1_RTX (mode)
2470 && !HONOR_SNANS (mode))
2473 if (GET_CODE (trueop1) == CONST_DOUBLE
2474 && trueop1 != CONST0_RTX (mode))
2477 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2480 if (REAL_VALUES_EQUAL (d, dconstm1)
2481 && !HONOR_SNANS (mode))
2482 return simplify_gen_unary (NEG, mode, op0, mode);
2484 /* Change FP division by a constant into multiplication.
2485 Only do this with -funsafe-math-optimizations. */
2486 if (flag_unsafe_math_optimizations
2487 && !REAL_VALUES_EQUAL (d, dconst0))
2489 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2490 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2491 return simplify_gen_binary (MULT, mode, op0, tem);
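/* Illustrative note (not part of the original function): e.g. x / 4.0
   becomes x * 0.25.  The reciprocal is exact only for powers of two;
   for other divisors the substitution can change the rounded result,
   which is why it is gated on flag_unsafe_math_optimizations.  */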
2497 /* 0/x is 0 (or x&0 if x has side-effects). */
2498 if (trueop0 == CONST0_RTX (mode))
2500 if (side_effects_p (op1))
2501 return simplify_gen_binary (AND, mode, op1, trueop0);
2505 if (trueop1 == CONST1_RTX (mode))
2506 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2508 if (trueop1 == constm1_rtx)
2510 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2511 return simplify_gen_unary (NEG, mode, x, mode);
2517 /* 0%x is 0 (or x&0 if x has side-effects). */
2518 if (trueop0 == CONST0_RTX (mode))
2520 if (side_effects_p (op1))
2521 return simplify_gen_binary (AND, mode, op1, trueop0);
2524 /* x%1 is 0 (or x&0 if x has side-effects). */
2525 if (trueop1 == CONST1_RTX (mode))
2527 if (side_effects_p (op0))
2528 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2529 return CONST0_RTX (mode);
2531 /* Implement modulus by power of two as AND. */
2532 if (GET_CODE (trueop1) == CONST_INT
2533 && exact_log2 (INTVAL (trueop1)) > 0)
2534 return simplify_gen_binary (AND, mode, op0,
2535 GEN_INT (INTVAL (op1) - 1));
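/* Illustrative note (not part of the original function): for the
   unsigned case, reducing modulo 2**k just keeps the low k bits:

	unsigned x = 1000;
	assert (x % 8 == (x & 7));  */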
2539 /* 0%x is 0 (or x&0 if x has side-effects). */
2540 if (trueop0 == CONST0_RTX (mode))
2542 if (side_effects_p (op1))
2543 return simplify_gen_binary (AND, mode, op1, trueop0);
2546 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2547 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2549 if (side_effects_p (op0))
2550 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2551 return CONST0_RTX (mode);
2558 if (trueop1 == CONST0_RTX (mode))
2560 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2562 /* Rotating ~0 always results in ~0. */
2563 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2564 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2565 && ! side_effects_p (op1))
2571 if (trueop1 == CONST0_RTX (mode))
2573 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2578 if (trueop1 == CONST0_RTX (mode))
2580 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2582 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2583 if (GET_CODE (op0) == CLZ
2584 && GET_CODE (trueop1) == CONST_INT
2585 && STORE_FLAG_VALUE == 1
2586 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2588 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2589 unsigned HOST_WIDE_INT zero_val = 0;
2591 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2592 && zero_val == GET_MODE_BITSIZE (imode)
2593 && INTVAL (trueop1) == exact_log2 (zero_val))
2594 return simplify_gen_relational (EQ, mode, imode,
2595 XEXP (op0, 0), const0_rtx);
2600 if (width <= HOST_BITS_PER_WIDE_INT
2601 && GET_CODE (trueop1) == CONST_INT
2602 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2603 && ! side_effects_p (op0))
2605 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2607 tem = simplify_associative_operation (code, mode, op0, op1);
2613 if (width <= HOST_BITS_PER_WIDE_INT
2614 && GET_CODE (trueop1) == CONST_INT
2615 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2616 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2617 && ! side_effects_p (op0))
2619 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2621 tem = simplify_associative_operation (code, mode, op0, op1);
2627 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2629 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2631 tem = simplify_associative_operation (code, mode, op0, op1);
2637 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2639 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2641 tem = simplify_associative_operation (code, mode, op0, op1);
2650 /* ??? There are simplifications that can be done. */
2654 if (!VECTOR_MODE_P (mode))
2656 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2657 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2658 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2659 gcc_assert (XVECLEN (trueop1, 0) == 1);
2660 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2662 if (GET_CODE (trueop0) == CONST_VECTOR)
2663 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2668 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2669 gcc_assert (GET_MODE_INNER (mode)
2670 == GET_MODE_INNER (GET_MODE (trueop0)));
2671 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2673 if (GET_CODE (trueop0) == CONST_VECTOR)
2675 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2676 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2677 rtvec v = rtvec_alloc (n_elts);
2680 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2681 for (i = 0; i < n_elts; i++)
2683 rtx x = XVECEXP (trueop1, 0, i);
2685 gcc_assert (GET_CODE (x) == CONST_INT);
2686 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2690 return gen_rtx_CONST_VECTOR (mode, v);
2694 if (XVECLEN (trueop1, 0) == 1
2695 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2696 && GET_CODE (trueop0) == VEC_CONCAT)
2699 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2701 /* Try to find the element in the VEC_CONCAT. */
2702 while (GET_MODE (vec) != mode
2703 && GET_CODE (vec) == VEC_CONCAT)
2705 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2706 if (offset < vec_size)
2707 vec = XEXP (vec, 0);
2711 vec = XEXP (vec, 1);
2713 vec = avoid_constant_pool_reference (vec);
2716 if (GET_MODE (vec) == mode)
2723 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2724 ? GET_MODE (trueop0)
2725 : GET_MODE_INNER (mode));
2726 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2727 ? GET_MODE (trueop1)
2728 : GET_MODE_INNER (mode));
2730 gcc_assert (VECTOR_MODE_P (mode));
2731 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2732 == GET_MODE_SIZE (mode));
2734 if (VECTOR_MODE_P (op0_mode))
2735 gcc_assert (GET_MODE_INNER (mode)
2736 == GET_MODE_INNER (op0_mode));
2738 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2740 if (VECTOR_MODE_P (op1_mode))
2741 gcc_assert (GET_MODE_INNER (mode)
2742 == GET_MODE_INNER (op1_mode));
2744 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2746 if ((GET_CODE (trueop0) == CONST_VECTOR
2747 || GET_CODE (trueop0) == CONST_INT
2748 || GET_CODE (trueop0) == CONST_DOUBLE)
2749 && (GET_CODE (trueop1) == CONST_VECTOR
2750 || GET_CODE (trueop1) == CONST_INT
2751 || GET_CODE (trueop1) == CONST_DOUBLE))
2753 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2754 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2755 rtvec v = rtvec_alloc (n_elts);
2757 unsigned in_n_elts = 1;
2759 if (VECTOR_MODE_P (op0_mode))
2760 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2761 for (i = 0; i < n_elts; i++)
2765 if (!VECTOR_MODE_P (op0_mode))
2766 RTVEC_ELT (v, i) = trueop0;
2768 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2772 if (!VECTOR_MODE_P (op1_mode))
2773 RTVEC_ELT (v, i) = trueop1;
2775 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2780 return gen_rtx_CONST_VECTOR (mode, v);
2793 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2796 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2798 unsigned int width = GET_MODE_BITSIZE (mode);
2800 if (VECTOR_MODE_P (mode)
2801 && code != VEC_CONCAT
2802 && GET_CODE (op0) == CONST_VECTOR
2803 && GET_CODE (op1) == CONST_VECTOR)
2805 unsigned n_elts = GET_MODE_NUNITS (mode);
2806 enum machine_mode op0mode = GET_MODE (op0);
2807 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2808 enum machine_mode op1mode = GET_MODE (op1);
2809 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2810 rtvec v = rtvec_alloc (n_elts);
2813 gcc_assert (op0_n_elts == n_elts);
2814 gcc_assert (op1_n_elts == n_elts);
2815 for (i = 0; i < n_elts; i++)
2817 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2818 CONST_VECTOR_ELT (op0, i),
2819 CONST_VECTOR_ELT (op1, i));
2822 RTVEC_ELT (v, i) = x;
2825 return gen_rtx_CONST_VECTOR (mode, v);
2828 if (VECTOR_MODE_P (mode)
2829 && code == VEC_CONCAT
2830 && CONSTANT_P (op0) && CONSTANT_P (op1))
2832 unsigned n_elts = GET_MODE_NUNITS (mode);
2833 rtvec v = rtvec_alloc (n_elts);
2835 gcc_assert (n_elts >= 2);
2838 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2839 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2841 RTVEC_ELT (v, 0) = op0;
2842 RTVEC_ELT (v, 1) = op1;
2846 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2847 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2850 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2851 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2852 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2854 for (i = 0; i < op0_n_elts; ++i)
2855 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2856 for (i = 0; i < op1_n_elts; ++i)
2857 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2860 return gen_rtx_CONST_VECTOR (mode, v);
2863 if (SCALAR_FLOAT_MODE_P (mode)
2864 && GET_CODE (op0) == CONST_DOUBLE
2865 && GET_CODE (op1) == CONST_DOUBLE
2866 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2877 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2879 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2881 for (i = 0; i < 4; i++)
2898 real_from_target (&r, tmp0, mode);
2899 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2903 REAL_VALUE_TYPE f0, f1, value, result;
2906 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2907 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2908 real_convert (&f0, mode, &f0);
2909 real_convert (&f1, mode, &f1);
2911 if (HONOR_SNANS (mode)
2912 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2916 && REAL_VALUES_EQUAL (f1, dconst0)
2917 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2920 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2921 && flag_trapping_math
2922 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2924 int s0 = REAL_VALUE_NEGATIVE (f0);
2925 int s1 = REAL_VALUE_NEGATIVE (f1);
2930 /* Inf + -Inf = NaN plus exception. */
2935 /* Inf - Inf = NaN plus exception. */
2940 /* Inf / Inf = NaN plus exception. */
2947 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2948 && flag_trapping_math
2949 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2950 || (REAL_VALUE_ISINF (f1)
2951 && REAL_VALUES_EQUAL (f0, dconst0))))
2952 /* Inf * 0 = NaN plus exception. */
2955 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2957 real_convert (&result, mode, &value);
2959 /* Don't constant fold this floating point operation if
2960 the result has overflowed and flag_trapping_math. */
2962 if (flag_trapping_math
2963 && MODE_HAS_INFINITIES (mode)
2964 && REAL_VALUE_ISINF (result)
2965 && !REAL_VALUE_ISINF (f0)
2966 && !REAL_VALUE_ISINF (f1))
2967 /* Overflow plus exception. */
2970 /* Don't constant fold this floating point operation if the
2971 result may depend upon the run-time rounding mode and
2972 flag_rounding_math is set, or if GCC's software emulation
2973 is unable to accurately represent the result. */
2975 if ((flag_rounding_math
2976 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2977 && !flag_unsafe_math_optimizations))
2978 && (inexact || !real_identical (&result, &value)))
2981 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2985 /* We can fold some multi-word operations. */
2986 if (GET_MODE_CLASS (mode) == MODE_INT
2987 && width == HOST_BITS_PER_WIDE_INT * 2
2988 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2989 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2991 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2992 HOST_WIDE_INT h1, h2, hv, ht;
2994 if (GET_CODE (op0) == CONST_DOUBLE)
2995 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2997 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2999 if (GET_CODE (op1) == CONST_DOUBLE)
3000 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3002 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3007 /* A - B == A + (-B). */
3008 neg_double (l2, h2, &lv, &hv);
3011 /* Fall through. */
3014 add_double (l1, h1, l2, h2, &lv, &hv);
3018 mul_double (l1, h1, l2, h2, &lv, &hv);
3022 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3023 &lv, &hv, <, &ht))
3028 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3029 <, &ht, &lv, &hv))
3034 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3035 &lv, &hv, <, &ht))
3040 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3041 <, &ht, &lv, &hv))
3046 lv = l1 & l2, hv = h1 & h2;
3050 lv = l1 | l2, hv = h1 | h2;
3054 lv = l1 ^ l2, hv = h1 ^ h2;
3060 && ((unsigned HOST_WIDE_INT) l1
3061 < (unsigned HOST_WIDE_INT) l2)))
3070 && ((unsigned HOST_WIDE_INT) l1
3071 > (unsigned HOST_WIDE_INT) l2)))
3078 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3080 && ((unsigned HOST_WIDE_INT) l1
3081 < (unsigned HOST_WIDE_INT) l2)))
3088 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3090 && ((unsigned HOST_WIDE_INT) l1
3091 > (unsigned HOST_WIDE_INT) l2)))
3097 case LSHIFTRT: case ASHIFTRT:
3099 case ROTATE: case ROTATERT:
3100 if (SHIFT_COUNT_TRUNCATED)
3101 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3103 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3106 if (code == LSHIFTRT || code == ASHIFTRT)
3107 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3109 else if (code == ASHIFT)
3110 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3111 else if (code == ROTATE)
3112 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3113 else /* code == ROTATERT */
3114 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3121 return immed_double_const (lv, hv, mode);
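/* A sketch of the two-word arithmetic used above (illustrative, not
   part of the original function): addition, for instance, propagates a
   carry from the low word into the high word, essentially

	lv = l1 + l2;
	hv = h1 + h2 + (lv < l1);   -- carry if the low sum wrapped

   which is what add_double computes before the result is truncated
   to MODE.  */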
3124 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3125 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3127 /* Get the integer argument values in two forms:
3128 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3130 arg0 = INTVAL (op0);
3131 arg1 = INTVAL (op1);
3133 if (width < HOST_BITS_PER_WIDE_INT)
3135 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3136 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3139 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3140 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3143 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3144 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3152 /* Compute the value of the arithmetic. */
3157 val = arg0s + arg1s;
3161 val = arg0s - arg1s;
3165 val = arg0s * arg1s;
3170 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3173 val = arg0s / arg1s;
3178 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3181 val = arg0s % arg1s;
3186 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3189 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3194 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3197 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3215 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3216 the value is in range. We can't return any old value for
3217 out-of-range arguments because either the middle-end (via
3218 shift_truncation_mask) or the back-end might be relying on
3219 target-specific knowledge. Nor can we rely on
3220 shift_truncation_mask, since the shift might not be part of an
3221 ashlM3, lshrM3 or ashrM3 instruction. */
3222 if (SHIFT_COUNT_TRUNCATED)
3223 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3224 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3227 val = (code == ASHIFT
3228 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3229 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3231 /* Sign-extend the result for arithmetic right shifts. */
3232 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3233 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3241 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3242 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3250 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3251 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
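/* Illustrative note (not part of the original function): with the
   formulas above, rotating the 8-bit value 0xa5 left by 1 gives

	assert ((((0xa5 << 1) | (0xa5 >> 7)) & 0xff) == 0x4b);  */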
3255 /* Do nothing here. */
3259 val = arg0s <= arg1s ? arg0s : arg1s;
3263 val = ((unsigned HOST_WIDE_INT) arg0
3264 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3268 val = arg0s > arg1s ? arg0s : arg1s;
3272 val = ((unsigned HOST_WIDE_INT) arg0
3273 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3281 /* ??? There are simplifications that can be done. */
3288 return gen_int_mode (val, mode);
3296 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3297 PLUS or MINUS.
3299 Rather than test for specific cases, we do this by a brute-force method
3300 and do all possible simplifications until no more changes occur. Then
3301 we rebuild the operation. */
3303 struct simplify_plus_minus_op_data
3310 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3314 result = (commutative_operand_precedence (y)
3315 - commutative_operand_precedence (x));
3319 /* Group together equal REGs to do more simplification. */
3320 if (REG_P (x) && REG_P (y))
3321 return REGNO (x) > REGNO (y);
3327 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3330 struct simplify_plus_minus_op_data ops[8];
3332 int n_ops = 2, input_ops = 2;
3333 int changed, n_constants = 0, canonicalized = 0;
3336 memset (ops, 0, sizeof ops);
3338 /* Set up the two operands and then expand them until nothing has been
3339 changed. If we run out of room in our array, give up; this should
3340 almost never happen. */
3345 ops[1].neg = (code == MINUS);
3351 for (i = 0; i < n_ops; i++)
3353 rtx this_op = ops[i].op;
3354 int this_neg = ops[i].neg;
3355 enum rtx_code this_code = GET_CODE (this_op);
3364 ops[n_ops].op = XEXP (this_op, 1);
3365 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3368 ops[i].op = XEXP (this_op, 0);
3371 canonicalized |= this_neg;
3375 ops[i].op = XEXP (this_op, 0);
3376 ops[i].neg = ! this_neg;
3383 && GET_CODE (XEXP (this_op, 0)) == PLUS
3384 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3385 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3387 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3388 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3389 ops[n_ops].neg = this_neg;
3397 /* ~a -> (-a - 1) */
3400 ops[n_ops].op = constm1_rtx;
3401 ops[n_ops++].neg = this_neg;
3402 ops[i].op = XEXP (this_op, 0);
3403 ops[i].neg = !this_neg;
3413 ops[i].op = neg_const_int (mode, this_op);
3427 if (n_constants > 1)
3430 gcc_assert (n_ops >= 2);
3432 /* If we only have two operands, we can avoid the loops. */
3435 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3438 /* Get the two operands. Be careful with the order, especially for
3439 the cases where code == MINUS. */
3440 if (ops[0].neg && ops[1].neg)
3442 lhs = gen_rtx_NEG (mode, ops[0].op);
3445 else if (ops[0].neg)
3456 return simplify_const_binary_operation (code, mode, lhs, rhs);
3459 /* Now simplify each pair of operands until nothing changes. */
3462 /* Insertion sort is good enough for an eight-element array. */
3463 for (i = 1; i < n_ops; i++)
3465 struct simplify_plus_minus_op_data save;
3467 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3473 ops[j + 1] = ops[j];
3474 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3478 /* This is only useful the first time through. */
3483 for (i = n_ops - 1; i > 0; i--)
3484 for (j = i - 1; j >= 0; j--)
3486 rtx lhs = ops[j].op, rhs = ops[i].op;
3487 int lneg = ops[j].neg, rneg = ops[i].neg;
3489 if (lhs != 0 && rhs != 0)
3491 enum rtx_code ncode = PLUS;
3497 tem = lhs, lhs = rhs, rhs = tem;
3499 else if (swap_commutative_operands_p (lhs, rhs))
3500 tem = lhs, lhs = rhs, rhs = tem;
3502 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3503 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3505 rtx tem_lhs, tem_rhs;
3507 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3508 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3509 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3511 if (tem && !CONSTANT_P (tem))
3512 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3515 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3517 /* Reject "simplifications" that just wrap the two
3518 arguments in a CONST. Failure to do so can result
3519 in infinite recursion with simplify_binary_operation
3520 when it calls us to simplify CONST operations. */
3522 && ! (GET_CODE (tem) == CONST
3523 && GET_CODE (XEXP (tem, 0)) == ncode
3524 && XEXP (XEXP (tem, 0), 0) == lhs
3525 && XEXP (XEXP (tem, 0), 1) == rhs))
3528 if (GET_CODE (tem) == NEG)
3529 tem = XEXP (tem, 0), lneg = !lneg;
3530 if (GET_CODE (tem) == CONST_INT && lneg)
3531 tem = neg_const_int (mode, tem), lneg = 0;
3535 ops[j].op = NULL_RTX;
3541 /* Pack all the operands to the lower-numbered entries. */
3542 for (i = 0, j = 0; j < n_ops; j++)
3552 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3554 && GET_CODE (ops[1].op) == CONST_INT
3555 && CONSTANT_P (ops[0].op)
3557 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3559 /* We suppressed creation of trivial CONST expressions in the
3560 combination loop to avoid recursion. Create one manually now.
3561 The combination loop should have ensured that there is exactly
3562 one CONST_INT, and the sort will have ensured that it is last
3563 in the array and that any other constant will be next-to-last. */
3566 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3567 && CONSTANT_P (ops[n_ops - 2].op))
3569 rtx value = ops[n_ops - 1].op;
3570 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3571 value = neg_const_int (mode, value);
3572 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3576 /* Put a non-negated operand first, if possible. */
3578 for (i = 0; i < n_ops && ops[i].neg; i++)
3581 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3590 /* Now make the result by performing the requested operations. */
3592 for (i = 1; i < n_ops; i++)
3593 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3594 mode, result, ops[i].op);
3599 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3601 plus_minus_operand_p (const_rtx x)
3603 return GET_CODE (x) == PLUS
3604 || GET_CODE (x) == MINUS
3605 || (GET_CODE (x) == CONST
3606 && GET_CODE (XEXP (x, 0)) == PLUS
3607 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3608 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3611 /* Like simplify_binary_operation except used for relational operators.
3612 MODE is the mode of the result. If MODE is VOIDmode, the operands
3613 must not both be VOIDmode.
3615 CMP_MODE specifies the mode in which the comparison is done, so it is
3616 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3617 the operands or, if both are VOIDmode, the operands are compared in
3618 "infinite precision". */
3620 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3621 enum machine_mode cmp_mode, rtx op0, rtx op1)
3623 rtx tem, trueop0, trueop1;
3625 if (cmp_mode == VOIDmode)
3626 cmp_mode = GET_MODE (op0);
3627 if (cmp_mode == VOIDmode)
3628 cmp_mode = GET_MODE (op1);
3630 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3633 if (SCALAR_FLOAT_MODE_P (mode))
3635 if (tem == const0_rtx)
3636 return CONST0_RTX (mode);
3637 #ifdef FLOAT_STORE_FLAG_VALUE
3639 REAL_VALUE_TYPE val;
3640 val = FLOAT_STORE_FLAG_VALUE (mode);
3641 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3647 if (VECTOR_MODE_P (mode))
3649 if (tem == const0_rtx)
3650 return CONST0_RTX (mode);
3651 #ifdef VECTOR_STORE_FLAG_VALUE
3656 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3657 if (val == NULL_RTX)
3659 if (val == const1_rtx)
3660 return CONST1_RTX (mode);
3662 units = GET_MODE_NUNITS (mode);
3663 v = rtvec_alloc (units);
3664 for (i = 0; i < units; i++)
3665 RTVEC_ELT (v, i) = val;
3666 return gen_rtx_raw_CONST_VECTOR (mode, v);
3676 /* For the following tests, ensure const0_rtx is op1. */
3677 if (swap_commutative_operands_p (op0, op1)
3678 || (op0 == const0_rtx && op1 != const0_rtx))
3679 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3681 /* If op0 is a compare, extract the comparison arguments from it. */
3682 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3683 return simplify_relational_operation (code, mode, VOIDmode,
3684 XEXP (op0, 0), XEXP (op0, 1));
3686 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3690 trueop0 = avoid_constant_pool_reference (op0);
3691 trueop1 = avoid_constant_pool_reference (op1);
3692 return simplify_relational_operation_1 (code, mode, cmp_mode,
3696 /* This part of simplify_relational_operation is only used when CMP_MODE
3697 is not in class MODE_CC (i.e. it is a real comparison).
3699 MODE is the mode of the result, while CMP_MODE specifies the mode
3700 in which the comparison is done, so it is the mode of the operands. */
3703 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3704 enum machine_mode cmp_mode, rtx op0, rtx op1)
3706 enum rtx_code op0code = GET_CODE (op0);
3708 if (op1 == const0_rtx && COMPARISON_P (op0))
3710 /* If op0 is a comparison, extract the comparison arguments from it. */
3714 if (GET_MODE (op0) == mode)
3715 return simplify_rtx (op0);
3717 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3718 XEXP (op0, 0), XEXP (op0, 1));
3720 else if (code == EQ)
3722 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3723 if (new_code != UNKNOWN)
3724 return simplify_gen_relational (new_code, mode, VOIDmode,
3725 XEXP (op0, 0), XEXP (op0, 1));
3729 if (op1 == const0_rtx)
3731 /* Canonicalize (GTU x 0) as (NE x 0). */
3733 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
3734 /* Canonicalize (LEU x 0) as (EQ x 0). */
3736 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
3738 else if (op1 == const1_rtx)
3743 /* Canonicalize (GE x 1) as (GT x 0). */
3744 return simplify_gen_relational (GT, mode, cmp_mode,
3747 /* Canonicalize (GEU x 1) as (NE x 0). */
3748 return simplify_gen_relational (NE, mode, cmp_mode,
3751 /* Canonicalize (LT x 1) as (LE x 0). */
3752 return simplify_gen_relational (LE, mode, cmp_mode,
3755 /* Canonicalize (LTU x 1) as (EQ x 0). */
3756 return simplify_gen_relational (EQ, mode, cmp_mode,
3762 else if (op1 == constm1_rtx)
3764 /* Canonicalize (LE x -1) as (LT x 0). */
3766 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
3767 /* Canonicalize (GT x -1) as (GE x 0). */
3769 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
3772 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3773 if ((code == EQ || code == NE)
3774 && (op0code == PLUS || op0code == MINUS)
3776 && CONSTANT_P (XEXP (op0, 1))
3777 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3779 rtx x = XEXP (op0, 0);
3780 rtx c = XEXP (op0, 1);
3782 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3784 return simplify_gen_relational (code, mode, cmp_mode, x, c);
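/* Illustrative note (not part of the original function): e.g.
   (eq (plus x 3) 10) becomes (eq x 7).  This is safe in modular
   integer arithmetic because adding a constant is a bijection; for
   floating point it needs flag_unsafe_math_optimizations, exactly as
   the condition above requires.  */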
3787 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3788 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3790 && op1 == const0_rtx
3791 && GET_MODE_CLASS (mode) == MODE_INT
3792 && cmp_mode != VOIDmode
3793 /* ??? Work-around BImode bugs in the ia64 backend. */
3795 && cmp_mode != BImode
3796 && nonzero_bits (op0, cmp_mode) == 1
3797 && STORE_FLAG_VALUE == 1)
3798 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3799 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3800 : lowpart_subreg (mode, op0, cmp_mode);
3802 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3803 if ((code == EQ || code == NE)
3804 && op1 == const0_rtx
3806 return simplify_gen_relational (code, mode, cmp_mode,
3807 XEXP (op0, 0), XEXP (op0, 1));
3809 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3810 if ((code == EQ || code == NE)
3812 && rtx_equal_p (XEXP (op0, 0), op1)
3813 && !side_effects_p (XEXP (op0, 0)))
3814 return simplify_gen_relational (code, mode, cmp_mode,
3815 XEXP (op0, 1), const0_rtx);
3817 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3818 if ((code == EQ || code == NE)
3820 && rtx_equal_p (XEXP (op0, 1), op1)
3821 && !side_effects_p (XEXP (op0, 1)))
3822 return simplify_gen_relational (code, mode, cmp_mode,
3823 XEXP (op0, 0), const0_rtx);
3825 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3826 if ((code == EQ || code == NE)
3828 && (GET_CODE (op1) == CONST_INT
3829 || GET_CODE (op1) == CONST_DOUBLE)
3830 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3831 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3832 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3833 simplify_gen_binary (XOR, cmp_mode,
3834 XEXP (op0, 1), op1));
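/* Illustrative check (a sketch, not part of the original function):
   XOR with a constant is an involution, so (x ^ C1) == C2 exactly
   when x == (C1 ^ C2):

	for (unsigned x = 0; x < 256; x++)
	  assert (((x ^ 5) == 3) == (x == (5 ^ 3)));  */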
3836 if (op0code == POPCOUNT && op1 == const0_rtx)
3842 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
3843 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
3844 XEXP (op0, 0), const0_rtx);
3849 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
3850 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
3851 XEXP (op0, 0), const0_rtx);
3860 /* Check if the given comparison (done in the given MODE) is actually a
3861 tautology or a contradiction.
3862 If no simplification is possible, this function returns zero.
3863 Otherwise, it returns either const_true_rtx or const0_rtx. */
3866 simplify_const_relational_operation (enum rtx_code code,
3867 enum machine_mode mode,
3870 int equal, op0lt, op0ltu, op1lt, op1ltu;
3875 gcc_assert (mode != VOIDmode
3876 || (GET_MODE (op0) == VOIDmode
3877 && GET_MODE (op1) == VOIDmode));
3879 /* If op0 is a compare, extract the comparison arguments from it. */
3880 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3882 op1 = XEXP (op0, 1);
3883 op0 = XEXP (op0, 0);
3885 if (GET_MODE (op0) != VOIDmode)
3886 mode = GET_MODE (op0);
3887 else if (GET_MODE (op1) != VOIDmode)
3888 mode = GET_MODE (op1);
3893 /* We can't simplify MODE_CC values since we don't know what the
3894 actual comparison is. */
3895 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3898 /* Make sure the constant is second. */
3899 if (swap_commutative_operands_p (op0, op1))
3901 tem = op0, op0 = op1, op1 = tem;
3902 code = swap_condition (code);
3905 trueop0 = avoid_constant_pool_reference (op0);
3906 trueop1 = avoid_constant_pool_reference (op1);
3908 /* For integer comparisons of A and B maybe we can simplify A - B and can
3909 then simplify a comparison of that with zero. If A and B are both either
3910 a register or a CONST_INT, this can't help; testing for these cases will
3911 prevent infinite recursion here and speed things up.
3913 We can only do this for EQ and NE comparisons as otherwise we may
3914 lose or introduce overflow which we cannot disregard as undefined,
3915 since we do not know the signedness of the operation on either the
3916 left or the right-hand side of the comparison. */
3918 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3919 && (code == EQ || code == NE)
3920 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3921 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3922 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3923 /* We cannot do this if tem is a nonzero address. */
3924 && ! nonzero_address_p (tem))
3925 return simplify_const_relational_operation (signed_condition (code),
3926 mode, tem, const0_rtx);
3928 if (! HONOR_NANS (mode) && code == ORDERED)
3929 return const_true_rtx;
3931 if (! HONOR_NANS (mode) && code == UNORDERED)
3934 /* For modes without NaNs, if the two operands are equal, we know the
3935 result, unless they have side-effects. */
3936 if (! HONOR_NANS (GET_MODE (trueop0))
3937 && rtx_equal_p (trueop0, trueop1)
3938 && ! side_effects_p (trueop0))
3939 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3941 /* If the operands are floating-point constants, see if we can fold the result. */
3943 else if (GET_CODE (trueop0) == CONST_DOUBLE
3944 && GET_CODE (trueop1) == CONST_DOUBLE
3945 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3947 REAL_VALUE_TYPE d0, d1;
3949 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3950 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3952 /* Comparisons are unordered iff at least one of the values is NaN. */
3953 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3963 return const_true_rtx;
3976 equal = REAL_VALUES_EQUAL (d0, d1);
3977 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3978 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3981 /* Otherwise, see if the operands are both integers. */
3982 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3983 && (GET_CODE (trueop0) == CONST_DOUBLE
3984 || GET_CODE (trueop0) == CONST_INT)
3985 && (GET_CODE (trueop1) == CONST_DOUBLE
3986 || GET_CODE (trueop1) == CONST_INT))
3988 int width = GET_MODE_BITSIZE (mode);
3989 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3990 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3992 /* Get the two words comprising each integer constant. */
3993 if (GET_CODE (trueop0) == CONST_DOUBLE)
3995 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3996 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4000 l0u = l0s = INTVAL (trueop0);
4001 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4004 if (GET_CODE (trueop1) == CONST_DOUBLE)
4006 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4007 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4011 l1u = l1s = INTVAL (trueop1);
4012 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4015 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4016 we have to sign or zero-extend the values. */
4017 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4019 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4020 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4022 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4023 l0s |= ((HOST_WIDE_INT) (-1) << width);
4025 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4026 l1s |= ((HOST_WIDE_INT) (-1) << width);
4028 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4029 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4031 equal = (h0u == h1u && l0u == l1u);
4032 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
4033 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
4034 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
4035 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
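/* Illustrative note (not part of the original function): the two-word
   comparisons above are lexicographic, high word first.  For example,
   with 32-bit words the pair (h,l) = (0, 0xffffffff) is below
   (1, 0x00000000) in both the signed and the unsigned order, because
   the high words alone already decide the result.  */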
4038 /* Otherwise, there are some code-specific tests we can make. */
4041 /* Optimize comparisons with upper and lower bounds. */
4042 if (SCALAR_INT_MODE_P (mode)
4043 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
4056 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
4063 /* x >= min is always true. */
4064 if (rtx_equal_p (trueop1, mmin))
4065 tem = const_true_rtx;
4071 /* x <= max is always true. */
4072 if (rtx_equal_p (trueop1, mmax))
4073 tem = const_true_rtx;
4078 /* x > max is always false. */
4079 if (rtx_equal_p (trueop1, mmax))
4085 /* x < min is always false. */
4086 if (rtx_equal_p (trueop1, mmin))
4093 if (tem == const0_rtx
4094 || tem == const_true_rtx)
4101 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4106 if (trueop1 == const0_rtx && nonzero_address_p (op0))
4107 return const_true_rtx;
4111 /* Optimize abs(x) < 0.0. */
4112 if (trueop1 == CONST0_RTX (mode)
4113 && !HONOR_SNANS (mode)
4114 && (!INTEGRAL_MODE_P (mode)
4115 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4117 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4119 if (GET_CODE (tem) == ABS)
4121 if (INTEGRAL_MODE_P (mode)
4122 && (issue_strict_overflow_warning
4123 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4124 warning (OPT_Wstrict_overflow,
4125 ("assuming signed overflow does not occur when "
4126 "assuming abs (x) < 0 is false"));
4131 /* Optimize popcount (x) < 0. */
4132 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4133 return const_true_rtx;
4137 /* Optimize abs(x) >= 0.0. */
4138 if (trueop1 == CONST0_RTX (mode)
4139 && !HONOR_NANS (mode)
4140 && (!INTEGRAL_MODE_P (mode)
4141 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4143 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4145 if (GET_CODE (tem) == ABS)
4147 if (INTEGRAL_MODE_P (mode)
4148 && (issue_strict_overflow_warning
4149 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4150 warning (OPT_Wstrict_overflow,
4151 ("assuming signed overflow does not occur when "
4152 "assuming abs (x) >= 0 is true"));
4153 return const_true_rtx;
4157 /* Optimize popcount (x) >= 0. */
4158 if (GET_CODE (trueop0) == POPCOUNT && trueop1 == const0_rtx)
4159 return const_true_rtx;
4163 /* Optimize ! (abs(x) < 0.0). */
4164 if (trueop1 == CONST0_RTX (mode))
4166 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
4168 if (GET_CODE (tem) == ABS)
4169 return const_true_rtx;
4180 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set as appropriate. */
4186 return equal ? const_true_rtx : const0_rtx;
4189 return ! equal ? const_true_rtx : const0_rtx;
4192 return op0lt ? const_true_rtx : const0_rtx;
4195 return op1lt ? const_true_rtx : const0_rtx;
4197 return op0ltu ? const_true_rtx : const0_rtx;
4199 return op1ltu ? const_true_rtx : const0_rtx;
4202 return equal || op0lt ? const_true_rtx : const0_rtx;
4205 return equal || op1lt ? const_true_rtx : const0_rtx;
4207 return equal || op0ltu ? const_true_rtx : const0_rtx;
4209 return equal || op1ltu ? const_true_rtx : const0_rtx;
4211 return const_true_rtx;
4219 /* Simplify CODE, an operation with result mode MODE and three operands,
4220 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4221 a constant. Return 0 if no simplification is possible. */
4224 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4225 enum machine_mode op0_mode, rtx op0, rtx op1,
4228 unsigned int width = GET_MODE_BITSIZE (mode);
4230 /* VOIDmode means "infinite" precision. */
4232 width = HOST_BITS_PER_WIDE_INT;
4238 if (GET_CODE (op0) == CONST_INT
4239 && GET_CODE (op1) == CONST_INT
4240 && GET_CODE (op2) == CONST_INT
4241 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4242 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4244 /* Extracting a bit-field from a constant. */
4245 HOST_WIDE_INT val = INTVAL (op0);
4247 if (BITS_BIG_ENDIAN)
4248 val >>= (GET_MODE_BITSIZE (op0_mode)
4249 - INTVAL (op2) - INTVAL (op1));
4251 val >>= INTVAL (op2);
4253 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4255 /* First zero-extend. */
4256 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4257 /* If desired, propagate sign bit. */
4258 if (code == SIGN_EXTRACT
4259 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4260 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4263 /* Clear the bits that don't belong in our mode,
4264 unless they and our sign bit are all one.
4265 So we get either a reasonable negative value or a reasonable
4266 unsigned value for this mode. */
4267 if (width < HOST_BITS_PER_WIDE_INT
4268 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4269 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4270 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4272 return gen_int_mode (val, mode);
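/* Worked example (illustrative, not part of the original function),
   assuming BITS_BIG_ENDIAN == 0: extracting a 4-bit field at bit
   position 2 from the constant 0xb4 yields (0xb4 >> 2) & 0xf == 0xd;
   ZERO_EXTRACT returns 13, while SIGN_EXTRACT sees bit 3 set and
   returns -3.  */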
4277 if (GET_CODE (op0) == CONST_INT)
4278 return op0 != const0_rtx ? op1 : op2;
4280 /* Convert c ? a : a into "a". */
4281 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4284 /* Convert a != b ? a : b into "a". */
4285 if (GET_CODE (op0) == NE
4286 && ! side_effects_p (op0)
4287 && ! HONOR_NANS (mode)
4288 && ! HONOR_SIGNED_ZEROS (mode)
4289 && ((rtx_equal_p (XEXP (op0, 0), op1)
4290 && rtx_equal_p (XEXP (op0, 1), op2))
4291 || (rtx_equal_p (XEXP (op0, 0), op2)
4292 && rtx_equal_p (XEXP (op0, 1), op1))))
4295 /* Convert a == b ? a : b into "b". */
4296 if (GET_CODE (op0) == EQ
4297 && ! side_effects_p (op0)
4298 && ! HONOR_NANS (mode)
4299 && ! HONOR_SIGNED_ZEROS (mode)
4300 && ((rtx_equal_p (XEXP (op0, 0), op1)
4301 && rtx_equal_p (XEXP (op0, 1), op2))
4302 || (rtx_equal_p (XEXP (op0, 0), op2)
4303 && rtx_equal_p (XEXP (op0, 1), op1))))
4306 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4308 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4309 ? GET_MODE (XEXP (op0, 1))
4310 : GET_MODE (XEXP (op0, 0)));
4313 /* Look for happy constants in op1 and op2. */
4314 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4316 HOST_WIDE_INT t = INTVAL (op1);
4317 HOST_WIDE_INT f = INTVAL (op2);
4319 if (t == STORE_FLAG_VALUE && f == 0)
4320 code = GET_CODE (op0);
4321 else if (t == 0 && f == STORE_FLAG_VALUE)
4324 tmp = reversed_comparison_code (op0, NULL_RTX);
4332 return simplify_gen_relational (code, mode, cmp_mode,
4333 XEXP (op0, 0), XEXP (op0, 1));
4336 if (cmp_mode == VOIDmode)
4337 cmp_mode = op0_mode;
4338 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4339 cmp_mode, XEXP (op0, 0),
4342 /* See if any simplifications were possible. */
4345 if (GET_CODE (temp) == CONST_INT)
4346 return temp == const0_rtx ? op2 : op1;
4348 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4354 gcc_assert (GET_MODE (op0) == mode);
4355 gcc_assert (GET_MODE (op1) == mode);
4356 gcc_assert (VECTOR_MODE_P (mode));
4357 op2 = avoid_constant_pool_reference (op2);
4358 if (GET_CODE (op2) == CONST_INT)
4360 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4361 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4362 int mask = (1 << n_elts) - 1;
4364 if (!(INTVAL (op2) & mask))
4366 if ((INTVAL (op2) & mask) == mask)
4369 op0 = avoid_constant_pool_reference (op0);
4370 op1 = avoid_constant_pool_reference (op1);
4371 if (GET_CODE (op0) == CONST_VECTOR
4372 && GET_CODE (op1) == CONST_VECTOR)
4374 rtvec v = rtvec_alloc (n_elts);
4377 for (i = 0; i < n_elts; i++)
4378 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4379 ? CONST_VECTOR_ELT (op0, i)
4380 : CONST_VECTOR_ELT (op1, i));
4381 return gen_rtx_CONST_VECTOR (mode, v);
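/* Illustrative note (not part of the original function): with a
   4-element vector, op2 == 5 (binary 0101) takes elements 0 and 2
   from op0 and elements 1 and 3 from op1, matching the bit test in
   the loop above.  */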
4393 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4394 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4396 Works by unpacking OP into a collection of 8-bit values
4397 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4398 and then repacking them again for OUTERMODE. */
4401 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4402 enum machine_mode innermode, unsigned int byte)
4404 /* We support up to 512-bit values (for V8DFmode). */
4408 value_mask = (1 << value_bit) - 1
4410 unsigned char value[max_bitsize / value_bit];
4419 rtvec result_v = NULL;
4420 enum mode_class outer_class;
4421 enum machine_mode outer_submode;
4423 /* Some ports misuse CCmode. */
4424 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4427 /* We have no way to represent a complex constant at the rtl level. */
4428 if (COMPLEX_MODE_P (outermode))
4431 /* Unpack the value. */
4433 if (GET_CODE (op) == CONST_VECTOR)
4435 num_elem = CONST_VECTOR_NUNITS (op);
4436 elems = &CONST_VECTOR_ELT (op, 0);
4437 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4443 elem_bitsize = max_bitsize;
4445 /* If this asserts, it is too complicated; reducing value_bit may help. */
4446 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4447 /* I don't know how to handle endianness of sub-units. */
4448 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4450 for (elem = 0; elem < num_elem; elem++)
4453 rtx el = elems[elem];
4455 /* Vectors are kept in target memory order. (This is probably a mistake.) */
4458 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4459 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4461 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4462 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4463 unsigned bytele = (subword_byte % UNITS_PER_WORD
4464 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4465 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4468 switch (GET_CODE (el))
4472 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4474 *vp++ = INTVAL (el) >> i;
4475 /* CONST_INTs are always logically sign-extended. */
4476 for (; i < elem_bitsize; i += value_bit)
4477 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4481 if (GET_MODE (el) == VOIDmode)
4483 /* If this triggers, someone should have generated a
4484 CONST_INT instead. */
4485 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4487 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4488 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4489 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4492 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4495 /* It shouldn't matter what's done here, so fill it with zero. */
4497 for (; i < elem_bitsize; i += value_bit)
4502 long tmp[max_bitsize / 32];
4503 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4505 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4506 gcc_assert (bitsize <= elem_bitsize);
4507 gcc_assert (bitsize % value_bit == 0);
4509 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4512 /* real_to_target produces its result in words affected by
4513 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4514 and use WORDS_BIG_ENDIAN instead; see the documentation
4515 of SUBREG in rtl.texi. */
4516 for (i = 0; i < bitsize; i += value_bit)
4519 if (WORDS_BIG_ENDIAN)
4520 ibase = bitsize - 1 - i;
4523 *vp++ = tmp[ibase / 32] >> i % 32;
4526 /* It shouldn't matter what's done here, so fill it with zero. */
4528 for (; i < elem_bitsize; i += value_bit)
4538 /* Now, pick the right byte to start with. */
4539 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4540 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4541 will already have offset 0. */
4542 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4544 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4546 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4547 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4548 byte = (subword_byte % UNITS_PER_WORD
4549 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4552 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4553 so if it's become negative it will instead be very large.) */
4554 gcc_assert (byte < GET_MODE_SIZE (innermode));
4556 /* Convert from bytes to chunks of size value_bit. */
4557 value_start = byte * (BITS_PER_UNIT / value_bit);
4559 /* Re-pack the value. */
4561 if (VECTOR_MODE_P (outermode))
4563 num_elem = GET_MODE_NUNITS (outermode);
4564 result_v = rtvec_alloc (num_elem);
4565 elems = &RTVEC_ELT (result_v, 0);
4566 outer_submode = GET_MODE_INNER (outermode);
4572 outer_submode = outermode;
4575 outer_class = GET_MODE_CLASS (outer_submode);
4576 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4578 gcc_assert (elem_bitsize % value_bit == 0);
4579 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4581 for (elem = 0; elem < num_elem; elem++)
4585 /* Vectors are stored in target memory order. (This is probably a mistake.) */
4588 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4589 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4591 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4592 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4593 unsigned bytele = (subword_byte % UNITS_PER_WORD
4594 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4595 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4598 switch (outer_class)
4601 case MODE_PARTIAL_INT:
4603 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4606 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4608 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4609 for (; i < elem_bitsize; i += value_bit)
4610 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4611 << (i - HOST_BITS_PER_WIDE_INT));
4613 /* immed_double_const doesn't call trunc_int_for_mode. I don't know why. */
4615 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4616 elems[elem] = gen_int_mode (lo, outer_submode);
4617 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4618 elems[elem] = immed_double_const (lo, hi, outer_submode);
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
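/* As an illustration of the folding above (example values, not from the
   original sources): on a 32-bit little-endian target,
   (subreg:SF (const_int 0x3f800000) 0) re-packs those 32 bits through
   real_from_target and folds to the SFmode constant 1.0.  */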
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
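  /* For instance (illustrative, little-endian offsets assumed):
     (subreg:QI (subreg:HI (reg:SI x) 0) 0) folds to
     (subreg:QI (reg:SI x) 0), while (subreg:SI (subreg:HI (reg:SI x) 0) 0)
     collapses back to (reg:SI x).  */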
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);
      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  The irritating exception is a paradoxical subreg, where
	 we define SUBREG_BYTE to be 0; on big-endian machines the value
	 would otherwise be negative.  For a moment, undo this exception.  */
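      /* E.g. (illustrative): for (subreg:DI (reg:SI x) 0) on a big-endian
	 32-bit target, the true memory offset of the SImode value within
	 the DImode view is -4; the adjustment below adds that difference
	 back into FINAL_OFFSET.  */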
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
  /* Merge implicit and explicit truncations.  */
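  /* E.g. (subreg:QI (truncate:HI (reg:SI x)) 0) becomes
     (truncate:QI (reg:SI x)) when the subreg selects the low part
     (illustrative; offset 0 is the lowpart on little-endian targets).  */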
  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */
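  /* Illustrative example, with hypothetical register numbers: on a 32-bit
     little-endian target where DImode values occupy two consecutive hard
     registers, (subreg:SI (reg:DI 10) 4) can be rewritten as (reg:SI 11),
     the register holding the high part.  */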
  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
	= regno + subreg_regno_offset (regno, innermode, byte, outermode);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (regno, innermode))
	{
	  rtx x;
	  int final_offset = byte;
	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }
	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */
	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */
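  /* E.g. (subreg:QI (mem:SI (reg:SI p)) 3) can become
     (mem:QI (plus:SI (reg:SI p) (const_int 3))) via adjust_address_nv
     (illustrative; the exact address form depends on the target).  */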
  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
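  /* E.g. (subreg:SF (concat:SC re im) 4) selects the imaginary part IM,
     assuming 4-byte SFmode parts (illustrative).  */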
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}
      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
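      /* Illustrative instances of the three cases, with byte 0 taken as
	 the lowpart (little-endian): (subreg:QI (zero_extend:SI (reg:QI x)) 0)
	 folds to (reg:QI x); (subreg:QI (zero_extend:SI (reg:HI x)) 0) to
	 (subreg:QI (reg:HI x) 0); and (subreg:HI (zero_extend:SI (reg:QI x)) 0)
	 to (zero_extend:HI (reg:QI x)).  */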
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}
      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
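  /* E.g. (subreg:SI (zero_extend:DI (reg:SI x)) 4) extracts only bits the
     extension filled with zeros and so folds to (const_int 0) under the
     rule just above (illustrative; offset 4 is the high word on a 32-bit
     little-endian target).  */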
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
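/* Illustrative use: simplify_gen_subreg (QImode, x, SImode, 0) returns a
   simplified rtx when one of the rules above applies, a fresh
   (subreg:QI x 0) when the subreg is merely valid, and NULL_RTX when no
   correct subreg can be formed.  */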
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   that do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   unnecessary.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))