1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
26 #include "coretypes.h"
32 #include "hard-reg-set.h"
35 #include "insn-config.h"
44 /* Simplification and canonicalization of RTL. */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  LOW is evaluated twice, so it must be free of
   side effects; the argument is parenthesized so that compound
   expressions (shifts, ternaries, casts) are sign-tested as a whole
   rather than having the cast bind to only their first operand.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
/* Forward declarations of file-local helpers.  NOTE(review): this is
   a line-numbered, elided listing; some prototypes below are missing
   their continuation lines -- confirm against the full source.  */
53 static rtx neg_const_int (enum machine_mode, rtx);
54 static bool plus_minus_operand_p (rtx);
55 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
56 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
67 /* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
/* Returns a fresh CONST_INT holding -INTVAL (I) in MODE; gen_int_mode
   performs the truncation to MODE's width mentioned above.
   NOTE(review): return type, braces and any intervening lines are
   elided from this listing.  */
70 neg_const_int (enum machine_mode mode, rtx i)
72 return gen_int_mode (- INTVAL (i), mode);
75 /* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
/* Returns true iff X is an integer constant equal to exactly
   (1 << (width - 1)) for MODE.  Non-MODE_INT modes answer false.
   NOTE(review): several lines (returns, braces) are elided here.  */
79 mode_signbit_p (enum machine_mode mode, rtx x)
81 unsigned HOST_WIDE_INT val;
/* Only plain integer modes have the sign-bit notion used here.  */
84 if (GET_MODE_CLASS (mode) != MODE_INT)
87 width = GET_MODE_BITSIZE (mode);
/* A narrow constant lives in a CONST_INT; a wider one must be a
   CONST_DOUBLE whose low half is zero, with the candidate sign bit
   in the high half.  */
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
/* Mask away bits above WIDTH, then compare against the lone sign
   bit of the (possibly reduced) width.  */
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
109 /* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
/* NOTE(review): return type, braces and the early-return after the
   fold attempt are elided in this listing.  */
113 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
118 /* If this simplifies, do it. */
119 tem = simplify_binary_operation (code, mode, op0, op1);
123 /* Put complex operands first and constants second if commutative. */
124 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
125 && swap_commutative_operands_p (op0, op1))
126 tem = op0, op0 = op1, op1 = tem;
/* No simplification applied: build the raw binary rtx.  */
128 return gen_rtx_fmt_ee (code, mode, op0, op1);
131 /* If X is a MEM referencing the constant pool, return the real value.
132 Otherwise return X. */
/* NOTE(review): the switch's case labels, default, and several
   returns are elided from this numbered listing.  */
134 avoid_constant_pool_reference (rtx x)
137 enum machine_mode cmode;
138 HOST_WIDE_INT offset = 0;
140 switch (GET_CODE (x))
146 /* Handle float extensions of constant pool references. */
148 c = avoid_constant_pool_reference (tmp);
149 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
153 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
154 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
/* BLKmode MEMs cannot be replaced by a scalar constant.  */
162 if (GET_MODE (x) == BLKmode)
167 /* Call target hook to avoid the effects of -fpic etc.... */
168 addr = targetm.delegitimize_address (addr);
170 /* Split the address into a base and integer offset. */
171 if (GET_CODE (addr) == CONST
172 && GET_CODE (XEXP (addr, 0)) == PLUS
173 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
175 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
176 addr = XEXP (XEXP (addr, 0), 0);
/* A LO_SUM carries the interesting symbol in its second operand.  */
179 if (GET_CODE (addr) == LO_SUM)
180 addr = XEXP (addr, 1);
182 /* If this is a constant pool reference, we can turn it into its
183 constant and hope that simplifications happen. */
184 if (GET_CODE (addr) == SYMBOL_REF
185 && CONSTANT_POOL_ADDRESS_P (addr))
187 c = get_pool_constant (addr);
188 cmode = get_pool_mode (addr);
190 /* If we're accessing the constant in a different mode than it was
191 originally stored, attempt to fix that up via subreg simplifications.
192 If that fails we have no choice but to return the original memory. */
193 if (offset != 0 || cmode != GET_MODE (x))
195 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
196 if (tem && CONSTANT_P (tem))
206 /* Return true if X is a MEM referencing the constant pool. */
/* Thin predicate: X references the pool precisely when
   avoid_constant_pool_reference can replace it with something else.  */
209 constant_pool_reference_p (rtx x)
211 return avoid_constant_pool_reference (x) != x;
214 /* Make a unary operation by first seeing if it folds and otherwise making
215 the specified operation. */
218 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
219 enum machine_mode op_mode)
223 /* If this simplifies, use it. */
224 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
/* Otherwise construct the raw unary rtx.  */
227 return gen_rtx_fmt_e (code, mode, op);
230 /* Likewise for ternary operations. */
/* OP0_MODE is the mode of OP0 (needed when OP0's own mode can be
   VOIDmode).  NOTE(review): the continuation of the call below and
   the early return are elided.  */
233 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
234 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
238 /* If this simplifies, use it. */
239 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
243 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
246 /* Likewise, for relational operations.
247 CMP_MODE specifies mode comparison is done in. */
/* NOTE(review): continuation of the call and the early return are
   elided in this listing.  */
250 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
251 enum machine_mode cmp_mode, rtx op0, rtx op1)
255 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
259 return gen_rtx_fmt_ee (code, mode, op0, op1);
262 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
263 resulting RTX. Return a new RTX which is as simplified as possible. */
/* NOTE(review): recursive tree-rewriting substitution.  The switch's
   case labels (RTX_UNARY, RTX_BIN_ARITH, RTX_COMPARE, RTX_TERNARY,
   RTX_EXTRA, RTX_OBJ, presumably) and many returns are elided from
   this numbered listing -- confirm against the full source.  */
266 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
268 enum rtx_code code = GET_CODE (x);
269 enum machine_mode mode = GET_MODE (x);
270 enum machine_mode op_mode;
273 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
274 to build a new expression substituting recursively. If we can't do
275 anything, return our input. */
280 switch (GET_RTX_CLASS (code))
/* Unary: recurse into the single operand; rebuild only on change.  */
284 op_mode = GET_MODE (op0);
285 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
286 if (op0 == XEXP (x, 0))
288 return simplify_gen_unary (code, mode, op0, op_mode);
/* Binary arithmetic: recurse into both operands.  */
292 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
293 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
294 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
296 return simplify_gen_binary (code, mode, op0, op1);
299 case RTX_COMM_COMPARE:
/* Comparisons: pick a non-VOID operand mode for the comparison.  */
302 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
303 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
304 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
305 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
307 return simplify_gen_relational (code, mode, op_mode, op0, op1);
310 case RTX_BITFIELD_OPS:
/* Ternary / bitfield: recurse into all three operands.  */
312 op_mode = GET_MODE (op0);
313 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
314 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
315 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
316 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
318 if (op_mode == VOIDmode)
319 op_mode = GET_MODE (op0);
320 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
323 /* The only case we try to handle is a SUBREG. */
326 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
327 if (op0 == SUBREG_REG (x))
329 op0 = simplify_gen_subreg (GET_MODE (x), op0,
330 GET_MODE (SUBREG_REG (x)),
332 return op0 ? op0 : x;
/* Objects: a MEM's address is substituted without re-simplifying
   the MEM itself (replace_equiv_address_nv does no emitting).  */
339 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
340 if (op0 == XEXP (x, 0))
342 return replace_equiv_address_nv (x, op0);
344 else if (code == LO_SUM)
346 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
347 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
349 /* (lo_sum (high x) x) -> x */
350 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
353 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
355 return gen_rtx_LO_SUM (mode, op0, op1);
357 else if (code == REG)
359 if (rtx_equal_p (x, old_rtx))
370 /* Try to simplify a unary operation CODE whose output mode is to be
371 MODE with input operand OP whose mode was originally OP_MODE.
372 Return zero if no simplification can be made. */
/* Dispatcher: first try constant folding on the pool-dereferenced
   operand, then fall back to the symbolic rules in
   simplify_unary_operation_1.  NOTE(review): lines between the
   numbered lines below are elided.  */
374 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
375 rtx op, enum machine_mode op_mode)
379 if (GET_CODE (op) == CONST)
/* Look through constant-pool MEMs before attempting to fold.  */
382 trueop = avoid_constant_pool_reference (op);
384 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
388 return simplify_unary_operation_1 (code, mode, op);
391 /* Perform some simplifications we can do even if the operands
/* NOTE(review): this is a numbered, elided listing.  The body is one
   large switch on CODE; every `case` label, many braces and several
   returns are missing between the numbered lines below.  The marker
   comments inserted here flag where each elided case presumably
   begins -- confirm against the full source.  */
394 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
396 enum rtx_code reversed;
/* (review) elided "case NOT:" presumably begins here.  */
402 /* (not (not X)) == X. */
403 if (GET_CODE (op) == NOT)
406 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
407 comparison is all ones. */
408 if (COMPARISON_P (op)
409 && (mode == BImode || STORE_FLAG_VALUE == -1)
410 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
411 return simplify_gen_relational (reversed, mode, VOIDmode,
412 XEXP (op, 0), XEXP (op, 1));
414 /* (not (plus X -1)) can become (neg X). */
415 if (GET_CODE (op) == PLUS
416 && XEXP (op, 1) == constm1_rtx)
417 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
419 /* Similarly, (not (neg X)) is (plus X -1). */
420 if (GET_CODE (op) == NEG)
421 return plus_constant (XEXP (op, 0), -1);
423 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
424 if (GET_CODE (op) == XOR
425 && GET_CODE (XEXP (op, 1)) == CONST_INT
426 && (temp = simplify_unary_operation (NOT, mode,
427 XEXP (op, 1), mode)) != 0)
428 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
430 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
431 if (GET_CODE (op) == PLUS
432 && GET_CODE (XEXP (op, 1)) == CONST_INT
433 && mode_signbit_p (mode, XEXP (op, 1))
434 && (temp = simplify_unary_operation (NOT, mode,
435 XEXP (op, 1), mode)) != 0)
436 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
439 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
440 operands other than 1, but that is not valid. We could do a
441 similar simplification for (not (lshiftrt C X)) where C is
442 just the sign bit, but this doesn't seem common enough to
444 if (GET_CODE (op) == ASHIFT
445 && XEXP (op, 0) == const1_rtx)
447 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
448 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
451 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
452 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
453 so we can perform the above simplification. */
455 if (STORE_FLAG_VALUE == -1
456 && GET_CODE (op) == ASHIFTRT
457 && GET_CODE (XEXP (op, 1)) == CONST_INT
458 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
459 return simplify_gen_relational (GE, mode, VOIDmode,
460 XEXP (op, 0), const0_rtx);
/* Same (not (ashift 1 X)) rule seen through a lowpart SUBREG of a
   wider shift.  */
463 if (GET_CODE (op) == SUBREG
464 && subreg_lowpart_p (op)
465 && (GET_MODE_SIZE (GET_MODE (op))
466 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
467 && GET_CODE (SUBREG_REG (op)) == ASHIFT
468 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
470 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
473 x = gen_rtx_ROTATE (inner_mode,
474 simplify_gen_unary (NOT, inner_mode, const1_rtx,
476 XEXP (SUBREG_REG (op), 1));
477 return rtl_hooks.gen_lowpart_no_emit (mode, x);
480 /* Apply De Morgan's laws to reduce number of patterns for machines
481 with negating logical insns (and-not, nand, etc.). If result has
482 only one NOT, put it first, since that is how the patterns are
485 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
487 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
488 enum machine_mode op_mode;
490 op_mode = GET_MODE (in1);
491 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
493 op_mode = GET_MODE (in2);
494 if (op_mode == VOIDmode)
496 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
498 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
501 in2 = in1; in1 = tem;
504 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
/* (review) elided "case NEG:" presumably begins here.  */
510 /* (neg (neg X)) == X. */
511 if (GET_CODE (op) == NEG)
514 /* (neg (plus X 1)) can become (not X). */
515 if (GET_CODE (op) == PLUS
516 && XEXP (op, 1) == const1_rtx)
517 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
519 /* Similarly, (neg (not X)) is (plus X 1). */
520 if (GET_CODE (op) == NOT)
521 return plus_constant (XEXP (op, 0), 1);
523 /* (neg (minus X Y)) can become (minus Y X). This transformation
524 isn't safe for modes with signed zeros, since if X and Y are
525 both +0, (minus Y X) is the same as (minus X Y). If the
526 rounding mode is towards +infinity (or -infinity) then the two
527 expressions will be rounded differently. */
528 if (GET_CODE (op) == MINUS
529 && !HONOR_SIGNED_ZEROS (mode)
530 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
531 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
533 if (GET_CODE (op) == PLUS
534 && !HONOR_SIGNED_ZEROS (mode)
535 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
537 /* (neg (plus A C)) is simplified to (minus -C A). */
538 if (GET_CODE (XEXP (op, 1)) == CONST_INT
539 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
541 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
543 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
546 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
547 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
548 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
551 /* (neg (mult A B)) becomes (mult (neg A) B).
552 This works even for floating-point values. */
553 if (GET_CODE (op) == MULT
554 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
556 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
557 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
560 /* NEG commutes with ASHIFT since it is multiplication. Only do
561 this if we can then eliminate the NEG (e.g., if the operand
563 if (GET_CODE (op) == ASHIFT)
565 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
567 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
570 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
571 C is equal to the width of MODE minus 1. */
572 if (GET_CODE (op) == ASHIFTRT
573 && GET_CODE (XEXP (op, 1)) == CONST_INT
574 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
575 return simplify_gen_binary (LSHIFTRT, mode,
576 XEXP (op, 0), XEXP (op, 1));
578 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
579 C is equal to the width of MODE minus 1. */
580 if (GET_CODE (op) == LSHIFTRT
581 && GET_CODE (XEXP (op, 1)) == CONST_INT
582 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
583 return simplify_gen_binary (ASHIFTRT, mode,
584 XEXP (op, 0), XEXP (op, 1));
586 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
587 if (GET_CODE (op) == XOR
588 && XEXP (op, 1) == const1_rtx
589 && nonzero_bits (XEXP (op, 0), mode) == 1)
590 return plus_constant (XEXP (op, 0), -1);
592 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
593 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
594 if (GET_CODE (op) == LT
595 && XEXP (op, 1) == const0_rtx)
597 enum machine_mode inner = GET_MODE (XEXP (op, 0));
598 int isize = GET_MODE_BITSIZE (inner);
599 if (STORE_FLAG_VALUE == 1)
601 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
602 GEN_INT (isize - 1));
605 if (GET_MODE_BITSIZE (mode) > isize)
606 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
607 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
609 else if (STORE_FLAG_VALUE == -1)
611 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
612 GEN_INT (isize - 1));
615 if (GET_MODE_BITSIZE (mode) > isize)
616 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
617 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
/* (review) elided "case TRUNCATE:" presumably begins here.  */
623 /* We can't handle truncation to a partial integer mode here
624 because we don't know the real bitsize of the partial
626 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
629 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
630 if ((GET_CODE (op) == SIGN_EXTEND
631 || GET_CODE (op) == ZERO_EXTEND)
632 && GET_MODE (XEXP (op, 0)) == mode)
635 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
636 (OP:SI foo:SI) if OP is NEG or ABS. */
637 if ((GET_CODE (op) == ABS
638 || GET_CODE (op) == NEG)
639 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
640 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
641 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
642 return simplify_gen_unary (GET_CODE (op), mode,
643 XEXP (XEXP (op, 0), 0), mode);
645 /* (truncate:A (subreg:B (truncate:C X) 0)) is
647 if (GET_CODE (op) == SUBREG
648 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
649 && subreg_lowpart_p (op))
650 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
651 GET_MODE (XEXP (SUBREG_REG (op), 0)));
653 /* If we know that the value is already truncated, we can
654 replace the TRUNCATE with a SUBREG. Note that this is also
655 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
656 modes we just have to apply a different definition for
657 truncation. But don't do this for an (LSHIFTRT (MULT ...))
658 since this will cause problems with the umulXi3_highpart
660 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
661 GET_MODE_BITSIZE (GET_MODE (op)))
662 ? (num_sign_bit_copies (op, GET_MODE (op))
663 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
664 - GET_MODE_BITSIZE (mode)))
665 : truncated_to_mode (mode, op))
666 && ! (GET_CODE (op) == LSHIFTRT
667 && GET_CODE (XEXP (op, 0)) == MULT))
668 return rtl_hooks.gen_lowpart_no_emit (mode, op);
670 /* A truncate of a comparison can be replaced with a subreg if
671 STORE_FLAG_VALUE permits. This is like the previous test,
672 but it works even if the comparison is done in a mode larger
673 than HOST_BITS_PER_WIDE_INT. */
674 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
676 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
677 return rtl_hooks.gen_lowpart_no_emit (mode, op);
/* (review) elided "case FLOAT_TRUNCATE:" presumably begins here.  */
681 if (DECIMAL_FLOAT_MODE_P (mode))
684 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
685 if (GET_CODE (op) == FLOAT_EXTEND
686 && GET_MODE (XEXP (op, 0)) == mode)
689 /* (float_truncate:SF (float_truncate:DF foo:XF))
690 = (float_truncate:SF foo:XF).
691 This may eliminate double rounding, so it is unsafe.
693 (float_truncate:SF (float_extend:XF foo:DF))
694 = (float_truncate:SF foo:DF).
696 (float_truncate:DF (float_extend:XF foo:SF))
697 = (float_extend:SF foo:DF). */
698 if ((GET_CODE (op) == FLOAT_TRUNCATE
699 && flag_unsafe_math_optimizations)
700 || GET_CODE (op) == FLOAT_EXTEND)
701 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
703 > GET_MODE_SIZE (mode)
704 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
708 /* (float_truncate (float x)) is (float x) */
709 if (GET_CODE (op) == FLOAT
710 && (flag_unsafe_math_optimizations
711 || ((unsigned)significand_size (GET_MODE (op))
712 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
713 - num_sign_bit_copies (XEXP (op, 0),
714 GET_MODE (XEXP (op, 0)))))))
715 return simplify_gen_unary (FLOAT, mode,
717 GET_MODE (XEXP (op, 0)));
719 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
720 (OP:SF foo:SF) if OP is NEG or ABS. */
721 if ((GET_CODE (op) == ABS
722 || GET_CODE (op) == NEG)
723 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
724 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
725 return simplify_gen_unary (GET_CODE (op), mode,
726 XEXP (XEXP (op, 0), 0), mode);
728 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
729 is (float_truncate:SF x). */
730 if (GET_CODE (op) == SUBREG
731 && subreg_lowpart_p (op)
732 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
733 return SUBREG_REG (op);
/* (review) elided "case FLOAT_EXTEND:" presumably begins here.  */
737 if (DECIMAL_FLOAT_MODE_P (mode))
740 /* (float_extend (float_extend x)) is (float_extend x)
742 (float_extend (float x)) is (float x) assuming that double
743 rounding can't happen.
745 if (GET_CODE (op) == FLOAT_EXTEND
746 || (GET_CODE (op) == FLOAT
747 && ((unsigned)significand_size (GET_MODE (op))
748 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
749 - num_sign_bit_copies (XEXP (op, 0),
750 GET_MODE (XEXP (op, 0)))))))
751 return simplify_gen_unary (GET_CODE (op), mode,
753 GET_MODE (XEXP (op, 0)));
/* (review) elided "case ABS:" presumably begins here.  */
758 /* (abs (neg <foo>)) -> (abs <foo>) */
759 if (GET_CODE (op) == NEG)
760 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
761 GET_MODE (XEXP (op, 0)));
763 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
765 if (GET_MODE (op) == VOIDmode)
768 /* If operand is something known to be positive, ignore the ABS. */
769 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
770 || ((GET_MODE_BITSIZE (GET_MODE (op))
771 <= HOST_BITS_PER_WIDE_INT)
772 && ((nonzero_bits (op, GET_MODE (op))
774 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
778 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
779 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
780 return gen_rtx_NEG (mode, op);
/* (review) elided "case FFS:" presumably begins here.  */
785 /* (ffs (*_extend <X>)) = (ffs <X>) */
786 if (GET_CODE (op) == SIGN_EXTEND
787 || GET_CODE (op) == ZERO_EXTEND)
788 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
789 GET_MODE (XEXP (op, 0)));
/* (review) elided "case POPCOUNT:" / "case PARITY:" presumably here.  */
794 /* (pop* (zero_extend <X>)) = (pop* <X>) */
795 if (GET_CODE (op) == ZERO_EXTEND)
796 return simplify_gen_unary (code, mode, XEXP (op, 0),
797 GET_MODE (XEXP (op, 0)));
/* (review) elided "case FLOAT:" presumably begins here.  */
801 /* (float (sign_extend <X>)) = (float <X>). */
802 if (GET_CODE (op) == SIGN_EXTEND)
803 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
804 GET_MODE (XEXP (op, 0)));
/* (review) elided "case SIGN_EXTEND:" presumably begins here.  */
808 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
809 becomes just the MINUS if its mode is MODE. This allows
810 folding switch statements on machines using casesi (such as
812 if (GET_CODE (op) == TRUNCATE
813 && GET_MODE (XEXP (op, 0)) == mode
814 && GET_CODE (XEXP (op, 0)) == MINUS
815 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
816 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
819 /* Check for a sign extension of a subreg of a promoted
820 variable, where the promotion is sign-extended, and the
821 target mode is the same as the variable's promotion. */
822 if (GET_CODE (op) == SUBREG
823 && SUBREG_PROMOTED_VAR_P (op)
824 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
825 && GET_MODE (XEXP (op, 0)) == mode)
828 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
829 if (! POINTERS_EXTEND_UNSIGNED
830 && mode == Pmode && GET_MODE (op) == ptr_mode
832 || (GET_CODE (op) == SUBREG
833 && REG_P (SUBREG_REG (op))
834 && REG_POINTER (SUBREG_REG (op))
835 && GET_MODE (SUBREG_REG (op)) == Pmode)))
836 return convert_memory_address (Pmode, op);
/* (review) elided "case ZERO_EXTEND:" presumably begins here.  */
841 /* Check for a zero extension of a subreg of a promoted
842 variable, where the promotion is zero-extended, and the
843 target mode is the same as the variable's promotion. */
844 if (GET_CODE (op) == SUBREG
845 && SUBREG_PROMOTED_VAR_P (op)
846 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
847 && GET_MODE (XEXP (op, 0)) == mode)
850 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
851 if (POINTERS_EXTEND_UNSIGNED > 0
852 && mode == Pmode && GET_MODE (op) == ptr_mode
854 || (GET_CODE (op) == SUBREG
855 && REG_P (SUBREG_REG (op))
856 && REG_POINTER (SUBREG_REG (op))
857 && GET_MODE (SUBREG_REG (op)) == Pmode)))
858 return convert_memory_address (Pmode, op);
869 /* Try to compute the value of a unary operation CODE whose output mode is to
870 be MODE with input operand OP whose mode was originally OP_MODE.
871 Return zero if the value cannot be computed. */
/* NOTE(review): constant folder for unary operations.  This numbered
   listing elides the case labels of the inner switches on CODE, many
   braces and returns; the marker comments below flag the major
   folding regimes -- confirm details against the full source.  */
873 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
874 rtx op, enum machine_mode op_mode)
876 unsigned int width = GET_MODE_BITSIZE (mode);
/* VEC_DUPLICATE: replicate a scalar (or smaller vector) constant
   across all elements of the result vector.  */
878 if (code == VEC_DUPLICATE)
880 gcc_assert (VECTOR_MODE_P (mode));
881 if (GET_MODE (op) != VOIDmode)
883 if (!VECTOR_MODE_P (GET_MODE (op)))
884 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
886 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
889 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
890 || GET_CODE (op) == CONST_VECTOR)
892 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
893 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
894 rtvec v = rtvec_alloc (n_elts);
897 if (GET_CODE (op) != CONST_VECTOR)
898 for (i = 0; i < n_elts; i++)
899 RTVEC_ELT (v, i) = op;
902 enum machine_mode inmode = GET_MODE (op);
903 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
904 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
906 gcc_assert (in_n_elts < n_elts);
907 gcc_assert ((n_elts % in_n_elts) == 0);
908 for (i = 0; i < n_elts; i++)
909 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
911 return gen_rtx_CONST_VECTOR (mode, v);
/* Elementwise folding of a unary op over a CONST_VECTOR.  */
915 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
917 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
918 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
919 enum machine_mode opmode = GET_MODE (op);
920 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
921 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
922 rtvec v = rtvec_alloc (n_elts);
925 gcc_assert (op_n_elts == n_elts);
926 for (i = 0; i < n_elts; i++)
928 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
929 CONST_VECTOR_ELT (op, i),
930 GET_MODE_INNER (opmode));
933 RTVEC_ELT (v, i) = x;
935 return gen_rtx_CONST_VECTOR (mode, v);
938 /* The order of these tests is critical so that, for example, we don't
939 check the wrong mode (input vs. output) for a conversion operation,
940 such as FIX. At some point, this should be simplified. */
/* FLOAT of an integer constant -> real constant.  */
942 if (code == FLOAT && GET_MODE (op) == VOIDmode
943 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
945 HOST_WIDE_INT hv, lv;
948 if (GET_CODE (op) == CONST_INT)
949 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
951 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
953 REAL_VALUE_FROM_INT (d, lv, hv, mode);
954 d = real_value_truncate (mode, d);
955 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* UNSIGNED_FLOAT likewise, interpreting the bits as unsigned.  */
957 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
958 && (GET_CODE (op) == CONST_DOUBLE
959 || GET_CODE (op) == CONST_INT))
961 HOST_WIDE_INT hv, lv;
964 if (GET_CODE (op) == CONST_INT)
965 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
967 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
969 if (op_mode == VOIDmode)
971 /* We don't know how to interpret negative-looking numbers in
972 this case, so don't try to fold those. */
976 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
979 hv = 0, lv &= GET_MODE_MASK (op_mode);
981 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
982 d = real_value_truncate (mode, d);
983 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Fold a unary op on a narrow CONST_INT; an elided switch on CODE
   computes VAL below.  */
986 if (GET_CODE (op) == CONST_INT
987 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
989 HOST_WIDE_INT arg0 = INTVAL (op);
1003 val = (arg0 >= 0 ? arg0 : - arg0);
1007 /* Don't use ffs here. Instead, get low order bit and then its
1008 number. If arg0 is zero, this will return 0, as desired. */
1009 arg0 &= GET_MODE_MASK (mode);
1010 val = exact_log2 (arg0 & (- arg0)) + 1;
1014 arg0 &= GET_MODE_MASK (mode);
1015 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1018 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1022 arg0 &= GET_MODE_MASK (mode);
1025 /* Even if the value at zero is undefined, we have to come
1026 up with some replacement. Seems good enough. */
1027 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1028 val = GET_MODE_BITSIZE (mode);
1031 val = exact_log2 (arg0 & -arg0);
1035 arg0 &= GET_MODE_MASK (mode);
1038 val++, arg0 &= arg0 - 1;
1042 arg0 &= GET_MODE_MASK (mode);
1045 val++, arg0 &= arg0 - 1;
1057 /* When zero-extending a CONST_INT, we need to know its
1059 gcc_assert (op_mode != VOIDmode);
1060 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1062 /* If we were really extending the mode,
1063 we would have to distinguish between zero-extension
1064 and sign-extension. */
1065 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1068 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1069 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1075 if (op_mode == VOIDmode)
1077 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1079 /* If we were really extending the mode,
1080 we would have to distinguish between zero-extension
1081 and sign-extension. */
1082 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1085 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1088 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1090 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1091 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1099 case FLOAT_TRUNCATE:
1109 return gen_int_mode (val, mode);
1112 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1113 for a DImode operation on a CONST_INT. */
1114 else if (GET_MODE (op) == VOIDmode
1115 && width <= HOST_BITS_PER_WIDE_INT * 2
1116 && (GET_CODE (op) == CONST_DOUBLE
1117 || GET_CODE (op) == CONST_INT))
1119 unsigned HOST_WIDE_INT l1, lv;
1120 HOST_WIDE_INT h1, hv;
1122 if (GET_CODE (op) == CONST_DOUBLE)
1123 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1125 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1135 neg_double (l1, h1, &lv, &hv);
1140 neg_double (l1, h1, &lv, &hv);
1152 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1155 lv = exact_log2 (l1 & -l1) + 1;
1161 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1162 - HOST_BITS_PER_WIDE_INT;
1164 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1165 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1166 lv = GET_MODE_BITSIZE (mode);
1172 lv = exact_log2 (l1 & -l1);
1174 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1175 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1176 lv = GET_MODE_BITSIZE (mode);
1199 /* This is just a change-of-mode, so do nothing. */
1204 gcc_assert (op_mode != VOIDmode);
1206 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1210 lv = l1 & GET_MODE_MASK (op_mode);
1214 if (op_mode == VOIDmode
1215 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1219 lv = l1 & GET_MODE_MASK (op_mode);
1220 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1221 && (lv & ((HOST_WIDE_INT) 1
1222 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1223 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1225 hv = HWI_SIGN_EXTEND (lv);
1236 return immed_double_const (lv, hv, mode);
/* Fold unary float ops (SQRT/ABS/NEG/...) on a real constant.  */
1239 else if (GET_CODE (op) == CONST_DOUBLE
1240 && SCALAR_FLOAT_MODE_P (mode))
1242 REAL_VALUE_TYPE d, t;
1243 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1248 if (HONOR_SNANS (mode) && real_isnan (&d))
1250 real_sqrt (&t, mode, &d);
1254 d = REAL_VALUE_ABS (d);
1257 d = REAL_VALUE_NEGATE (d);
1259 case FLOAT_TRUNCATE:
1260 d = real_value_truncate (mode, d);
1263 /* All this does is change the mode. */
1266 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1273 real_to_target (tmp, &d, GET_MODE (op));
1274 for (i = 0; i < 4; i++)
1276 real_from_target (&d, tmp, mode);
1282 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* FIX / UNSIGNED_FIX of a real constant: saturate against the
   signed/unsigned bounds of MODE before converting.  */
1285 else if (GET_CODE (op) == CONST_DOUBLE
1286 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1287 && GET_MODE_CLASS (mode) == MODE_INT
1288 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1290 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1291 operators are intentionally left unspecified (to ease implementation
1292 by target backends), for consistency, this routine implements the
1293 same semantics for constant folding as used by the middle-end. */
1295 /* This was formerly used only for non-IEEE float.
1296 eggert@twinsun.com says it is safe for IEEE also. */
1297 HOST_WIDE_INT xh, xl, th, tl;
1298 REAL_VALUE_TYPE x, t;
1299 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1303 if (REAL_VALUE_ISNAN (x))
1306 /* Test against the signed upper bound. */
1307 if (width > HOST_BITS_PER_WIDE_INT)
1309 th = ((unsigned HOST_WIDE_INT) 1
1310 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1316 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1318 real_from_integer (&t, VOIDmode, tl, th, 0);
1319 if (REAL_VALUES_LESS (t, x))
1326 /* Test against the signed lower bound. */
1327 if (width > HOST_BITS_PER_WIDE_INT)
1329 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1335 tl = (HOST_WIDE_INT) -1 << (width - 1);
1337 real_from_integer (&t, VOIDmode, tl, th, 0);
1338 if (REAL_VALUES_LESS (x, t))
1344 REAL_VALUE_TO_INT (&xl, &xh, x);
1348 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1351 /* Test against the unsigned upper bound. */
1352 if (width == 2*HOST_BITS_PER_WIDE_INT)
1357 else if (width >= HOST_BITS_PER_WIDE_INT)
1359 th = ((unsigned HOST_WIDE_INT) 1
1360 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1366 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1368 real_from_integer (&t, VOIDmode, tl, th, 1);
1369 if (REAL_VALUES_LESS (t, x))
1376 REAL_VALUE_TO_INT (&xl, &xh, x);
1382 return immed_double_const (xl, xh, mode);
1388 /* Subroutine of simplify_binary_operation to simplify a commutative,
1389 associative binary operation CODE with result mode MODE, operating
1390 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1391 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1392 canonicalization is possible. */
1395 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1400 /* Linearize the operator to the left. */
1401 if (GET_CODE (op1) == code)
1403 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1404 if (GET_CODE (op0) == code)
1406 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1407 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1410 /* "a op (b op c)" becomes "(b op c) op a". */
1411 if (! swap_commutative_operands_p (op1, op0))
1412 return simplify_gen_binary (code, mode, op1, op0);
1419 if (GET_CODE (op0) == code)
1421 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1422 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1424 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1425 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1428 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1429 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1430 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1431 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1433 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1435 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1436 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1437 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1438 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1440 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1447 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1448 and OP1. Return 0 if no simplification is possible.
1450 Don't use this for relational operations such as EQ or LT.
1451 Use simplify_relational_operation instead. */
1453 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1456 rtx trueop0, trueop1;
1459 /* Relational operations don't work here. We must know the mode
1460 of the operands in order to do the comparison correctly.
1461 Assuming a full word can give incorrect results.
1462 Consider comparing 128 with -128 in QImode. */
1463 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1464 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1466 /* Make sure the constant is second. */
1467 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1468 && swap_commutative_operands_p (op0, op1))
1470 tem = op0, op0 = op1, op1 = tem;
1473 trueop0 = avoid_constant_pool_reference (op0);
1474 trueop1 = avoid_constant_pool_reference (op1);
1476 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1479 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1482 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1483 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1484 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1485 actual constants. */
1488 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1489 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1491 rtx tem, reversed, opleft, opright;
1493 unsigned int width = GET_MODE_BITSIZE (mode);
1495 /* Even if we can't compute a constant result,
1496 there are some cases worth simplifying. */
1501 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1502 when x is NaN, infinite, or finite and nonzero. They aren't
1503 when x is -0 and the rounding mode is not towards -infinity,
1504 since (-0) + 0 is then 0. */
1505 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1508 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1509 transformations are safe even for IEEE. */
1510 if (GET_CODE (op0) == NEG)
1511 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1512 else if (GET_CODE (op1) == NEG)
1513 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1515 /* (~a) + 1 -> -a */
1516 if (INTEGRAL_MODE_P (mode)
1517 && GET_CODE (op0) == NOT
1518 && trueop1 == const1_rtx)
1519 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1521 /* Handle both-operands-constant cases. We can only add
1522 CONST_INTs to constants since the sum of relocatable symbols
1523 can't be handled by most assemblers. Don't add CONST_INT
1524 to CONST_INT since overflow won't be computed properly if wider
1525 than HOST_BITS_PER_WIDE_INT. */
1527 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1528 && GET_CODE (op1) == CONST_INT)
1529 return plus_constant (op0, INTVAL (op1));
1530 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1531 && GET_CODE (op0) == CONST_INT)
1532 return plus_constant (op1, INTVAL (op0));
1534 /* See if this is something like X * C - X or vice versa or
1535 if the multiplication is written as a shift. If so, we can
1536 distribute and make a new multiply, shift, or maybe just
1537 have X (if C is 2 in the example above). But don't make
1538 something more expensive than we had before. */
1540 if (SCALAR_INT_MODE_P (mode))
1542 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1543 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1544 rtx lhs = op0, rhs = op1;
1546 if (GET_CODE (lhs) == NEG)
1550 lhs = XEXP (lhs, 0);
1552 else if (GET_CODE (lhs) == MULT
1553 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1555 coeff0l = INTVAL (XEXP (lhs, 1));
1556 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1557 lhs = XEXP (lhs, 0);
1559 else if (GET_CODE (lhs) == ASHIFT
1560 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1561 && INTVAL (XEXP (lhs, 1)) >= 0
1562 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1564 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1566 lhs = XEXP (lhs, 0);
1569 if (GET_CODE (rhs) == NEG)
1573 rhs = XEXP (rhs, 0);
1575 else if (GET_CODE (rhs) == MULT
1576 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1578 coeff1l = INTVAL (XEXP (rhs, 1));
1579 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1580 rhs = XEXP (rhs, 0);
1582 else if (GET_CODE (rhs) == ASHIFT
1583 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1584 && INTVAL (XEXP (rhs, 1)) >= 0
1585 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1587 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1589 rhs = XEXP (rhs, 0);
1592 if (rtx_equal_p (lhs, rhs))
1594 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1596 unsigned HOST_WIDE_INT l;
1599 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1600 coeff = immed_double_const (l, h, mode);
1602 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1603 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1608 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1609 if ((GET_CODE (op1) == CONST_INT
1610 || GET_CODE (op1) == CONST_DOUBLE)
1611 && GET_CODE (op0) == XOR
1612 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1613 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1614 && mode_signbit_p (mode, op1))
1615 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1616 simplify_gen_binary (XOR, mode, op1,
1619 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1620 if (GET_CODE (op0) == MULT
1621 && GET_CODE (XEXP (op0, 0)) == NEG)
1625 in1 = XEXP (XEXP (op0, 0), 0);
1626 in2 = XEXP (op0, 1);
1627 return simplify_gen_binary (MINUS, mode, op1,
1628 simplify_gen_binary (MULT, mode,
1632 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1633 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1635 if (COMPARISON_P (op0)
1636 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1637 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1638 && (reversed = reversed_comparison (op0, mode)))
1640 simplify_gen_unary (NEG, mode, reversed, mode);
1642 /* If one of the operands is a PLUS or a MINUS, see if we can
1643 simplify this by the associative law.
1644 Don't use the associative law for floating point.
1645 The inaccuracy makes it nonassociative,
1646 and subtle programs can break if operations are associated. */
1648 if (INTEGRAL_MODE_P (mode)
1649 && (plus_minus_operand_p (op0)
1650 || plus_minus_operand_p (op1))
1651 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1654 /* Reassociate floating point addition only when the user
1655 specifies unsafe math optimizations. */
1656 if (FLOAT_MODE_P (mode)
1657 && flag_unsafe_math_optimizations)
1659 tem = simplify_associative_operation (code, mode, op0, op1);
1667 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1668 using cc0, in which case we want to leave it as a COMPARE
1669 so we can distinguish it from a register-register-copy.
1671 In IEEE floating point, x-0 is not the same as x. */
1673 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1674 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1675 && trueop1 == CONST0_RTX (mode))
1679 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1680 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1681 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1682 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1684 rtx xop00 = XEXP (op0, 0);
1685 rtx xop10 = XEXP (op1, 0);
1688 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1690 if (REG_P (xop00) && REG_P (xop10)
1691 && GET_MODE (xop00) == GET_MODE (xop10)
1692 && REGNO (xop00) == REGNO (xop10)
1693 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1694 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1701 /* We can't assume x-x is 0 even with non-IEEE floating point,
1702 but since it is zero except in very strange circumstances, we
1703 will treat it as zero with -funsafe-math-optimizations. */
1704 if (rtx_equal_p (trueop0, trueop1)
1705 && ! side_effects_p (op0)
1706 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1707 return CONST0_RTX (mode);
1709 /* Change subtraction from zero into negation. (0 - x) is the
1710 same as -x when x is NaN, infinite, or finite and nonzero.
1711 But if the mode has signed zeros, and does not round towards
1712 -infinity, then 0 - 0 is 0, not -0. */
1713 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1714 return simplify_gen_unary (NEG, mode, op1, mode);
1716 /* (-1 - a) is ~a. */
1717 if (trueop0 == constm1_rtx)
1718 return simplify_gen_unary (NOT, mode, op1, mode);
1720 /* Subtracting 0 has no effect unless the mode has signed zeros
1721 and supports rounding towards -infinity. In such a case,
1723 if (!(HONOR_SIGNED_ZEROS (mode)
1724 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1725 && trueop1 == CONST0_RTX (mode))
1728 /* See if this is something like X * C - X or vice versa or
1729 if the multiplication is written as a shift. If so, we can
1730 distribute and make a new multiply, shift, or maybe just
1731 have X (if C is 2 in the example above). But don't make
1732 something more expensive than we had before. */
1734 if (SCALAR_INT_MODE_P (mode))
1736 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1737 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1738 rtx lhs = op0, rhs = op1;
1740 if (GET_CODE (lhs) == NEG)
1744 lhs = XEXP (lhs, 0);
1746 else if (GET_CODE (lhs) == MULT
1747 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1749 coeff0l = INTVAL (XEXP (lhs, 1));
1750 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1751 lhs = XEXP (lhs, 0);
1753 else if (GET_CODE (lhs) == ASHIFT
1754 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1755 && INTVAL (XEXP (lhs, 1)) >= 0
1756 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1758 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1760 lhs = XEXP (lhs, 0);
1763 if (GET_CODE (rhs) == NEG)
1767 rhs = XEXP (rhs, 0);
1769 else if (GET_CODE (rhs) == MULT
1770 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1772 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1773 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1774 rhs = XEXP (rhs, 0);
1776 else if (GET_CODE (rhs) == ASHIFT
1777 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1778 && INTVAL (XEXP (rhs, 1)) >= 0
1779 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1781 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1783 rhs = XEXP (rhs, 0);
1786 if (rtx_equal_p (lhs, rhs))
1788 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1790 unsigned HOST_WIDE_INT l;
1793 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1794 coeff = immed_double_const (l, h, mode);
1796 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1797 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1802 /* (a - (-b)) -> (a + b). True even for IEEE. */
1803 if (GET_CODE (op1) == NEG)
1804 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1806 /* (-x - c) may be simplified as (-c - x). */
1807 if (GET_CODE (op0) == NEG
1808 && (GET_CODE (op1) == CONST_INT
1809 || GET_CODE (op1) == CONST_DOUBLE))
1811 tem = simplify_unary_operation (NEG, mode, op1, mode);
1813 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1816 /* Don't let a relocatable value get a negative coeff. */
1817 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1818 return simplify_gen_binary (PLUS, mode,
1820 neg_const_int (mode, op1));
1822 /* (x - (x & y)) -> (x & ~y) */
1823 if (GET_CODE (op1) == AND)
1825 if (rtx_equal_p (op0, XEXP (op1, 0)))
1827 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1828 GET_MODE (XEXP (op1, 1)));
1829 return simplify_gen_binary (AND, mode, op0, tem);
1831 if (rtx_equal_p (op0, XEXP (op1, 1)))
1833 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1834 GET_MODE (XEXP (op1, 0)));
1835 return simplify_gen_binary (AND, mode, op0, tem);
1839 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1840 by reversing the comparison code if valid. */
1841 if (STORE_FLAG_VALUE == 1
1842 && trueop0 == const1_rtx
1843 && COMPARISON_P (op1)
1844 && (reversed = reversed_comparison (op1, mode)))
1847 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1848 if (GET_CODE (op1) == MULT
1849 && GET_CODE (XEXP (op1, 0)) == NEG)
1853 in1 = XEXP (XEXP (op1, 0), 0);
1854 in2 = XEXP (op1, 1);
1855 return simplify_gen_binary (PLUS, mode,
1856 simplify_gen_binary (MULT, mode,
1861 /* Canonicalize (minus (neg A) (mult B C)) to
1862 (minus (mult (neg B) C) A). */
1863 if (GET_CODE (op1) == MULT
1864 && GET_CODE (op0) == NEG)
1868 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1869 in2 = XEXP (op1, 1);
1870 return simplify_gen_binary (MINUS, mode,
1871 simplify_gen_binary (MULT, mode,
1876 /* If one of the operands is a PLUS or a MINUS, see if we can
1877 simplify this by the associative law. This will, for example,
1878 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1879 Don't use the associative law for floating point.
1880 The inaccuracy makes it nonassociative,
1881 and subtle programs can break if operations are associated. */
1883 if (INTEGRAL_MODE_P (mode)
1884 && (plus_minus_operand_p (op0)
1885 || plus_minus_operand_p (op1))
1886 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1891 if (trueop1 == constm1_rtx)
1892 return simplify_gen_unary (NEG, mode, op0, mode);
1894 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1895 x is NaN, since x * 0 is then also NaN. Nor is it valid
1896 when the mode has signed zeros, since multiplying a negative
1897 number by 0 will give -0, not 0. */
1898 if (!HONOR_NANS (mode)
1899 && !HONOR_SIGNED_ZEROS (mode)
1900 && trueop1 == CONST0_RTX (mode)
1901 && ! side_effects_p (op0))
1904 /* In IEEE floating point, x*1 is not equivalent to x for
1906 if (!HONOR_SNANS (mode)
1907 && trueop1 == CONST1_RTX (mode))
1910 /* Convert multiply by constant power of two into shift unless
1911 we are still generating RTL. This test is a kludge. */
1912 if (GET_CODE (trueop1) == CONST_INT
1913 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1914 /* If the mode is larger than the host word size, and the
1915 uppermost bit is set, then this isn't a power of two due
1916 to implicit sign extension. */
1917 && (width <= HOST_BITS_PER_WIDE_INT
1918 || val != HOST_BITS_PER_WIDE_INT - 1))
1919 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1921 /* Likewise for multipliers wider than a word. */
1922 if (GET_CODE (trueop1) == CONST_DOUBLE
1923 && (GET_MODE (trueop1) == VOIDmode
1924 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1925 && GET_MODE (op0) == mode
1926 && CONST_DOUBLE_LOW (trueop1) == 0
1927 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1928 return simplify_gen_binary (ASHIFT, mode, op0,
1929 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1931 /* x*2 is x+x and x*(-1) is -x */
1932 if (GET_CODE (trueop1) == CONST_DOUBLE
1933 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1934 && GET_MODE (op0) == mode)
1937 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1939 if (REAL_VALUES_EQUAL (d, dconst2))
1940 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1942 if (!HONOR_SNANS (mode)
1943 && REAL_VALUES_EQUAL (d, dconstm1))
1944 return simplify_gen_unary (NEG, mode, op0, mode);
1947 /* Optimize -x * -x as x * x. */
1948 if (FLOAT_MODE_P (mode)
1949 && GET_CODE (op0) == NEG
1950 && GET_CODE (op1) == NEG
1951 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1952 && !side_effects_p (XEXP (op0, 0)))
1953 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1955 /* Likewise, optimize abs(x) * abs(x) as x * x. */
1956 if (SCALAR_FLOAT_MODE_P (mode)
1957 && GET_CODE (op0) == ABS
1958 && GET_CODE (op1) == ABS
1959 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1960 && !side_effects_p (XEXP (op0, 0)))
1961 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1963 /* Reassociate multiplication, but for floating point MULTs
1964 only when the user specifies unsafe math optimizations. */
1965 if (! FLOAT_MODE_P (mode)
1966 || flag_unsafe_math_optimizations)
1968 tem = simplify_associative_operation (code, mode, op0, op1);
1975 if (trueop1 == const0_rtx)
1977 if (GET_CODE (trueop1) == CONST_INT
1978 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1979 == GET_MODE_MASK (mode)))
1981 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1983 /* A | (~A) -> -1 */
1984 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1985 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1986 && ! side_effects_p (op0)
1987 && SCALAR_INT_MODE_P (mode))
1990 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1991 if (GET_CODE (op1) == CONST_INT
1992 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1993 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1996 /* Convert (A & B) | A to A. */
1997 if (GET_CODE (op0) == AND
1998 && (rtx_equal_p (XEXP (op0, 0), op1)
1999 || rtx_equal_p (XEXP (op0, 1), op1))
2000 && ! side_effects_p (XEXP (op0, 0))
2001 && ! side_effects_p (XEXP (op0, 1)))
2004 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2005 mode size to (rotate A CX). */
2007 if (GET_CODE (op1) == ASHIFT
2008 || GET_CODE (op1) == SUBREG)
2019 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2020 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2021 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2022 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2023 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2024 == GET_MODE_BITSIZE (mode)))
2025 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2027 /* Same, but for ashift that has been "simplified" to a wider mode
2028 by simplify_shift_const. */
2030 if (GET_CODE (opleft) == SUBREG
2031 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2032 && GET_CODE (opright) == LSHIFTRT
2033 && GET_CODE (XEXP (opright, 0)) == SUBREG
2034 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2035 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2036 && (GET_MODE_SIZE (GET_MODE (opleft))
2037 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2038 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2039 SUBREG_REG (XEXP (opright, 0)))
2040 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2041 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2042 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2043 == GET_MODE_BITSIZE (mode)))
2044 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2045 XEXP (SUBREG_REG (opleft), 1));
2047 /* If we have (ior (and (X C1) C2)), simplify this by making
2048 C1 as small as possible if C1 actually changes. */
2049 if (GET_CODE (op1) == CONST_INT
2050 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2051 || INTVAL (op1) > 0)
2052 && GET_CODE (op0) == AND
2053 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2054 && GET_CODE (op1) == CONST_INT
2055 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2056 return simplify_gen_binary (IOR, mode,
2058 (AND, mode, XEXP (op0, 0),
2059 GEN_INT (INTVAL (XEXP (op0, 1))
2063 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2064 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2065 the PLUS does not affect any of the bits in OP1: then we can do
2066 the IOR as a PLUS and we can associate. This is valid if OP1
2067 can be safely shifted left C bits. */
2068 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2069 && GET_CODE (XEXP (op0, 0)) == PLUS
2070 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2071 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2072 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2074 int count = INTVAL (XEXP (op0, 1));
2075 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2077 if (mask >> count == INTVAL (trueop1)
2078 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2079 return simplify_gen_binary (ASHIFTRT, mode,
2080 plus_constant (XEXP (op0, 0), mask),
2084 tem = simplify_associative_operation (code, mode, op0, op1);
2090 if (trueop1 == const0_rtx)
2092 if (GET_CODE (trueop1) == CONST_INT
2093 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2094 == GET_MODE_MASK (mode)))
2095 return simplify_gen_unary (NOT, mode, op0, mode);
2096 if (rtx_equal_p (trueop0, trueop1)
2097 && ! side_effects_p (op0)
2098 && GET_MODE_CLASS (mode) != MODE_CC)
2099 return CONST0_RTX (mode);
2101 /* Canonicalize XOR of the most significant bit to PLUS. */
2102 if ((GET_CODE (op1) == CONST_INT
2103 || GET_CODE (op1) == CONST_DOUBLE)
2104 && mode_signbit_p (mode, op1))
2105 return simplify_gen_binary (PLUS, mode, op0, op1);
2106 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2107 if ((GET_CODE (op1) == CONST_INT
2108 || GET_CODE (op1) == CONST_DOUBLE)
2109 && GET_CODE (op0) == PLUS
2110 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2111 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2112 && mode_signbit_p (mode, XEXP (op0, 1)))
2113 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2114 simplify_gen_binary (XOR, mode, op1,
2117 /* If we are XORing two things that have no bits in common,
2118 convert them into an IOR. This helps to detect rotation encoded
2119 using those methods and possibly other simplifications. */
2121 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2122 && (nonzero_bits (op0, mode)
2123 & nonzero_bits (op1, mode)) == 0)
2124 return (simplify_gen_binary (IOR, mode, op0, op1));
2126 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2127 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2130 int num_negated = 0;
2132 if (GET_CODE (op0) == NOT)
2133 num_negated++, op0 = XEXP (op0, 0);
2134 if (GET_CODE (op1) == NOT)
2135 num_negated++, op1 = XEXP (op1, 0);
2137 if (num_negated == 2)
2138 return simplify_gen_binary (XOR, mode, op0, op1);
2139 else if (num_negated == 1)
2140 return simplify_gen_unary (NOT, mode,
2141 simplify_gen_binary (XOR, mode, op0, op1),
2145 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2146 correspond to a machine insn or result in further simplifications
2147 if B is a constant. */
2149 if (GET_CODE (op0) == AND
2150 && rtx_equal_p (XEXP (op0, 1), op1)
2151 && ! side_effects_p (op1))
2152 return simplify_gen_binary (AND, mode,
2153 simplify_gen_unary (NOT, mode,
2154 XEXP (op0, 0), mode),
2157 else if (GET_CODE (op0) == AND
2158 && rtx_equal_p (XEXP (op0, 0), op1)
2159 && ! side_effects_p (op1))
2160 return simplify_gen_binary (AND, mode,
2161 simplify_gen_unary (NOT, mode,
2162 XEXP (op0, 1), mode),
2165 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2166 comparison if STORE_FLAG_VALUE is 1. */
2167 if (STORE_FLAG_VALUE == 1
2168 && trueop1 == const1_rtx
2169 && COMPARISON_P (op0)
2170 && (reversed = reversed_comparison (op0, mode)))
2173 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2174 is (lt foo (const_int 0)), so we can perform the above
2175 simplification if STORE_FLAG_VALUE is 1. */
2177 if (STORE_FLAG_VALUE == 1
2178 && trueop1 == const1_rtx
2179 && GET_CODE (op0) == LSHIFTRT
2180 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2181 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2182 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2184 /* (xor (comparison foo bar) (const_int sign-bit))
2185 when STORE_FLAG_VALUE is the sign bit. */
2186 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2187 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2188 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2189 && trueop1 == const_true_rtx
2190 && COMPARISON_P (op0)
2191 && (reversed = reversed_comparison (op0, mode)))
2196 tem = simplify_associative_operation (code, mode, op0, op1);
2202 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2204 /* If we are turning off bits already known off in OP0, we need
2206 if (GET_CODE (trueop1) == CONST_INT
2207 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2208 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2210 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2211 && GET_MODE_CLASS (mode) != MODE_CC)
2214 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2215 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2216 && ! side_effects_p (op0)
2217 && GET_MODE_CLASS (mode) != MODE_CC)
2218 return CONST0_RTX (mode);
2220 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2221 there are no nonzero bits of C outside of X's mode. */
2222 if ((GET_CODE (op0) == SIGN_EXTEND
2223 || GET_CODE (op0) == ZERO_EXTEND)
2224 && GET_CODE (trueop1) == CONST_INT
2225 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2226 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2227 & INTVAL (trueop1)) == 0)
2229 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2230 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2231 gen_int_mode (INTVAL (trueop1),
2233 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2236 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2237 insn (and may simplify more). */
2238 if (GET_CODE (op0) == XOR
2239 && rtx_equal_p (XEXP (op0, 0), op1)
2240 && ! side_effects_p (op1))
2241 return simplify_gen_binary (AND, mode,
2242 simplify_gen_unary (NOT, mode,
2243 XEXP (op0, 1), mode),
2246 if (GET_CODE (op0) == XOR
2247 && rtx_equal_p (XEXP (op0, 1), op1)
2248 && ! side_effects_p (op1))
2249 return simplify_gen_binary (AND, mode,
2250 simplify_gen_unary (NOT, mode,
2251 XEXP (op0, 0), mode),
2254 /* Similarly for (~(A ^ B)) & A. */
2255 if (GET_CODE (op0) == NOT
2256 && GET_CODE (XEXP (op0, 0)) == XOR
2257 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2258 && ! side_effects_p (op1))
2259 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2261 if (GET_CODE (op0) == NOT
2262 && GET_CODE (XEXP (op0, 0)) == XOR
2263 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2264 && ! side_effects_p (op1))
2265 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2267 /* Convert (A | B) & A to A. */
2268 if (GET_CODE (op0) == IOR
2269 && (rtx_equal_p (XEXP (op0, 0), op1)
2270 || rtx_equal_p (XEXP (op0, 1), op1))
2271 && ! side_effects_p (XEXP (op0, 0))
2272 && ! side_effects_p (XEXP (op0, 1)))
2275 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2276 ((A & N) + B) & M -> (A + B) & M
2277 Similarly if (N & M) == 0,
2278 ((A | N) + B) & M -> (A + B) & M
2279 and for - instead of + and/or ^ instead of |. */
2280 if (GET_CODE (trueop1) == CONST_INT
2281 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2282 && ~INTVAL (trueop1)
2283 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2284 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2289 pmop[0] = XEXP (op0, 0);
2290 pmop[1] = XEXP (op0, 1);
2292 for (which = 0; which < 2; which++)
2295 switch (GET_CODE (tem))
2298 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2299 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2300 == INTVAL (trueop1))
2301 pmop[which] = XEXP (tem, 0);
2305 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2306 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2307 pmop[which] = XEXP (tem, 0);
2314 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2316 tem = simplify_gen_binary (GET_CODE (op0), mode,
2318 return simplify_gen_binary (code, mode, tem, op1);
2321 tem = simplify_associative_operation (code, mode, op0, op1);
2327 /* 0/x is 0 (or x&0 if x has side-effects). */
2328 if (trueop0 == CONST0_RTX (mode))
2330 if (side_effects_p (op1))
2331 return simplify_gen_binary (AND, mode, op1, trueop0);
2335 if (trueop1 == CONST1_RTX (mode))
2336 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2337 /* Convert divide by power of two into shift. */
2338 if (GET_CODE (trueop1) == CONST_INT
2339 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2340 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2344 /* Handle floating point and integers separately. */
2345 if (SCALAR_FLOAT_MODE_P (mode))
2347 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2348 safe for modes with NaNs, since 0.0 / 0.0 will then be
2349 NaN rather than 0.0. Nor is it safe for modes with signed
2350 zeros, since dividing 0 by a negative number gives -0.0 */
2351 if (trueop0 == CONST0_RTX (mode)
2352 && !HONOR_NANS (mode)
2353 && !HONOR_SIGNED_ZEROS (mode)
2354 && ! side_effects_p (op1))
2357 if (trueop1 == CONST1_RTX (mode)
2358 && !HONOR_SNANS (mode))
2361 if (GET_CODE (trueop1) == CONST_DOUBLE
2362 && trueop1 != CONST0_RTX (mode))
2365 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2368 if (REAL_VALUES_EQUAL (d, dconstm1)
2369 && !HONOR_SNANS (mode))
2370 return simplify_gen_unary (NEG, mode, op0, mode);
2372 /* Change FP division by a constant into multiplication.
2373 Only do this with -funsafe-math-optimizations. */
2374 if (flag_unsafe_math_optimizations
2375 && !REAL_VALUES_EQUAL (d, dconst0))
2377 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2378 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2379 return simplify_gen_binary (MULT, mode, op0, tem);
2385 /* 0/x is 0 (or x&0 if x has side-effects). */
2386 if (trueop0 == CONST0_RTX (mode))
2388 if (side_effects_p (op1))
2389 return simplify_gen_binary (AND, mode, op1, trueop0);
2393 if (trueop1 == CONST1_RTX (mode))
2394 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2396 if (trueop1 == constm1_rtx)
2398 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2399 return simplify_gen_unary (NEG, mode, x, mode);
2405 /* 0%x is 0 (or x&0 if x has side-effects). */
2406 if (trueop0 == CONST0_RTX (mode))
2408 if (side_effects_p (op1))
2409 return simplify_gen_binary (AND, mode, op1, trueop0);
2412 /* x%1 is 0 (or x&0 if x has side-effects). */
2413 if (trueop1 == CONST1_RTX (mode))
2415 if (side_effects_p (op0))
2416 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2417 return CONST0_RTX (mode);
2419 /* Implement modulus by power of two as AND. */
2420 if (GET_CODE (trueop1) == CONST_INT
2421 && exact_log2 (INTVAL (trueop1)) > 0)
2422 return simplify_gen_binary (AND, mode, op0,
2423 GEN_INT (INTVAL (op1) - 1));
2427 /* 0%x is 0 (or x&0 if x has side-effects). */
2428 if (trueop0 == CONST0_RTX (mode))
2430 if (side_effects_p (op1))
2431 return simplify_gen_binary (AND, mode, op1, trueop0);
2434 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2435 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2437 if (side_effects_p (op0))
2438 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2439 return CONST0_RTX (mode);
2446 if (trueop1 == CONST0_RTX (mode))
2448 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2450 /* Rotating ~0 always results in ~0. */
2451 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2452 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2453 && ! side_effects_p (op1))
2459 if (trueop1 == CONST0_RTX (mode))
2461 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2466 if (trueop1 == CONST0_RTX (mode))
2468 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2470 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2471 if (GET_CODE (op0) == CLZ
2472 && GET_CODE (trueop1) == CONST_INT
2473 && STORE_FLAG_VALUE == 1
2474 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2476 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2477 unsigned HOST_WIDE_INT zero_val = 0;
2479 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2480 && zero_val == GET_MODE_BITSIZE (imode)
2481 && INTVAL (trueop1) == exact_log2 (zero_val))
2482 return simplify_gen_relational (EQ, mode, imode,
2483 XEXP (op0, 0), const0_rtx);
2488 if (width <= HOST_BITS_PER_WIDE_INT
2489 && GET_CODE (trueop1) == CONST_INT
2490 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2491 && ! side_effects_p (op0))
2493 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2495 tem = simplify_associative_operation (code, mode, op0, op1);
2501 if (width <= HOST_BITS_PER_WIDE_INT
2502 && GET_CODE (trueop1) == CONST_INT
2503 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2504 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2505 && ! side_effects_p (op0))
2507 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2509 tem = simplify_associative_operation (code, mode, op0, op1);
2515 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2517 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2519 tem = simplify_associative_operation (code, mode, op0, op1);
2525 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2527 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2529 tem = simplify_associative_operation (code, mode, op0, op1);
2538 /* ??? There are simplifications that can be done. */
2542 if (!VECTOR_MODE_P (mode))
2544 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2545 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2546 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2547 gcc_assert (XVECLEN (trueop1, 0) == 1);
2548 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2550 if (GET_CODE (trueop0) == CONST_VECTOR)
2551 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2556 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2557 gcc_assert (GET_MODE_INNER (mode)
2558 == GET_MODE_INNER (GET_MODE (trueop0)));
2559 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2561 if (GET_CODE (trueop0) == CONST_VECTOR)
2563 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2564 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2565 rtvec v = rtvec_alloc (n_elts);
2568 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2569 for (i = 0; i < n_elts; i++)
2571 rtx x = XVECEXP (trueop1, 0, i);
2573 gcc_assert (GET_CODE (x) == CONST_INT);
2574 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2578 return gen_rtx_CONST_VECTOR (mode, v);
2582 if (XVECLEN (trueop1, 0) == 1
2583 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2584 && GET_CODE (trueop0) == VEC_CONCAT)
2587 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2589 /* Try to find the element in the VEC_CONCAT. */
2590 while (GET_MODE (vec) != mode
2591 && GET_CODE (vec) == VEC_CONCAT)
2593 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2594 if (offset < vec_size)
2595 vec = XEXP (vec, 0);
2599 vec = XEXP (vec, 1);
2601 vec = avoid_constant_pool_reference (vec);
2604 if (GET_MODE (vec) == mode)
2611 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2612 ? GET_MODE (trueop0)
2613 : GET_MODE_INNER (mode));
2614 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2615 ? GET_MODE (trueop1)
2616 : GET_MODE_INNER (mode));
2618 gcc_assert (VECTOR_MODE_P (mode));
2619 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2620 == GET_MODE_SIZE (mode));
2622 if (VECTOR_MODE_P (op0_mode))
2623 gcc_assert (GET_MODE_INNER (mode)
2624 == GET_MODE_INNER (op0_mode));
2626 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2628 if (VECTOR_MODE_P (op1_mode))
2629 gcc_assert (GET_MODE_INNER (mode)
2630 == GET_MODE_INNER (op1_mode));
2632 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2634 if ((GET_CODE (trueop0) == CONST_VECTOR
2635 || GET_CODE (trueop0) == CONST_INT
2636 || GET_CODE (trueop0) == CONST_DOUBLE)
2637 && (GET_CODE (trueop1) == CONST_VECTOR
2638 || GET_CODE (trueop1) == CONST_INT
2639 || GET_CODE (trueop1) == CONST_DOUBLE))
2641 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2642 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2643 rtvec v = rtvec_alloc (n_elts);
2645 unsigned in_n_elts = 1;
2647 if (VECTOR_MODE_P (op0_mode))
2648 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2649 for (i = 0; i < n_elts; i++)
2653 if (!VECTOR_MODE_P (op0_mode))
2654 RTVEC_ELT (v, i) = trueop0;
2656 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2660 if (!VECTOR_MODE_P (op1_mode))
2661 RTVEC_ELT (v, i) = trueop1;
2663 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2668 return gen_rtx_CONST_VECTOR (mode, v);
2681 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2684 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2686 unsigned int width = GET_MODE_BITSIZE (mode);
2688 if (VECTOR_MODE_P (mode)
2689 && code != VEC_CONCAT
2690 && GET_CODE (op0) == CONST_VECTOR
2691 && GET_CODE (op1) == CONST_VECTOR)
2693 unsigned n_elts = GET_MODE_NUNITS (mode);
2694 enum machine_mode op0mode = GET_MODE (op0);
2695 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2696 enum machine_mode op1mode = GET_MODE (op1);
2697 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2698 rtvec v = rtvec_alloc (n_elts);
2701 gcc_assert (op0_n_elts == n_elts);
2702 gcc_assert (op1_n_elts == n_elts);
2703 for (i = 0; i < n_elts; i++)
2705 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2706 CONST_VECTOR_ELT (op0, i),
2707 CONST_VECTOR_ELT (op1, i));
2710 RTVEC_ELT (v, i) = x;
2713 return gen_rtx_CONST_VECTOR (mode, v);
2716 if (VECTOR_MODE_P (mode)
2717 && code == VEC_CONCAT
2718 && CONSTANT_P (op0) && CONSTANT_P (op1))
2720 unsigned n_elts = GET_MODE_NUNITS (mode);
2721 rtvec v = rtvec_alloc (n_elts);
2723 gcc_assert (n_elts >= 2);
2726 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2727 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2729 RTVEC_ELT (v, 0) = op0;
2730 RTVEC_ELT (v, 1) = op1;
2734 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2735 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2738 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2739 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2740 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2742 for (i = 0; i < op0_n_elts; ++i)
2743 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2744 for (i = 0; i < op1_n_elts; ++i)
2745 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2748 return gen_rtx_CONST_VECTOR (mode, v);
2751 if (SCALAR_FLOAT_MODE_P (mode)
2752 && GET_CODE (op0) == CONST_DOUBLE
2753 && GET_CODE (op1) == CONST_DOUBLE
2754 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2765 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2767 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2769 for (i = 0; i < 4; i++)
2786 real_from_target (&r, tmp0, mode);
2787 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2791 REAL_VALUE_TYPE f0, f1, value, result;
2794 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2795 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2796 real_convert (&f0, mode, &f0);
2797 real_convert (&f1, mode, &f1);
2799 if (HONOR_SNANS (mode)
2800 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2804 && REAL_VALUES_EQUAL (f1, dconst0)
2805 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2808 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2809 && flag_trapping_math
2810 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2812 int s0 = REAL_VALUE_NEGATIVE (f0);
2813 int s1 = REAL_VALUE_NEGATIVE (f1);
2818 /* Inf + -Inf = NaN plus exception. */
2823 /* Inf - Inf = NaN plus exception. */
2828 /* Inf / Inf = NaN plus exception. */
2835 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2836 && flag_trapping_math
2837 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2838 || (REAL_VALUE_ISINF (f1)
2839 && REAL_VALUES_EQUAL (f0, dconst0))))
2840 /* Inf * 0 = NaN plus exception. */
2843 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2845 real_convert (&result, mode, &value);
2847 /* Don't constant fold this floating point operation if
2848 the result has overflowed and flag_trapping_math. */
2850 if (flag_trapping_math
2851 && MODE_HAS_INFINITIES (mode)
2852 && REAL_VALUE_ISINF (result)
2853 && !REAL_VALUE_ISINF (f0)
2854 && !REAL_VALUE_ISINF (f1))
2855 /* Overflow plus exception. */
2858 /* Don't constant fold this floating point operation if the
2859 result may dependent upon the run-time rounding mode and
2860 flag_rounding_math is set, or if GCC's software emulation
2861 is unable to accurately represent the result. */
2863 if ((flag_rounding_math
2864 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2865 && !flag_unsafe_math_optimizations))
2866 && (inexact || !real_identical (&result, &value)))
2869 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2873 /* We can fold some multi-word operations. */
2874 if (GET_MODE_CLASS (mode) == MODE_INT
2875 && width == HOST_BITS_PER_WIDE_INT * 2
2876 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2877 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2879 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2880 HOST_WIDE_INT h1, h2, hv, ht;
2882 if (GET_CODE (op0) == CONST_DOUBLE)
2883 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2885 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2887 if (GET_CODE (op1) == CONST_DOUBLE)
2888 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2890 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2895 /* A - B == A + (-B). */
2896 neg_double (l2, h2, &lv, &hv);
2899 /* Fall through.... */
2902 add_double (l1, h1, l2, h2, &lv, &hv);
2906 mul_double (l1, h1, l2, h2, &lv, &hv);
2910 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2911 &lv, &hv, <, &ht))
2916 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2917 <, &ht, &lv, &hv))
2922 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2923 &lv, &hv, <, &ht))
2928 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2929 <, &ht, &lv, &hv))
2934 lv = l1 & l2, hv = h1 & h2;
2938 lv = l1 | l2, hv = h1 | h2;
2942 lv = l1 ^ l2, hv = h1 ^ h2;
2948 && ((unsigned HOST_WIDE_INT) l1
2949 < (unsigned HOST_WIDE_INT) l2)))
2958 && ((unsigned HOST_WIDE_INT) l1
2959 > (unsigned HOST_WIDE_INT) l2)))
2966 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2968 && ((unsigned HOST_WIDE_INT) l1
2969 < (unsigned HOST_WIDE_INT) l2)))
2976 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2978 && ((unsigned HOST_WIDE_INT) l1
2979 > (unsigned HOST_WIDE_INT) l2)))
2985 case LSHIFTRT: case ASHIFTRT:
2987 case ROTATE: case ROTATERT:
2988 if (SHIFT_COUNT_TRUNCATED)
2989 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2991 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2994 if (code == LSHIFTRT || code == ASHIFTRT)
2995 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2997 else if (code == ASHIFT)
2998 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2999 else if (code == ROTATE)
3000 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3001 else /* code == ROTATERT */
3002 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3009 return immed_double_const (lv, hv, mode);
3012 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3013 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3015 /* Get the integer argument values in two forms:
3016 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3018 arg0 = INTVAL (op0);
3019 arg1 = INTVAL (op1);
3021 if (width < HOST_BITS_PER_WIDE_INT)
3023 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3024 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3027 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3028 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3031 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3032 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3040 /* Compute the value of the arithmetic. */
3045 val = arg0s + arg1s;
3049 val = arg0s - arg1s;
3053 val = arg0s * arg1s;
3058 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3061 val = arg0s / arg1s;
3066 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3069 val = arg0s % arg1s;
3074 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3077 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3082 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3085 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3103 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3104 the value is in range. We can't return any old value for
3105 out-of-range arguments because either the middle-end (via
3106 shift_truncation_mask) or the back-end might be relying on
3107 target-specific knowledge. Nor can we rely on
3108 shift_truncation_mask, since the shift might not be part of an
3109 ashlM3, lshrM3 or ashrM3 instruction. */
3110 if (SHIFT_COUNT_TRUNCATED)
3111 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3112 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3115 val = (code == ASHIFT
3116 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3117 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3119 /* Sign-extend the result for arithmetic right shifts. */
3120 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3121 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3129 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3130 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3138 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3139 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3143 /* Do nothing here. */
3147 val = arg0s <= arg1s ? arg0s : arg1s;
3151 val = ((unsigned HOST_WIDE_INT) arg0
3152 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3156 val = arg0s > arg1s ? arg0s : arg1s;
3160 val = ((unsigned HOST_WIDE_INT) arg0
3161 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3169 /* ??? There are simplifications that can be done. */
3176 return gen_int_mode (val, mode);
3184 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3188 Rather than test for a specific case, we do this by a brute-force method
3188 and do all possible simplifications until no more changes occur. Then
3189 we rebuild the operation. */
3191 struct simplify_plus_minus_op_data
3198 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3200 const struct simplify_plus_minus_op_data *d1 = p1;
3201 const struct simplify_plus_minus_op_data *d2 = p2;
3204 result = (commutative_operand_precedence (d2->op)
3205 - commutative_operand_precedence (d1->op));
3209 /* Group together equal REGs to do more simplification. */
3210 if (REG_P (d1->op) && REG_P (d2->op))
3211 return REGNO (d1->op) - REGNO (d2->op);
3217 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3220 struct simplify_plus_minus_op_data ops[8];
3222 int n_ops = 2, input_ops = 2;
3223 int changed, n_constants = 0, canonicalized = 0;
3226 memset (ops, 0, sizeof ops);
3228 /* Set up the two operands and then expand them until nothing has been
3229 changed. If we run out of room in our array, give up; this should
3230 almost never happen. */
3235 ops[1].neg = (code == MINUS);
3241 for (i = 0; i < n_ops; i++)
3243 rtx this_op = ops[i].op;
3244 int this_neg = ops[i].neg;
3245 enum rtx_code this_code = GET_CODE (this_op);
3254 ops[n_ops].op = XEXP (this_op, 1);
3255 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3258 ops[i].op = XEXP (this_op, 0);
3261 canonicalized |= this_neg;
3265 ops[i].op = XEXP (this_op, 0);
3266 ops[i].neg = ! this_neg;
3273 && GET_CODE (XEXP (this_op, 0)) == PLUS
3274 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3275 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3277 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3278 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3279 ops[n_ops].neg = this_neg;
3287 /* ~a -> (-a - 1) */
3290 ops[n_ops].op = constm1_rtx;
3291 ops[n_ops++].neg = this_neg;
3292 ops[i].op = XEXP (this_op, 0);
3293 ops[i].neg = !this_neg;
3303 ops[i].op = neg_const_int (mode, this_op);
3317 if (n_constants > 1)
3320 gcc_assert (n_ops >= 2);
3322 /* If we only have two operands, we can avoid the loops. */
3325 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3328 /* Get the two operands. Be careful with the order, especially for
3329 the cases where code == MINUS. */
3330 if (ops[0].neg && ops[1].neg)
3332 lhs = gen_rtx_NEG (mode, ops[0].op);
3335 else if (ops[0].neg)
3346 return simplify_const_binary_operation (code, mode, lhs, rhs);
3349 /* Now simplify each pair of operands until nothing changes. */
3352 /* Insertion sort is good enough for an eight-element array. */
3353 for (i = 1; i < n_ops; i++)
3355 struct simplify_plus_minus_op_data save;
3357 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3363 ops[j + 1] = ops[j];
3364 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3368 /* This is only useful the first time through. */
3373 for (i = n_ops - 1; i > 0; i--)
3374 for (j = i - 1; j >= 0; j--)
3376 rtx lhs = ops[j].op, rhs = ops[i].op;
3377 int lneg = ops[j].neg, rneg = ops[i].neg;
3379 if (lhs != 0 && rhs != 0)
3381 enum rtx_code ncode = PLUS;
3387 tem = lhs, lhs = rhs, rhs = tem;
3389 else if (swap_commutative_operands_p (lhs, rhs))
3390 tem = lhs, lhs = rhs, rhs = tem;
3392 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3393 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3395 rtx tem_lhs, tem_rhs;
3397 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3398 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3399 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3401 if (tem && !CONSTANT_P (tem))
3402 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3405 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3407 /* Reject "simplifications" that just wrap the two
3408 arguments in a CONST. Failure to do so can result
3409 in infinite recursion with simplify_binary_operation
3410 when it calls us to simplify CONST operations. */
3412 && ! (GET_CODE (tem) == CONST
3413 && GET_CODE (XEXP (tem, 0)) == ncode
3414 && XEXP (XEXP (tem, 0), 0) == lhs
3415 && XEXP (XEXP (tem, 0), 1) == rhs))
3418 if (GET_CODE (tem) == NEG)
3419 tem = XEXP (tem, 0), lneg = !lneg;
3420 if (GET_CODE (tem) == CONST_INT && lneg)
3421 tem = neg_const_int (mode, tem), lneg = 0;
3425 ops[j].op = NULL_RTX;
3431 /* Pack all the operands to the lower-numbered entries. */
3432 for (i = 0, j = 0; j < n_ops; j++)
3442 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3444 && GET_CODE (ops[1].op) == CONST_INT
3445 && CONSTANT_P (ops[0].op)
3447 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3449 /* We suppressed creation of trivial CONST expressions in the
3450 combination loop to avoid recursion. Create one manually now.
3451 The combination loop should have ensured that there is exactly
3452 one CONST_INT, and the sort will have ensured that it is last
3453 in the array and that any other constant will be next-to-last. */
3456 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3457 && CONSTANT_P (ops[n_ops - 2].op))
3459 rtx value = ops[n_ops - 1].op;
3460 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3461 value = neg_const_int (mode, value);
3462 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3466 /* Put a non-negated operand first, if possible. */
3468 for (i = 0; i < n_ops && ops[i].neg; i++)
3471 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3480 /* Now make the result by performing the requested operations. */
3482 for (i = 1; i < n_ops; i++)
3483 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3484 mode, result, ops[i].op);
3489 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3491 plus_minus_operand_p (rtx x)
3493 return GET_CODE (x) == PLUS
3494 || GET_CODE (x) == MINUS
3495 || (GET_CODE (x) == CONST
3496 && GET_CODE (XEXP (x, 0)) == PLUS
3497 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3498 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3501 /* Like simplify_binary_operation except used for relational operators.
3502 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3503 not also be VOIDmode.
3505 CMP_MODE specifies in which mode the comparison is done in, so it is
3506 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3507 the operands or, if both are VOIDmode, the operands are compared in
3508 "infinite precision". */
3510 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3511 enum machine_mode cmp_mode, rtx op0, rtx op1)
3513 rtx tem, trueop0, trueop1;
3515 if (cmp_mode == VOIDmode)
3516 cmp_mode = GET_MODE (op0);
3517 if (cmp_mode == VOIDmode)
3518 cmp_mode = GET_MODE (op1);
3520 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3523 if (SCALAR_FLOAT_MODE_P (mode))
3525 if (tem == const0_rtx)
3526 return CONST0_RTX (mode);
3527 #ifdef FLOAT_STORE_FLAG_VALUE
3529 REAL_VALUE_TYPE val;
3530 val = FLOAT_STORE_FLAG_VALUE (mode);
3531 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3537 if (VECTOR_MODE_P (mode))
3539 if (tem == const0_rtx)
3540 return CONST0_RTX (mode);
3541 #ifdef VECTOR_STORE_FLAG_VALUE
3546 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3547 if (val == NULL_RTX)
3549 if (val == const1_rtx)
3550 return CONST1_RTX (mode);
3552 units = GET_MODE_NUNITS (mode);
3553 v = rtvec_alloc (units);
3554 for (i = 0; i < units; i++)
3555 RTVEC_ELT (v, i) = val;
3556 return gen_rtx_raw_CONST_VECTOR (mode, v);
3566 /* For the following tests, ensure const0_rtx is op1. */
3567 if (swap_commutative_operands_p (op0, op1)
3568 || (op0 == const0_rtx && op1 != const0_rtx))
3569 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3571 /* If op0 is a compare, extract the comparison arguments from it. */
3572 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3573 return simplify_relational_operation (code, mode, VOIDmode,
3574 XEXP (op0, 0), XEXP (op0, 1));
3576 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3580 trueop0 = avoid_constant_pool_reference (op0);
3581 trueop1 = avoid_constant_pool_reference (op1);
3582 return simplify_relational_operation_1 (code, mode, cmp_mode,
3586 /* This part of simplify_relational_operation is only used when CMP_MODE
3587 is not in class MODE_CC (i.e. it is a real comparison).
3589 MODE is the mode of the result, while CMP_MODE specifies in which
3590 mode the comparison is done in, so it is the mode of the operands. */
3593 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3594 enum machine_mode cmp_mode, rtx op0, rtx op1)
3596 enum rtx_code op0code = GET_CODE (op0);
3598 if (GET_CODE (op1) == CONST_INT)
3600 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3602 /* If op0 is a comparison, extract the comparison arguments
3606 if (GET_MODE (op0) == mode)
3607 return simplify_rtx (op0);
3609 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3610 XEXP (op0, 0), XEXP (op0, 1));
3612 else if (code == EQ)
3614 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3615 if (new_code != UNKNOWN)
3616 return simplify_gen_relational (new_code, mode, VOIDmode,
3617 XEXP (op0, 0), XEXP (op0, 1));
3622 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3623 if ((code == EQ || code == NE)
3624 && (op0code == PLUS || op0code == MINUS)
3626 && CONSTANT_P (XEXP (op0, 1))
3627 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3629 rtx x = XEXP (op0, 0);
3630 rtx c = XEXP (op0, 1);
3632 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3634 return simplify_gen_relational (code, mode, cmp_mode, x, c);
3637 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3638 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3640 && op1 == const0_rtx
3641 && GET_MODE_CLASS (mode) == MODE_INT
3642 && cmp_mode != VOIDmode
3643 /* ??? Work-around BImode bugs in the ia64 backend. */
3645 && cmp_mode != BImode
3646 && nonzero_bits (op0, cmp_mode) == 1
3647 && STORE_FLAG_VALUE == 1)
3648 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3649 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3650 : lowpart_subreg (mode, op0, cmp_mode);
3652 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3653 if ((code == EQ || code == NE)
3654 && op1 == const0_rtx
3656 return simplify_gen_relational (code, mode, cmp_mode,
3657 XEXP (op0, 0), XEXP (op0, 1));
3659 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3660 if ((code == EQ || code == NE)
3662 && rtx_equal_p (XEXP (op0, 0), op1)
3663 && !side_effects_p (XEXP (op0, 0)))
3664 return simplify_gen_relational (code, mode, cmp_mode,
3665 XEXP (op0, 1), const0_rtx);
3667 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3668 if ((code == EQ || code == NE)
3670 && rtx_equal_p (XEXP (op0, 1), op1)
3671 && !side_effects_p (XEXP (op0, 1)))
3672 return simplify_gen_relational (code, mode, cmp_mode,
3673 XEXP (op0, 0), const0_rtx);
3675 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3676 if ((code == EQ || code == NE)
3678 && (GET_CODE (op1) == CONST_INT
3679 || GET_CODE (op1) == CONST_DOUBLE)
3680 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3681 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3682 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3683 simplify_gen_binary (XOR, cmp_mode,
3684 XEXP (op0, 1), op1));
3689 /* Check if the given comparison (done in the given MODE) is actually a
3690 tautology or a contradiction.
3691 If no simplification is possible, this function returns zero.
3692 Otherwise, it returns either const_true_rtx or const0_rtx. */
3695 simplify_const_relational_operation (enum rtx_code code,
3696 enum machine_mode mode,
3699 int equal, op0lt, op0ltu, op1lt, op1ltu;
3704 gcc_assert (mode != VOIDmode
3705 || (GET_MODE (op0) == VOIDmode
3706 && GET_MODE (op1) == VOIDmode));
3708 /* If op0 is a compare, extract the comparison arguments from it. */
3709 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3711 op1 = XEXP (op0, 1);
3712 op0 = XEXP (op0, 0);
3714 if (GET_MODE (op0) != VOIDmode)
3715 mode = GET_MODE (op0);
3716 else if (GET_MODE (op1) != VOIDmode)
3717 mode = GET_MODE (op1);
3722 /* We can't simplify MODE_CC values since we don't know what the
3723 actual comparison is. */
3724 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3727 /* Make sure the constant is second. */
3728 if (swap_commutative_operands_p (op0, op1))
3730 tem = op0, op0 = op1, op1 = tem;
3731 code = swap_condition (code);
3734 trueop0 = avoid_constant_pool_reference (op0);
3735 trueop1 = avoid_constant_pool_reference (op1);
3737 /* For integer comparisons of A and B maybe we can simplify A - B and can
3738 then simplify a comparison of that with zero. If A and B are both either
3739 a register or a CONST_INT, this can't help; testing for these cases will
3740 prevent infinite recursion here and speed things up.
3742 We can only do this for EQ and NE comparisons as otherwise we may
3743 lose or introduce overflow which we cannot disregard as undefined as
3744 we do not know the signedness of the operation on either the left or
3745 the right hand side of the comparison. */
3747 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3748 && (code == EQ || code == NE)
3749 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3750 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3751 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3752 /* We cannot do this if tem is a nonzero address. */
3753 && ! nonzero_address_p (tem))
3754 return simplify_const_relational_operation (signed_condition (code),
3755 mode, tem, const0_rtx);
3757 if (! HONOR_NANS (mode) && code == ORDERED)
3758 return const_true_rtx;
3760 if (! HONOR_NANS (mode) && code == UNORDERED)
3763 /* For modes without NaNs, if the two operands are equal, we know the
3764 result except if they have side-effects. */
3765 if (! HONOR_NANS (GET_MODE (trueop0))
3766 && rtx_equal_p (trueop0, trueop1)
3767 && ! side_effects_p (trueop0))
3768 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3770 /* If the operands are floating-point constants, see if we can fold
3772 else if (GET_CODE (trueop0) == CONST_DOUBLE
3773 && GET_CODE (trueop1) == CONST_DOUBLE
3774 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3776 REAL_VALUE_TYPE d0, d1;
3778 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3779 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3781 /* Comparisons are unordered iff at least one of the values is NaN. */
3782 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3792 return const_true_rtx;
3805 equal = REAL_VALUES_EQUAL (d0, d1);
3806 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3807 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3810 /* Otherwise, see if the operands are both integers. */
3811 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3812 && (GET_CODE (trueop0) == CONST_DOUBLE
3813 || GET_CODE (trueop0) == CONST_INT)
3814 && (GET_CODE (trueop1) == CONST_DOUBLE
3815 || GET_CODE (trueop1) == CONST_INT))
3817 int width = GET_MODE_BITSIZE (mode);
3818 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3819 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3821 /* Get the two words comprising each integer constant. */
3822 if (GET_CODE (trueop0) == CONST_DOUBLE)
3824 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3825 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3829 l0u = l0s = INTVAL (trueop0);
3830 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3833 if (GET_CODE (trueop1) == CONST_DOUBLE)
3835 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3836 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3840 l1u = l1s = INTVAL (trueop1);
3841 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3844 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3845 we have to sign or zero-extend the values. */
3846 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3848 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3849 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3851 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3852 l0s |= ((HOST_WIDE_INT) (-1) << width);
3854 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3855 l1s |= ((HOST_WIDE_INT) (-1) << width);
3857 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3858 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3860 equal = (h0u == h1u && l0u == l1u);
3861 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3862 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3863 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3864 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3867 /* Otherwise, there are some code-specific tests we can make. */
3870 /* Optimize comparisons with upper and lower bounds. */
3871 if (SCALAR_INT_MODE_P (mode)
3872 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3885 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3892 /* x >= min is always true. */
3893 if (rtx_equal_p (trueop1, mmin))
3894 tem = const_true_rtx;
3900 /* x <= max is always true. */
3901 if (rtx_equal_p (trueop1, mmax))
3902 tem = const_true_rtx;
3907 /* x > max is always false. */
3908 if (rtx_equal_p (trueop1, mmax))
3914 /* x < min is always false. */
3915 if (rtx_equal_p (trueop1, mmin))
3922 if (tem == const0_rtx
3923 || tem == const_true_rtx)
3930 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3935 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3936 return const_true_rtx;
3940 /* Optimize abs(x) < 0.0. */
3941 if (trueop1 == CONST0_RTX (mode)
3942 && !HONOR_SNANS (mode)
3943 && (!INTEGRAL_MODE_P (mode)
3944 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
3946 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3948 if (GET_CODE (tem) == ABS)
3954 /* Optimize abs(x) >= 0.0. */
3955 if (trueop1 == CONST0_RTX (mode)
3956 && !HONOR_NANS (mode)
3957 && (!INTEGRAL_MODE_P (mode)
3958 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
3960 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3962 if (GET_CODE (tem) == ABS)
3963 return const_true_rtx;
3968 /* Optimize ! (abs(x) < 0.0). */
3969 if (trueop1 == CONST0_RTX (mode))
3971 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3973 if (GET_CODE (tem) == ABS)
3974 return const_true_rtx;
3985 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3991 return equal ? const_true_rtx : const0_rtx;
3994 return ! equal ? const_true_rtx : const0_rtx;
3997 return op0lt ? const_true_rtx : const0_rtx;
4000 return op1lt ? const_true_rtx : const0_rtx;
4002 return op0ltu ? const_true_rtx : const0_rtx;
4004 return op1ltu ? const_true_rtx : const0_rtx;
4007 return equal || op0lt ? const_true_rtx : const0_rtx;
4010 return equal || op1lt ? const_true_rtx : const0_rtx;
4012 return equal || op0ltu ? const_true_rtx : const0_rtx;
4014 return equal || op1ltu ? const_true_rtx : const0_rtx;
4016 return const_true_rtx;
4024 /* Simplify CODE, an operation with result mode MODE and three operands,
4025 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4026 a constant. Return 0 if no simplification is possible. */
4029 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4030 enum machine_mode op0_mode, rtx op0, rtx op1,
4033 unsigned int width = GET_MODE_BITSIZE (mode);
4035 /* VOIDmode means "infinite" precision. */
4037 width = HOST_BITS_PER_WIDE_INT;
/* SIGN_EXTRACT / ZERO_EXTRACT of a CONST_INT: OP1 is the field width,
   OP2 the bit position; the whole field must fit in a host wide int.  */
4043 if (GET_CODE (op0) == CONST_INT
4044 && GET_CODE (op1) == CONST_INT
4045 && GET_CODE (op2) == CONST_INT
4046 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4047 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4049 /* Extracting a bit-field from a constant */
4050 HOST_WIDE_INT val = INTVAL (op0);
/* On BITS_BIG_ENDIAN targets OP2 counts from the most significant end,
   so convert it into a shift from the least significant bit.  */
4052 if (BITS_BIG_ENDIAN)
4053 val >>= (GET_MODE_BITSIZE (op0_mode)
4054 - INTVAL (op2) - INTVAL (op1));
4056 val >>= INTVAL (op2);
4058 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4060 /* First zero-extend. */
4061 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4062 /* If desired, propagate sign bit. */
4063 if (code == SIGN_EXTRACT
4064 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4065 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4068 /* Clear the bits that don't belong in our mode,
4069 unless they and our sign bit are all one.
4070 So we get either a reasonable negative value or a reasonable
4071 unsigned value for this mode. */
4072 if (width < HOST_BITS_PER_WIDE_INT
4073 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4074 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4075 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4077 return gen_int_mode (val, mode);
/* IF_THEN_ELSE with a constant condition: select the arm directly.  */
4082 if (GET_CODE (op0) == CONST_INT
4083 return op0 != const0_rtx ? op1 : op2;
4085 /* Convert c ? a : a into "a". */
4086 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4089 /* Convert a != b ? a : b into "a".  Not valid when NaNs or signed
4090 zeros must be honored, since then a == b does not make the two
4091 values interchangeable.  */
4090 if (GET_CODE (op0) == NE
4091 && ! side_effects_p (op0)
4092 && ! HONOR_NANS (mode)
4093 && ! HONOR_SIGNED_ZEROS (mode)
4094 && ((rtx_equal_p (XEXP (op0, 0), op1)
4095 && rtx_equal_p (XEXP (op0, 1), op2))
4096 || (rtx_equal_p (XEXP (op0, 0), op2)
4097 && rtx_equal_p (XEXP (op0, 1), op1))))
4100 /* Convert a == b ? a : b into "b".  Same NaN/signed-zero caveats
4101 as the NE case above.  */
4101 if (GET_CODE (op0) == EQ
4102 && ! side_effects_p (op0)
4103 && ! HONOR_NANS (mode)
4104 && ! HONOR_SIGNED_ZEROS (mode)
4105 && ((rtx_equal_p (XEXP (op0, 0), op1)
4106 && rtx_equal_p (XEXP (op0, 1), op2))
4107 || (rtx_equal_p (XEXP (op0, 0), op2)
4108 && rtx_equal_p (XEXP (op0, 1), op1))))
/* Condition is a comparison with no side effects: try to fold the
   whole IF_THEN_ELSE, or at least simplify the condition.  */
4111 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4113 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4114 ? GET_MODE (XEXP (op0, 1))
4115 : GET_MODE (XEXP (op0, 0)));
4118 /* Look for happy constants in op1 and op2. */
4119 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4121 HOST_WIDE_INT t = INTVAL (op1);
4122 HOST_WIDE_INT f = INTVAL (op2);
/* (cond ? STORE_FLAG_VALUE : 0) is just the comparison itself;
   the swapped pair needs the reversed comparison code.  */
4124 if (t == STORE_FLAG_VALUE && f == 0)
4125 code = GET_CODE (op0);
4126 else if (t == 0 && f == STORE_FLAG_VALUE)
4129 tmp = reversed_comparison_code (op0, NULL_RTX);
4137 return simplify_gen_relational (code, mode, cmp_mode,
4138 XEXP (op0, 0), XEXP (op0, 1));
4141 if (cmp_mode == VOIDmode)
4142 cmp_mode = op0_mode;
4143 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4144 cmp_mode, XEXP (op0, 0),
4147 /* See if any simplifications were possible.  A constant result
4148 decides the selection outright; otherwise rebuild with the
4149 simplified condition.  */
4150 if (GET_CODE (temp) == CONST_INT)
4151 return temp == const0_rtx ? op2 : op1;
4153 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
/* VEC_MERGE: OP2 is a bitmask choosing, per element, between the two
   vector operands OP0 and OP1 (both of MODE).  */
4159 gcc_assert (GET_MODE (op0) == mode);
4160 gcc_assert (GET_MODE (op1) == mode);
4161 gcc_assert (VECTOR_MODE_P (mode));
4162 op2 = avoid_constant_pool_reference (op2);
4163 if (GET_CODE (op2) == CONST_INT)
4165 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4166 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4167 int mask = (1 << n_elts) - 1;
/* An all-zero or all-one mask selects just one of the operands
   (the selected-operand result lines are elided in this excerpt).  */
4169 if (!(INTVAL (op2) & mask))
4171 if ((INTVAL (op2) & mask) == mask)
4174 op0 = avoid_constant_pool_reference (op0);
4175 op1 = avoid_constant_pool_reference (op1);
4176 if (GET_CODE (op0) == CONST_VECTOR
4177 && GET_CODE (op1) == CONST_VECTOR)
/* Both operands constant: build the merged CONST_VECTOR element by
   element; bit I of OP2 set picks OP0's element, clear picks OP1's.  */
4179 rtvec v = rtvec_alloc (n_elts);
4182 for (i = 0; i < n_elts; i++)
4183 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4184 ? CONST_VECTOR_ELT (op0, i)
4185 : CONST_VECTOR_ELT (op1, i));
4186 return gen_rtx_CONST_VECTOR (mode, v);
4198 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4199 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4201 Works by unpacking OP into a collection of 8-bit values
4202 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4203 and then repacking them again for OUTERMODE. */
4206 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4207 enum machine_mode innermode, unsigned int byte)
4209 /* We support up to 512-bit values (for V8DFmode). */
4213 value_mask = (1 << value_bit) - 1
4215 unsigned char value[max_bitsize / value_bit];
4224 rtvec result_v = NULL;
4225 enum mode_class outer_class;
4226 enum machine_mode outer_submode;
4228 /* Some ports misuse CCmode. */
4229 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4232 /* We have no way to represent a complex constant at the rtl level. */
4233 if (COMPLEX_MODE_P (outermode))
4236 /* Unpack the value.  A CONST_VECTOR is a sequence of elements; any
4237 other constant is treated as a single element of maximal width.  */
4238 if (GET_CODE (op) == CONST_VECTOR)
4240 num_elem = CONST_VECTOR_NUNITS (op);
4241 elems = &CONST_VECTOR_ELT (op, 0);
4242 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4248 elem_bitsize = max_bitsize;
4250 /* If this asserts, it is too complicated; reducing value_bit may help. */
4251 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4252 /* I don't know how to handle endianness of sub-units. */
4253 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
/* Unpack each element into VALUE, value_bit bits per array entry,
   least significant chunk first within an element.  */
4255 for (elem = 0; elem < num_elem; elem++)
4258 rtx el = elems[elem];
4260 /* Vectors are kept in target memory order. (This is probably
4263 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4264 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4266 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4267 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4268 unsigned bytele = (subword_byte % UNITS_PER_WORD
4269 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4270 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4273 switch (GET_CODE (el))
4277 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4279 *vp++ = INTVAL (el) >> i;
4280 /* CONST_INTs are always logically sign-extended. */
4281 for (; i < elem_bitsize; i += value_bit)
4282 *vp++ = INTVAL (el) < 0 ? -1 : 0;
/* A VOIDmode CONST_DOUBLE holds a two-word integer: low word first,
   then the high word.  */
4286 if (GET_MODE (el) == VOIDmode)
4288 /* If this triggers, someone should have generated a
4289 CONST_INT instead. */
4290 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT)
4292 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4293 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4294 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4297 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4300 /* It shouldn't matter what's done here, so fill it with
4302 for (; i < elem_bitsize; i += value_bit)
/* Floating-point CONST_DOUBLE: convert through the target's
   representation with real_to_target, then unpack the 32-bit words.  */
4307 long tmp[max_bitsize / 32];
4308 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4310 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4311 gcc_assert (bitsize <= elem_bitsize);
4312 gcc_assert (bitsize % value_bit == 0);
4314 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4317 /* real_to_target produces its result in words affected by
4318 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4319 and use WORDS_BIG_ENDIAN instead; see the documentation
4320 of SUBREG in rtl.texi. */
4321 for (i = 0; i < bitsize; i += value_bit)
4324 if (WORDS_BIG_ENDIAN)
4325 ibase = bitsize - 1 - i;
4328 *vp++ = tmp[ibase / 32] >> i % 32;
4331 /* It shouldn't matter what's done here, so fill it with
4333 for (; i < elem_bitsize; i += value_bit)
4343 /* Now, pick the right byte to start with. */
4344 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4345 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4346 will already have offset 0. */
4347 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4349 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4351 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4352 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4353 byte = (subword_byte % UNITS_PER_WORD
4354 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4357 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4358 so if it's become negative it will instead be very large.) */
4359 gcc_assert (byte < GET_MODE_SIZE (innermode));
4361 /* Convert from bytes to chunks of size value_bit. */
4362 value_start = byte * (BITS_PER_UNIT / value_bit);
4364 /* Re-pack the value.  For a vector result, pack one element per
4365 inner-mode unit; otherwise pack a single OUTERMODE element.  */
4366 if (VECTOR_MODE_P (outermode))
4368 num_elem = GET_MODE_NUNITS (outermode);
4369 result_v = rtvec_alloc (num_elem);
4370 elems = &RTVEC_ELT (result_v, 0);
4371 outer_submode = GET_MODE_INNER (outermode);
4377 outer_submode = outermode;
4380 outer_class = GET_MODE_CLASS (outer_submode);
4381 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4383 gcc_assert (elem_bitsize % value_bit == 0);
4384 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4386 for (elem = 0; elem < num_elem; elem++)
4390 /* Vectors are stored in target memory order. (This is probably
4393 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4394 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4396 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4397 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4398 unsigned bytele = (subword_byte % UNITS_PER_WORD
4399 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4400 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4403 switch (outer_class)
4406 case MODE_PARTIAL_INT:
/* Integer result: assemble the low and high host words from the
   value_bit-sized chunks, then build a CONST_INT or CONST_DOUBLE.  */
4408 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4411 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4413 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4414 for (; i < elem_bitsize; i += value_bit)
4415 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4416 << (i - HOST_BITS_PER_WIDE_INT));
4418 /* immed_double_const doesn't call trunc_int_for_mode. I don't
4420 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4421 elems[elem] = gen_int_mode (lo, outer_submode);
4422 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4423 elems[elem] = immed_double_const (lo, hi, outer_submode);
4430 case MODE_DECIMAL_FLOAT:
/* Floating-point result: repack chunks into 32-bit words and convert
   back with real_from_target.  */
4433 long tmp[max_bitsize / 32];
4435 /* real_from_target wants its input in words affected by
4436 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4437 and use WORDS_BIG_ENDIAN instead; see the documentation
4438 of SUBREG in rtl.texi. */
4439 for (i = 0; i < max_bitsize / 32; i++)
4441 for (i = 0; i < elem_bitsize; i += value_bit)
4444 if (WORDS_BIG_ENDIAN)
4445 ibase = elem_bitsize - 1 - i;
4448 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4451 real_from_target (&r, tmp, outer_submode);
4452 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4460 if (VECTOR_MODE_P (outermode))
4461 return gen_rtx_CONST_VECTOR (outermode, result_v);
4466 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4467 Return 0 if no simplifications are possible. */
4469 simplify_subreg (enum machine_mode outermode, rtx op,
4470 enum machine_mode innermode, unsigned int byte)
4472 /* Little bit of sanity checking. */
4473 gcc_assert (innermode != VOIDmode);
4474 gcc_assert (outermode != VOIDmode);
4475 gcc_assert (innermode != BLKmode);
4476 gcc_assert (outermode != BLKmode);
4478 gcc_assert (GET_MODE (op) == innermode
4479 || GET_MODE (op) == VOIDmode);
4481 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4482 gcc_assert (byte < GET_MODE_SIZE (innermode));
/* A same-mode, zero-offset SUBREG is a no-op.  */
4484 if (outermode == innermode && !byte)
/* Constants are folded by unpacking/repacking their bytes.  */
4487 if (GET_CODE (op) == CONST_INT
4488 || GET_CODE (op) == CONST_DOUBLE
4489 || GET_CODE (op) == CONST_VECTOR
4490 return simplify_immed_subreg (outermode, op, innermode, byte);
4492 /* Changing mode twice with SUBREG => just change it once,
4493 or not at all if changing back to the starting mode. */
4494 if (GET_CODE (op) == SUBREG)
4496 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4497 int final_offset = byte + SUBREG_BYTE (op);
4500 if (outermode == innermostmode
4501 && byte == 0 && SUBREG_BYTE (op) == 0)
4502 return SUBREG_REG (op);
4504 /* The SUBREG_BYTE represents offset, as if the value were stored
4505 in memory. Irritating exception is paradoxical subreg, where
4506 we define SUBREG_BYTE to be 0. On big endian machines, this
4507 value should be negative. For a moment, undo this exception. */
4508 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4510 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4511 if (WORDS_BIG_ENDIAN)
4512 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4513 if (BYTES_BIG_ENDIAN)
4514 final_offset += difference % UNITS_PER_WORD;
/* Likewise undo the exception for the inner subreg if it too is
   paradoxical.  */
4516 if (SUBREG_BYTE (op) == 0
4517 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4519 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4520 if (WORDS_BIG_ENDIAN)
4521 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4522 if (BYTES_BIG_ENDIAN)
4523 final_offset += difference % UNITS_PER_WORD;
4526 /* See whether resulting subreg will be paradoxical. */
4527 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4529 /* In nonparadoxical subregs we can't handle negative offsets. */
4530 if (final_offset < 0)
4532 /* Bail out in case resulting subreg would be incorrect. */
4533 if (final_offset % GET_MODE_SIZE (outermode)
4534 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
/* Resulting subreg would be paradoxical: re-apply the big-endian
   adjustment and require that we are still at the low part.  */
4540 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4542 /* In paradoxical subreg, see if we are still looking on lower part.
4543 If so, our SUBREG_BYTE will be 0. */
4544 if (WORDS_BIG_ENDIAN)
4545 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4546 if (BYTES_BIG_ENDIAN)
4547 offset += difference % UNITS_PER_WORD;
4548 if (offset == final_offset)
4554 /* Recurse for further possible simplifications. */
4555 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4559 if (validate_subreg (outermode, innermostmode,
4560 SUBREG_REG (op), final_offset))
4561 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4565 /* Merge implicit and explicit truncations.  Only valid when the
4566 subreg selects the low part of a narrowing TRUNCATE.  */
4567 if (GET_CODE (op) == TRUNCATE
4568 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4569 && subreg_lowpart_offset (outermode, innermode) == byte)
4570 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4571 GET_MODE (XEXP (op, 0)));
4573 /* SUBREG of a hard register => just change the register number
4574 and/or mode. If the hard register is not valid in that mode,
4575 suppress this simplification. If the hard register is the stack,
4576 frame, or argument pointer, leave this as a SUBREG. */
4579 && REGNO (op) < FIRST_PSEUDO_REGISTER
4580 #ifdef CANNOT_CHANGE_MODE_CLASS
4581 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4582 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4583 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4585 && ((reload_completed && !frame_pointer_needed)
4586 || (REGNO (op) != FRAME_POINTER_REGNUM
4587 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4588 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4591 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4592 && REGNO (op) != ARG_POINTER_REGNUM
4594 && REGNO (op) != STACK_POINTER_REGNUM
4595 && subreg_offset_representable_p (REGNO (op), innermode,
4598 unsigned int regno = REGNO (op);
4599 unsigned int final_regno
4600 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4602 /* ??? We do allow it if the current REG is not valid for
4603 its mode. This is a kludge to work around how float/complex
4604 arguments are passed on 32-bit SPARC and should be fixed. */
4605 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4606 || ! HARD_REGNO_MODE_OK (regno, innermode))
4609 int final_offset = byte;
4611 /* Adjust offset for paradoxical subregs. */
4613 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4615 int difference = (GET_MODE_SIZE (innermode)
4616 - GET_MODE_SIZE (outermode));
4617 if (WORDS_BIG_ENDIAN)
4618 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4619 if (BYTES_BIG_ENDIAN)
4620 final_offset += difference % UNITS_PER_WORD;
4623 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4625 /* Propagate original regno. We don't have any way to specify
4626 the offset inside original regno, so do so only for lowpart.
4627 The information is used only by alias analysis that can not
4628 grok partial register anyway. */
4630 if (subreg_lowpart_offset (outermode, innermode) == byte)
4631 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op)
4636 /* If we have a SUBREG of a register that we are replacing and we are
4637 replacing it with a MEM, make a new MEM and try replacing the
4638 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4639 or if we would be widening it. */
4642 && ! mode_dependent_address_p (XEXP (op, 0))
4643 /* Allow splitting of volatile memory references in case we don't
4644 have instruction to move the whole thing. */
4645 && (! MEM_VOLATILE_P (op)
4646 || ! have_insn_for (SET, innermode))
4647 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4648 return adjust_address_nv (op, outermode, byte);
4650 /* Handle complex values represented as CONCAT
4651 of real and imaginary part.  Pick the part the byte offset
4652 falls into and recurse on it. */
4652 if (GET_CODE (op) == CONCAT)
4654 unsigned int part_size, final_offset;
4657 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
4658 if (byte < part_size)
4660 part = XEXP (op, 0);
4661 final_offset = byte;
4665 part = XEXP (op, 1);
4666 final_offset = byte - part_size;
/* Give up if the requested piece straddles the part boundary.  */
4669 if (final_offset + GET_MODE_SIZE (outermode) > part_size)
4672 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4675 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4676 return gen_rtx_SUBREG (outermode, part, final_offset);
4680 /* Optimize SUBREG truncations of zero and sign extended values. */
4681 if ((GET_CODE (op) == ZERO_EXTEND
4682 || GET_CODE (op) == SIGN_EXTEND)
4683 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4685 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4687 /* If we're requesting the lowpart of a zero or sign extension,
4688 there are three possibilities. If the outermode is the same
4689 as the origmode, we can omit both the extension and the subreg.
4690 If the outermode is not larger than the origmode, we can apply
4691 the truncation without the extension. Finally, if the outermode
4692 is larger than the origmode, but both are integer modes, we
4693 can just extend to the appropriate mode. */
4696 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4697 if (outermode == origmode)
4698 return XEXP (op, 0);
4699 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4700 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4701 subreg_lowpart_offset (outermode,
4703 if (SCALAR_INT_MODE_P (outermode))
4704 return simplify_gen_unary (GET_CODE (op), outermode,
4705 XEXP (op, 0), origmode);
4708 /* A SUBREG resulting from a zero extension may fold to zero if
4709 it extracts higher bits that the ZERO_EXTEND's source bits. */
4710 if (GET_CODE (op) == ZERO_EXTEND
4711 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4712 return CONST0_RTX (outermode);
4715 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4716 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4717 the outer subreg is effectively a truncation to the original mode. */
4718 if ((GET_CODE (op) == LSHIFTRT
4719 || GET_CODE (op) == ASHIFTRT)
4720 && SCALAR_INT_MODE_P (outermode)
4721 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4722 to avoid the possibility that an outer LSHIFTRT shifts by more
4723 than the sign extension's sign_bit_copies and introduces zeros
4724 into the high bits of the result. */
4725 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4726 && GET_CODE (XEXP (op, 1)) == CONST_INT
4727 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4728 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4729 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4730 && subreg_lsb_1 (outermode, innermode, byte) == 0
4731 return simplify_gen_binary (ASHIFTRT, outermode,
4732 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4734 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4735 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4736 the outer subreg is effectively a truncation to the original mode. */
4737 if ((GET_CODE (op) == LSHIFTRT
4738 || GET_CODE (op) == ASHIFTRT)
4739 && SCALAR_INT_MODE_P (outermode)
4740 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4741 && GET_CODE (XEXP (op, 1)) == CONST_INT
4742 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4743 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4744 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4745 && subreg_lsb_1 (outermode, innermode, byte) == 0
4746 return simplify_gen_binary (LSHIFTRT, outermode,
4747 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4749 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4750 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4751 the outer subreg is effectively a truncation to the original mode. */
4752 if (GET_CODE (op) == ASHIFT
4753 && SCALAR_INT_MODE_P (outermode)
4754 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4755 && GET_CODE (XEXP (op, 1)) == CONST_INT
4756 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4757 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4758 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4759 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4760 && subreg_lsb_1 (outermode, innermode, byte) == 0
4761 return simplify_gen_binary (ASHIFT, outermode,
4762 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4767 /* Make a SUBREG operation or equivalent if it folds.  First try
4768 simplify_subreg; failing that, build an explicit SUBREG rtx when
4769 one is representable. */
4770 simplify_gen_subreg (enum machine_mode outermode, rtx op,
4771 enum machine_mode innermode, unsigned int byte)
4775 newx = simplify_subreg (outermode, op, innermode, byte);
/* Never wrap a SUBREG or CONCAT, or a VOIDmode rtx, in another
   SUBREG.  */
4779 if (GET_CODE (op) == SUBREG
4780 || GET_CODE (op) == CONCAT
4781 || GET_MODE (op) == VOIDmode)
4784 if (validate_subreg (outermode, innermode, op, byte))
4785 return gen_rtx_SUBREG (outermode, op, byte);
4790 /* Simplify X, an rtx expression.
4792 Return the simplified expression or NULL if no simplifications
4795 This is the preferred entry point into the simplification routines;
4796 however, we still allow passes to call the more specific routines.
4798 Right now GCC has three (yes, three) major bodies of RTL simplification
4799 code that need to be unified.
4801 1. fold_rtx in cse.c. This code uses various CSE specific
4802 information to aid in RTL simplification.
4804 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4805 it uses combine specific information to aid in RTL
4808 3. The routines in this file.
4811 Long term we want to only have one body of simplification code; to
4812 get to that state I recommend the following steps:
4814 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4815 which are not pass dependent state into these routines.
4817 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4818 use this routine whenever possible.
4820 3. Allow for pass dependent state to be provided to these
4821 routines and add simplifications based on the pass dependent
4822 state. Remove code from cse.c & combine.c that becomes
4825 It will take time, but ultimately the compiler will be easier to
4826 maintain and improve. It's totally silly that when we add a
4827 simplification that it needs to be added to 4 places (3 for RTL
4828 simplification and 1 for tree simplification). */
4831 simplify_rtx (rtx x)
4833 enum rtx_code code = GET_CODE (x);
4834 enum machine_mode mode = GET_MODE (x);
4836 switch (GET_RTX_CLASS (code))
4839 return simplify_unary_operation (code, mode,
4840 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4841 case RTX_COMM_ARITH:
4842 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4843 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4845 /* Fall through.... */
4848 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4851 case RTX_BITFIELD_OPS:
4852 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4853 XEXP (x, 0), XEXP (x, 1),
4857 case RTX_COMM_COMPARE:
4858 return simplify_relational_operation (code, mode,
4859 ((GET_MODE (XEXP (x, 0))
4861 ? GET_MODE (XEXP (x, 0))
4862 : GET_MODE (XEXP (x, 1))),
4868 return simplify_subreg (mode, SUBREG_REG (x),
4869 GET_MODE (SUBREG_REG (x)),
4876 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4877 if (GET_CODE (XEXP (x, 0)) == HIGH
4878 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))