/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
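
/* For example, assuming a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND applied
   to a low word of -8 (0xfffffffffffffff8) yields -1 for the high word,
   while a low word of 8 yields 0, exactly as if LOW had been sign
   extended to twice its width.  */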

static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int, unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
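
/* For instance, in 32-bit SImode the test above accepts exactly the
   CONST_INT whose value is 1 << 31: on a 64-bit host that constant is
   stored sign extended as -2147483648, and the masking above reduces
   it back to 0x80000000 before the comparison.  */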

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
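
/* For example (illustrative), simplify_gen_binary (PLUS, SImode,
   GEN_INT (2), GEN_INT (3)) folds to (const_int 5), while a call whose
   operands cannot be folded merely reorders commutative operands so
   that any constant ends up second before building the expression.  */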

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
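
/* For instance, a load such as (mem (symbol_ref ".LC0")) whose pool slot
   holds (const_double:DF 2.5) is returned as the CONST_DOUBLE itself,
   which later constant folding can consume directly.  The label ".LC0"
   is only an illustrative name here.  */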

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  if (MEM_P (x)
      && MEM_EXPR (x)
      && (!MEM_OFFSET (x)
          || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp = 0, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          if (MEM_OFFSET (x))
            offset += INTVAL (MEM_OFFSET (x));

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
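
/* As an example of the address test above: if NEWX's address is
   (plus (reg X) (const_int 8)) and the old address is
   (plus (reg X) (const_int 12)) with OFFSET 4, the two agree and no
   new MEM needs to be built.  */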

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD_RTX in X with FN (X', DATA), where X'
   is an expression in X that is equal to OLD_RTX.  Canonicalize and
   simplify the result.

   If FN is null, assume FN (X', DATA) == copy_rtx (DATA).  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  /* If X is OLD_RTX, return FN (X, DATA), with a null FN.  Otherwise,
     if this is an expression, try to build a new expression, substituting
     recursively.  If we can't do anything, return our input.  */

  if (rtx_equal_p (x, old_rtx))
    {
      if (fn)
        return fn (x, data);
      else if (data)
        return copy_rtx ((rtx) data);
      else
        return x;
    }

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
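
/* A typical use (illustrative only, with REG standing for any pseudo
   register rtx):

     simplify_replace_rtx (gen_rtx_PLUS (SImode, reg, const1_rtx),
                           reg, GEN_INT (41))

   substitutes 41 for the register and folds the PLUS, yielding
   (const_int 42).  */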

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }
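
      /* E.g. in QImode, (not (ashift 1 X)) becomes
         (rotate (const_int -2) X): ~1 is the all-ones pattern with only
         the low bit clear, and rotating it by X clears exactly the bit
         that the shift would have set.  */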

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
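
    /* The De Morgan rule above rewrites, for example, (not (and X Y))
       as (ior (not X) (not Y)) and (not (ior X Y)) as
       (and (not X) (not Y)), which matches and-not and nor patterns
       directly on machines that have them.  */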

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_INT_P (XEXP (op, 1))
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
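
    /* For example, with 32-bit SImode and STORE_FLAG_VALUE == 1,
       (neg (lt X 0)) becomes (ashiftrt X 31): the arithmetic shift
       replicates the sign bit, giving -1 when X is negative and 0
       otherwise, which is exactly the negated flag value.  */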

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:SF foo:DF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);
      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode)
                        == GET_MODE_INNER (GET_MODE (op)));
        }
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;
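
        /* Both loops rely on the identity that ARG0 & (ARG0 - 1) clears
           the lowest set bit, so the number of iterations equals the
           population count; e.g. 0x58 (binary 1011000) takes exactly
           three iterations.  PARITY then keeps only the low bit of
           that count.  */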

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
        case US_NEG:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
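
/* For example, (plus (plus X (const_int 1)) (const_int 2)) hits the
   "(a op b) op c -> a op (b op c)" rule above: the inner constants
   fold to (const_int 3), giving (plus X (const_int 3)).  */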

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}

/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_INT_P (op1)
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
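
      /* E.g. (mult X (const_int 8)) becomes (ashift X (const_int 3)),
         since exact_log2 (8) == 3; a multiplier like 6 fails the
         exact_log2 test and is left as a multiplication.  */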

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
2192 if (trueop1 == const0_rtx)
2194 if (CONST_INT_P (trueop1)
2195 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2196 == GET_MODE_MASK (mode)))
2198 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2200 /* A | (~A) -> -1 */
2201 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2202 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2203 && ! side_effects_p (op0)
2204 && SCALAR_INT_MODE_P (mode))
2207 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2208 if (CONST_INT_P (op1)
2209 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2210 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2213 /* Canonicalize (X & C1) | C2. */
2214 if (GET_CODE (op0) == AND
2215 && CONST_INT_P (trueop1)
2216 && CONST_INT_P (XEXP (op0, 1)))
2218 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2219 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2220 HOST_WIDE_INT c2 = INTVAL (trueop1);
2222 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2224 && !side_effects_p (XEXP (op0, 0)))
2227 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2228 if (((c1|c2) & mask) == mask)
2229 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2231 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2232 if (((c1 & ~c2) & mask) != (c1 & mask))
2234 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2235 gen_int_mode (c1 & ~c2, mode));
2236 return simplify_gen_binary (IOR, mode, tem, op1);
2240 /* Convert (A & B) | A to A. */
2241 if (GET_CODE (op0) == AND
2242 && (rtx_equal_p (XEXP (op0, 0), op1)
2243 || rtx_equal_p (XEXP (op0, 1), op1))
2244 && ! side_effects_p (XEXP (op0, 0))
2245 && ! side_effects_p (XEXP (op0, 1)))
2248 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2249 mode size to (rotate A CX). */
      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}
2263 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2264 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2265 && CONST_INT_P (XEXP (opleft, 1))
2266 && CONST_INT_P (XEXP (opright, 1))
2267 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2268 == GET_MODE_BITSIZE (mode)))
2269 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
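      /* Worked example (added for exposition): in SImode,
	   (ior (ashift A (const_int 24)) (lshiftrt A (const_int 8)))
	 satisfies 24 + 8 == GET_MODE_BITSIZE (SImode) == 32, so it is
	 rewritten as (rotate A (const_int 24)).  */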
2271 /* Same, but for ashift that has been "simplified" to a wider mode
2272 by simplify_shift_const. */
2274 if (GET_CODE (opleft) == SUBREG
2275 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2276 && GET_CODE (opright) == LSHIFTRT
2277 && GET_CODE (XEXP (opright, 0)) == SUBREG
2278 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2279 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2280 && (GET_MODE_SIZE (GET_MODE (opleft))
2281 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2282 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2283 SUBREG_REG (XEXP (opright, 0)))
2284 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2285 && CONST_INT_P (XEXP (opright, 1))
2286 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2287 == GET_MODE_BITSIZE (mode)))
2288 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2289 XEXP (SUBREG_REG (opleft), 1));
      /* If we have (ior (and X C1) C2), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
2293 if (CONST_INT_P (op1)
2294 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2295 || INTVAL (op1) > 0)
2296 && GET_CODE (op0) == AND
2297 && CONST_INT_P (XEXP (op0, 1))
2298 && CONST_INT_P (op1)
2299 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
	return simplify_gen_binary (IOR, mode,
				    simplify_gen_binary
				    (AND, mode, XEXP (op0, 0),
				     GEN_INT (INTVAL (XEXP (op0, 1))
					      & ~INTVAL (op1))),
				    op1);
2307 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2308 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2309 the PLUS does not affect any of the bits in OP1: then we can do
2310 the IOR as a PLUS and we can associate. This is valid if OP1
2311 can be safely shifted left C bits. */
2312 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2313 && GET_CODE (XEXP (op0, 0)) == PLUS
2314 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2315 && CONST_INT_P (XEXP (op0, 1))
2316 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2318 int count = INTVAL (XEXP (op0, 1));
2319 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2321 if (mask >> count == INTVAL (trueop1)
2322 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2323 return simplify_gen_binary (ASHIFTRT, mode,
				      plus_constant (XEXP (op0, 0), mask),
				      XEXP (op0, 1));
	}
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case XOR:
      if (trueop1 == const0_rtx)
	return op0;
2336 if (CONST_INT_P (trueop1)
2337 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2338 == GET_MODE_MASK (mode)))
2339 return simplify_gen_unary (NOT, mode, op0, mode);
2340 if (rtx_equal_p (trueop0, trueop1)
2341 && ! side_effects_p (op0)
2342 && GET_MODE_CLASS (mode) != MODE_CC)
2343 return CONST0_RTX (mode);
2345 /* Canonicalize XOR of the most significant bit to PLUS. */
2346 if ((CONST_INT_P (op1)
2347 || GET_CODE (op1) == CONST_DOUBLE)
2348 && mode_signbit_p (mode, op1))
2349 return simplify_gen_binary (PLUS, mode, op0, op1);
2350 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2351 if ((CONST_INT_P (op1)
2352 || GET_CODE (op1) == CONST_DOUBLE)
2353 && GET_CODE (op0) == PLUS
2354 && (CONST_INT_P (XEXP (op0, 1))
2355 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2356 && mode_signbit_p (mode, XEXP (op0, 1)))
2357 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));
2361 /* If we are XORing two things that have no bits in common,
2362 convert them into an IOR. This helps to detect rotation encoded
2363 using those methods and possibly other simplifications. */
2365 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2366 && (nonzero_bits (op0, mode)
2367 & nonzero_bits (op1, mode)) == 0)
2368 return (simplify_gen_binary (IOR, mode, op0, op1));
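      /* Worked example (added for exposition): if nonzero_bits shows op0
	 can only set 0x000000ff and op1 only 0x0000ff00, the operands
	 share no bits, so XOR, IOR and PLUS of them are all equivalent;
	 IOR is the canonical form the rotate patterns above look for.  */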
2370 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;
2376 if (GET_CODE (op0) == NOT)
2377 num_negated++, op0 = XEXP (op0, 0);
2378 if (GET_CODE (op1) == NOT)
2379 num_negated++, op1 = XEXP (op1, 0);
2381 if (num_negated == 2)
2382 return simplify_gen_binary (XOR, mode, op0, op1);
2383 else if (num_negated == 1)
2384 return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }
2389 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2390 correspond to a machine insn or result in further simplifications
2391 if B is a constant. */
2393 if (GET_CODE (op0) == AND
2394 && rtx_equal_p (XEXP (op0, 1), op1)
2395 && ! side_effects_p (op1))
2396 return simplify_gen_binary (AND, mode,
2397 simplify_gen_unary (NOT, mode,
						        XEXP (op0, 0), mode),
				    op1);
2401 else if (GET_CODE (op0) == AND
2402 && rtx_equal_p (XEXP (op0, 0), op1)
2403 && ! side_effects_p (op1))
2404 return simplify_gen_binary (AND, mode,
2405 simplify_gen_unary (NOT, mode,
						        XEXP (op0, 1), mode),
				    op1);
2409 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2410 comparison if STORE_FLAG_VALUE is 1. */
2411 if (STORE_FLAG_VALUE == 1
2412 && trueop1 == const1_rtx
2413 && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;
2417 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2418 is (lt foo (const_int 0)), so we can perform the above
2419 simplification if STORE_FLAG_VALUE is 1. */
2421 if (STORE_FLAG_VALUE == 1
2422 && trueop1 == const1_rtx
2423 && GET_CODE (op0) == LSHIFTRT
2424 && CONST_INT_P (XEXP (op0, 1))
2425 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2426 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
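      /* Worked example (added for exposition): in SImode,
	 (lshiftrt X (const_int 31)) is the sign-bit test (lt X 0), so
	 (xor (lshiftrt X (const_int 31)) (const_int 1)) is the reversed
	 test (ge X (const_int 0)).  */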
2428 /* (xor (comparison foo bar) (const_int sign-bit))
2429 when STORE_FLAG_VALUE is the sign bit. */
2430 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2431 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2432 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2433 && trueop1 == const_true_rtx
2434 && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
2446 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2448 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2449 HOST_WIDE_INT nzop1;
2450 if (CONST_INT_P (trueop1))
2452 HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we
		 need not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
2459 /* If we are clearing all the nonzero bits, the result is zero. */
2460 if ((nzop1 & nzop0) == 0
2461 && !side_effects_p (op0) && !side_effects_p (op1))
2462 return CONST0_RTX (mode);
2464 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
2468 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2469 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2470 && ! side_effects_p (op0)
2471 && GET_MODE_CLASS (mode) != MODE_CC)
2472 return CONST0_RTX (mode);
2474 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2475 there are no nonzero bits of C outside of X's mode. */
2476 if ((GET_CODE (op0) == SIGN_EXTEND
2477 || GET_CODE (op0) == ZERO_EXTEND)
2478 && CONST_INT_P (trueop1)
2479 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2480 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2481 & INTVAL (trueop1)) == 0)
2483 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2484 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
2487 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
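	  /* Worked example (added for exposition): for
	       (and:SI (sign_extend:SI X:QI) (const_int 0x7f))
	     the constant has no bits outside QImode's mask 0xff, so this
	     becomes (zero_extend:SI (and:QI X (const_int 0x7f))); the
	     copies of the sign bit above QImode are cleared by the
	     constant, so a ZERO_EXTEND suffices.  */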
2490 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2491 we might be able to further simplify the AND with X and potentially
2492 remove the truncation altogether. */
2493 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2495 rtx x = XEXP (op0, 0);
2496 enum machine_mode xmode = GET_MODE (x);
2497 tem = simplify_gen_binary (AND, xmode, x,
2498 gen_int_mode (INTVAL (trueop1), xmode));
2499 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2502 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2503 if (GET_CODE (op0) == IOR
2504 && CONST_INT_P (trueop1)
2505 && CONST_INT_P (XEXP (op0, 1)))
2507 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2508 return simplify_gen_binary (IOR, mode,
2509 simplify_gen_binary (AND, mode,
2510 XEXP (op0, 0), op1),
2511 gen_int_mode (tmp, mode));
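	  /* Worked example (added for exposition): (and (ior X 0x30) 0xf0)
	     distributes to (ior (and X 0xf0) (const_int 0x30)), since
	     0x30 & 0xf0 == 0x30.  */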
2514 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2515 insn (and may simplify more). */
2516 if (GET_CODE (op0) == XOR
2517 && rtx_equal_p (XEXP (op0, 0), op1)
2518 && ! side_effects_p (op1))
2519 return simplify_gen_binary (AND, mode,
2520 simplify_gen_unary (NOT, mode,
						        XEXP (op0, 1), mode),
				    op1);
2524 if (GET_CODE (op0) == XOR
2525 && rtx_equal_p (XEXP (op0, 1), op1)
2526 && ! side_effects_p (op1))
2527 return simplify_gen_binary (AND, mode,
2528 simplify_gen_unary (NOT, mode,
						        XEXP (op0, 0), mode),
				    op1);
2532 /* Similarly for (~(A ^ B)) & A. */
2533 if (GET_CODE (op0) == NOT
2534 && GET_CODE (XEXP (op0, 0)) == XOR
2535 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2536 && ! side_effects_p (op1))
2537 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2539 if (GET_CODE (op0) == NOT
2540 && GET_CODE (XEXP (op0, 0)) == XOR
2541 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2542 && ! side_effects_p (op1))
2543 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2545 /* Convert (A | B) & A to A. */
2546 if (GET_CODE (op0) == IOR
2547 && (rtx_equal_p (XEXP (op0, 0), op1)
2548 || rtx_equal_p (XEXP (op0, 1), op1))
2549 && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;
2553 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2554 ((A & N) + B) & M -> (A + B) & M
2555 Similarly if (N & M) == 0,
2556 ((A | N) + B) & M -> (A + B) & M
2557 and for - instead of + and/or ^ instead of |.
2558 Also, if (N & M) == 0, then
2559 (A +- N) & M -> A & M. */
2560 if (CONST_INT_P (trueop1)
2561 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2562 && ~INTVAL (trueop1)
2563 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2564 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
2570 pmop[1] = XEXP (op0, 1);
2572 if (CONST_INT_P (pmop[1])
2573 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2574 return simplify_gen_binary (AND, mode, pmop[0], op1);
2576 for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
2582 if (CONST_INT_P (XEXP (tem, 1))
2583 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2584 == INTVAL (trueop1))
2585 pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
2590 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2591 pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}
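      /* Worked example (added for exposition): with M == 0xff (so M + 1
	 is a power of two) and N == 0x1ff, (N & M) == M holds, so
	   (and (plus (and A (const_int 0x1ff)) B) (const_int 0xff))
	 drops the inner mask and becomes
	   (and (plus A B) (const_int 0xff));
	 bits above M cannot influence the low eight bits of the sum.  */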
      /* (and X (ior (not X) Y)) -> (and X Y) */
2607 if (GET_CODE (op1) == IOR
2608 && GET_CODE (XEXP (op1, 0)) == NOT
2609 && op0 == XEXP (XEXP (op1, 0), 0))
2610 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2612 /* (and (ior (not X) Y) X) -> (and X Y) */
2613 if (GET_CODE (op0) == IOR
2614 && GET_CODE (XEXP (op0, 0)) == NOT
2615 && op1 == XEXP (XEXP (op0, 0), 0))
2616 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2634 /* Convert divide by power of two into shift. */
2635 if (CONST_INT_P (trueop1)
2636 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2637 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
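      /* Worked example (added for exposition): (udiv X (const_int 8))
	 becomes (lshiftrt X (const_int 3)), since exact_log2 (8) == 3 and
	 unsigned division by a power of two is a logical right shift.  */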
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
2642 if (SCALAR_FLOAT_MODE_P (mode))
2644 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2645 safe for modes with NaNs, since 0.0 / 0.0 will then be
2646 NaN rather than 0.0. Nor is it safe for modes with signed
2647 zeros, since dividing 0 by a negative number gives -0.0 */
2648 if (trueop0 == CONST0_RTX (mode)
2649 && !HONOR_NANS (mode)
2650 && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && trueop1 != CONST0_RTX (mode))
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      /* x/-1.0 is -x.  */
2665 if (REAL_VALUES_EQUAL (d, dconstm1)
2666 && !HONOR_SNANS (mode))
2667 return simplify_gen_unary (NEG, mode, op0, mode);
2669 /* Change FP division by a constant into multiplication.
2670 Only do this with -freciprocal-math. */
2671 if (flag_reciprocal_math
2672 && !REAL_VALUES_EQUAL (d, dconst0))
2674 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2675 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2676 return simplify_gen_binary (MULT, mode, op0, tem);
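		  /* Worked example (added for exposition): with
		     -freciprocal-math, (div X (const_double 4.0)) becomes
		     (mult X (const_double 0.25)); 1/4.0 is computed at
		     compile time by REAL_ARITHMETIC above.  */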
		}
	    }
	}
      else
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode))
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
2716 /* Implement modulus by power of two as AND. */
2717 if (CONST_INT_P (trueop1)
2718 && exact_log2 (INTVAL (trueop1)) > 0)
2719 return simplify_gen_binary (AND, mode, op0,
2720 GEN_INT (INTVAL (op1) - 1));
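      /* Worked example (added for exposition): (umod X (const_int 16))
	 becomes (and X (const_int 15)); an unsigned modulus by 2**n keeps
	 exactly the low n bits.  */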
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;

    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
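      /* Worked example (added for exposition): with SHIFT_COUNT_TRUNCATED,
	 an SImode shift or rotate by 33 is canonicalized to a count of
	 33 & 31 == 1.  */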
      break;

    case ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
2775 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2776 if (GET_CODE (op0) == CLZ
2777 && CONST_INT_P (trueop1)
2778 && STORE_FLAG_VALUE == 1
2779 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2781 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2782 unsigned HOST_WIDE_INT zero_val = 0;
2784 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2785 && zero_val == GET_MODE_BITSIZE (imode)
2786 && INTVAL (trueop1) == exact_log2 (zero_val))
2787 return simplify_gen_relational (EQ, mode, imode,
2788 XEXP (op0, 0), const0_rtx);
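	  /* Worked example (added for exposition): if CLZ of zero is
	     defined as 32 in SImode, (clz:SI X) lies in [0, 32] and only
	     the value 32 == 2**5 has bit 5 set, so
	     (lshiftrt (clz:SI X) (const_int 5)) is exactly
	     (eq:SI X (const_int 0)).  */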
2790 goto canonicalize_shift;
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
	      == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;
    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2854 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2855 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2856 gcc_assert (XVECLEN (trueop1, 0) == 1);
2857 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2859 if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));
	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract a scalar element from a vector using a chain
	     of nested VEC_SELECT expressions.  When the input operand is
	     a memory operand, this operation can be simplified to a
	     simple scalar load from an offset memory address.  */
2869 if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      enum machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;
2884 gcc_assert (GET_CODE (op1) == PARALLEL);
2885 gcc_assert (i < n_elts);
	      /* Select the element pointed to by the nested selector.  */
2888 elem = INTVAL (XVECEXP (op1, 0, i));
2890 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2891 if (GET_CODE (op0) == VEC_CONCAT)
2893 rtx op00 = XEXP (op0, 0);
2894 rtx op01 = XEXP (op0, 1);
2896 enum machine_mode mode00, mode01;
2897 int n_elts00, n_elts01;
2899 mode00 = GET_MODE (op00);
2900 mode01 = GET_MODE (op01);
2902 /* Find out number of elements of each operand. */
	      if (VECTOR_MODE_P (mode00))
		{
		  elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
		  n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		}
	      else
		n_elts00 = 1;

	      if (VECTOR_MODE_P (mode01))
		{
		  elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
		  n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		}
	      else
		n_elts01 = 1;
2919 gcc_assert (n_elts == n_elts00 + n_elts01);
2921 /* Select correct operand of VEC_CONCAT
2922 and adjust selector. */
	      if (elem < n_elts01)
		tmp_op = op00;
	      else
		{
		  tmp_op = op01;
		  elem -= n_elts00;
		}
	    }
	  else
	    tmp_op = op0;
2934 vec = rtvec_alloc (1);
2935 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2937 tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2945 gcc_assert (GET_MODE_INNER (mode)
2946 == GET_MODE_INNER (GET_MODE (trueop0)));
2947 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2949 if (GET_CODE (trueop0) == CONST_VECTOR)
2951 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2952 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2953 rtvec v = rtvec_alloc (n_elts);
2956 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2957 for (i = 0; i < n_elts; i++)
2959 rtx x = XVECEXP (trueop1, 0, i);
2961 gcc_assert (CONST_INT_P (x));
2962 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2966 return gen_rtx_CONST_VECTOR (mode, v);
2970 if (XVECLEN (trueop1, 0) == 1
2971 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2972 && GET_CODE (trueop0) == VEC_CONCAT)
	    {
	      rtx vec = trueop0;
	      int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2977 /* Try to find the element in the VEC_CONCAT. */
2978 while (GET_MODE (vec) != mode
2979 && GET_CODE (vec) == VEC_CONCAT)
2981 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2982 if (offset < vec_size)
		    vec = XEXP (vec, 0);
		  else
		    {
		      offset -= vec_size;
		      vec = XEXP (vec, 1);
		    }
		  vec = avoid_constant_pool_reference (vec);
		}
	      if (GET_MODE (vec) == mode)
		return vec;
	    }
	}
      return 0;
    case VEC_CONCAT:
      {
2999 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3000 ? GET_MODE (trueop0)
3001 : GET_MODE_INNER (mode));
3002 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3003 ? GET_MODE (trueop1)
3004 : GET_MODE_INNER (mode));
3006 gcc_assert (VECTOR_MODE_P (mode));
3007 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3008 == GET_MODE_SIZE (mode));
3010 if (VECTOR_MODE_P (op0_mode))
3011 gcc_assert (GET_MODE_INNER (mode)
3012 == GET_MODE_INNER (op0_mode));
3014 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3016 if (VECTOR_MODE_P (op1_mode))
3017 gcc_assert (GET_MODE_INNER (mode)
3018 == GET_MODE_INNER (op1_mode));
3020 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3022 if ((GET_CODE (trueop0) == CONST_VECTOR
3023 || CONST_INT_P (trueop0)
3024 || GET_CODE (trueop0) == CONST_DOUBLE)
3025 && (GET_CODE (trueop1) == CONST_VECTOR
3026 || CONST_INT_P (trueop1)
3027 || GET_CODE (trueop1) == CONST_DOUBLE))
3029 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3030 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3031 rtvec v = rtvec_alloc (n_elts);
3033 unsigned in_n_elts = 1;
3035 if (VECTOR_MODE_P (op0_mode))
3036 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3037 for (i = 0; i < n_elts; i++)
3041 if (!VECTOR_MODE_P (op0_mode))
3042 RTVEC_ELT (v, i) = trueop0;
3044 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3048 if (!VECTOR_MODE_P (op1_mode))
3049 RTVEC_ELT (v, i) = trueop1;
3051 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3056 return gen_rtx_CONST_VECTOR (mode, v);
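	    /* Worked example (added for exposition): folding
	         (vec_concat:V2SI (const_int 1) (const_int 2))
	       produces the constant vector { 1, 2 }; likewise a
	       CONST_VECTOR operand contributes each of its elements in
	       order.  */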
3069 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
				 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
3074 unsigned int width = GET_MODE_BITSIZE (mode);
3076 if (VECTOR_MODE_P (mode)
3077 && code != VEC_CONCAT
3078 && GET_CODE (op0) == CONST_VECTOR
3079 && GET_CODE (op1) == CONST_VECTOR)
3081 unsigned n_elts = GET_MODE_NUNITS (mode);
3082 enum machine_mode op0mode = GET_MODE (op0);
3083 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3084 enum machine_mode op1mode = GET_MODE (op1);
3085 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;
3089 gcc_assert (op0_n_elts == n_elts);
3090 gcc_assert (op1_n_elts == n_elts);
3091 for (i = 0; i < n_elts; i++)
3093 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3094 CONST_VECTOR_ELT (op0, i),
3095 CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
3101 return gen_rtx_CONST_VECTOR (mode, v);
3104 if (VECTOR_MODE_P (mode)
3105 && code == VEC_CONCAT
3106 && (CONST_INT_P (op0)
3107 || GET_CODE (op0) == CONST_DOUBLE
3108 || GET_CODE (op0) == CONST_FIXED)
3109 && (CONST_INT_P (op1)
3110 || GET_CODE (op1) == CONST_DOUBLE
3111 || GET_CODE (op1) == CONST_FIXED))
3113 unsigned n_elts = GET_MODE_NUNITS (mode);
3114 rtvec v = rtvec_alloc (n_elts);
3116 gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3120 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3122 RTVEC_ELT (v, 0) = op0;
3123 RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;
3131 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3132 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3133 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3135 for (i = 0; i < op0_n_elts; ++i)
3136 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3137 for (i = 0; i < op1_n_elts; ++i)
3138 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
3144 if (SCALAR_FLOAT_MODE_P (mode)
3145 && GET_CODE (op0) == CONST_DOUBLE
3146 && GET_CODE (op1) == CONST_DOUBLE
3147 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3158 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3160 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3162 for (i = 0; i < 4; i++)
3179 real_from_target (&r, tmp0, mode);
3180 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3184 REAL_VALUE_TYPE f0, f1, value, result;
3187 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3188 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3189 real_convert (&f0, mode, &f0);
3190 real_convert (&f1, mode, &f1);
3192 if (HONOR_SNANS (mode)
3193 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3197 && REAL_VALUES_EQUAL (f1, dconst0)
3198 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3201 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3202 && flag_trapping_math
3203 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3205 int s0 = REAL_VALUE_NEGATIVE (f0);
3206 int s1 = REAL_VALUE_NEGATIVE (f1);
3211 /* Inf + -Inf = NaN plus exception. */
3216 /* Inf - Inf = NaN plus exception. */
3221 /* Inf / Inf = NaN plus exception. */
3228 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3229 && flag_trapping_math
3230 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3231 || (REAL_VALUE_ISINF (f1)
3232 && REAL_VALUES_EQUAL (f0, dconst0))))
3233 /* Inf * 0 = NaN plus exception. */
3236 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3238 real_convert (&result, mode, &value);
3240 /* Don't constant fold this floating point operation if
3241 the result has overflowed and flag_trapping_math. */
3243 if (flag_trapping_math
3244 && MODE_HAS_INFINITIES (mode)
3245 && REAL_VALUE_ISINF (result)
3246 && !REAL_VALUE_ISINF (f0)
3247 && !REAL_VALUE_ISINF (f1))
3248 /* Overflow plus exception. */
      /* Don't constant fold this floating point operation if the
	 result may depend upon the run-time rounding mode and
	 flag_rounding_math is set, or if GCC's software emulation
	 is unable to accurately represent the result.  */
3256 if ((flag_rounding_math
3257 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3258 && (inexact || !real_identical (&result, &value)))
3261 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3265 /* We can fold some multi-word operations. */
3266 if (GET_MODE_CLASS (mode) == MODE_INT
3267 && width == HOST_BITS_PER_WIDE_INT * 2
3268 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3269 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3271 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3272 HOST_WIDE_INT h1, h2, hv, ht;
3274 if (GET_CODE (op0) == CONST_DOUBLE)
3275 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3277 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3279 if (GET_CODE (op1) == CONST_DOUBLE)
3280 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3282 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3287 /* A - B == A + (-B). */
3288 neg_double (l2, h2, &lv, &hv);
3291 /* Fall through.... */
3294 add_double (l1, h1, l2, h2, &lv, &hv);
3298 mul_double (l1, h1, l2, h2, &lv, &hv);
3302 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3303 &lv, &hv, <, &ht))
3308 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3309 <, &ht, &lv, &hv))
3314 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3315 &lv, &hv, <, &ht))
3320 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3321 <, &ht, &lv, &hv))
3326 lv = l1 & l2, hv = h1 & h2;
3330 lv = l1 | l2, hv = h1 | h2;
3334 lv = l1 ^ l2, hv = h1 ^ h2;
3340 && ((unsigned HOST_WIDE_INT) l1
3341 < (unsigned HOST_WIDE_INT) l2)))
3350 && ((unsigned HOST_WIDE_INT) l1
3351 > (unsigned HOST_WIDE_INT) l2)))
3358 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3360 && ((unsigned HOST_WIDE_INT) l1
3361 < (unsigned HOST_WIDE_INT) l2)))
3368 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3370 && ((unsigned HOST_WIDE_INT) l1
3371 > (unsigned HOST_WIDE_INT) l2)))
3377 case LSHIFTRT: case ASHIFTRT:
3379 case ROTATE: case ROTATERT:
3380 if (SHIFT_COUNT_TRUNCATED)
3381 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3383 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3386 if (code == LSHIFTRT || code == ASHIFTRT)
3387 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3389 else if (code == ASHIFT)
3390 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3391 else if (code == ROTATE)
3392 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3393 else /* code == ROTATERT */
3394 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3401 return immed_double_const (lv, hv, mode);
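      /* Worked example (added for exposition): a double-word PLUS with
	 l1 == ~(unsigned HOST_WIDE_INT) 0, h1 == 0, l2 == 1 and h2 == 0
	 produces lv == 0 with a carry into the high word, so hv == 1;
	 add_double handles that carry propagation.  */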
3404 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3405 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3407 /* Get the integer argument values in two forms:
3408 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3410 arg0 = INTVAL (op0);
3411 arg1 = INTVAL (op1);
3413 if (width < HOST_BITS_PER_WIDE_INT)
3415 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3416 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3419 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3420 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3423 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3424 arg1s |= ((HOST_WIDE_INT) (-1) << width);
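	  /* Worked example (added for exposition): for width == 8 and
	     arg0 == 0xff, the zero-extended form stays 0xff while the
	     sign-extended form arg0s becomes -1; the signed operations
	     such as DIV and ASHIFTRT below use the signed forms.  */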
3432 /* Compute the value of the arithmetic. */
3437 val = arg0s + arg1s;
3441 val = arg0s - arg1s;
3445 val = arg0s * arg1s;
3450 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3453 val = arg0s / arg1s;
3458 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3461 val = arg0s % arg1s;
3466 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3469 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3474 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3477 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3495 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3496 the value is in range. We can't return any old value for
3497 out-of-range arguments because either the middle-end (via
3498 shift_truncation_mask) or the back-end might be relying on
3499 target-specific knowledge. Nor can we rely on
3500 shift_truncation_mask, since the shift might not be part of an
3501 ashlM3, lshrM3 or ashrM3 instruction. */
3502 if (SHIFT_COUNT_TRUNCATED)
3503 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3504 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3507 val = (code == ASHIFT
3508 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3509 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3511 /* Sign-extend the result for arithmetic right shifts. */
3512 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3513 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3521 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3522 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3530 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3531 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3535 /* Do nothing here. */
3539 val = arg0s <= arg1s ? arg0s : arg1s;
3543 val = ((unsigned HOST_WIDE_INT) arg0
3544 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3548 val = arg0s > arg1s ? arg0s : arg1s;
3552 val = ((unsigned HOST_WIDE_INT) arg0
3553 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3566 /* ??? There are simplifications that can be done. */
3573 return gen_int_mode (val, mode);
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */
struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result > 0;
3604 /* Group together equal REGs to do more simplification. */
3605 if (REG_P (x) && REG_P (y))
3606 return REGNO (x) > REGNO (y);
  return false;
}

static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;
3621 memset (ops, 0, sizeof ops);
3623 /* Set up the two operands and then expand them until nothing has been
3624 changed. If we run out of room in our array, give up; this should
3625 almost never happen. */
3630 ops[1].neg = (code == MINUS);
3636 for (i = 0; i < n_ops; i++)
3638 rtx this_op = ops[i].op;
3639 int this_neg = ops[i].neg;
3640 enum rtx_code this_code = GET_CODE (this_op);
3649 ops[n_ops].op = XEXP (this_op, 1);
3650 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3653 ops[i].op = XEXP (this_op, 0);
3656 canonicalized |= this_neg;
3660 ops[i].op = XEXP (this_op, 0);
3661 ops[i].neg = ! this_neg;
3668 && GET_CODE (XEXP (this_op, 0)) == PLUS
3669 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3670 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3672 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3673 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3674 ops[n_ops].neg = this_neg;
3682 /* ~a -> (-a - 1) */
3685 ops[n_ops].op = constm1_rtx;
3686 ops[n_ops++].neg = this_neg;
3687 ops[i].op = XEXP (this_op, 0);
3688 ops[i].neg = !this_neg;
3698 ops[i].op = neg_const_int (mode, this_op);
3712 if (n_constants > 1)
3715 gcc_assert (n_ops >= 2);
3717 /* If we only have two operands, we can avoid the loops. */
3720 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3723 /* Get the two operands. Be careful with the order, especially for
3724 the cases where code == MINUS. */
3725 if (ops[0].neg && ops[1].neg)
3727 lhs = gen_rtx_NEG (mode, ops[0].op);
3730 else if (ops[0].neg)
3741 return simplify_const_binary_operation (code, mode, lhs, rhs);
3744 /* Now simplify each pair of operands until nothing changes. */
3747 /* Insertion sort is good enough for an eight-element array. */
3748 for (i = 1; i < n_ops; i++)
3750 struct simplify_plus_minus_op_data save;
3752 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3758 ops[j + 1] = ops[j];
3759 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3764 for (i = n_ops - 1; i > 0; i--)
3765 for (j = i - 1; j >= 0; j--)
3767 rtx lhs = ops[j].op, rhs = ops[i].op;
3768 int lneg = ops[j].neg, rneg = ops[i].neg;
3770 if (lhs != 0 && rhs != 0)
3772 enum rtx_code ncode = PLUS;
3778 tem = lhs, lhs = rhs, rhs = tem;
3780 else if (swap_commutative_operands_p (lhs, rhs))
3781 tem = lhs, lhs = rhs, rhs = tem;
3783 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3784 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3786 rtx tem_lhs, tem_rhs;
3788 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3789 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3790 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3792 if (tem && !CONSTANT_P (tem))
3793 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3796 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3798 /* Reject "simplifications" that just wrap the two
3799 arguments in a CONST. Failure to do so can result
3800 in infinite recursion with simplify_binary_operation
3801 when it calls us to simplify CONST operations. */
3803 && ! (GET_CODE (tem) == CONST
3804 && GET_CODE (XEXP (tem, 0)) == ncode
3805 && XEXP (XEXP (tem, 0), 0) == lhs
3806 && XEXP (XEXP (tem, 0), 1) == rhs))
3809 if (GET_CODE (tem) == NEG)
3810 tem = XEXP (tem, 0), lneg = !lneg;
3811 if (CONST_INT_P (tem) && lneg)
3812 tem = neg_const_int (mode, tem), lneg = 0;
3816 ops[j].op = NULL_RTX;
3823 /* If nothing changed, fail. */
3827 /* Pack all the operands to the lower-numbered entries. */
3828 for (i = 0, j = 0; j < n_ops; j++)
3838 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3840 && CONST_INT_P (ops[1].op)
3841 && CONSTANT_P (ops[0].op)
3843 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3845 /* We suppressed creation of trivial CONST expressions in the
3846 combination loop to avoid recursion. Create one manually now.
3847 The combination loop should have ensured that there is exactly
3848 one CONST_INT, and the sort will have ensured that it is last
3849 in the array and that any other constant will be next-to-last. */
3852 && CONST_INT_P (ops[n_ops - 1].op)
3853 && CONSTANT_P (ops[n_ops - 2].op))
3855 rtx value = ops[n_ops - 1].op;
3856 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3857 value = neg_const_int (mode, value);
3858 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3862 /* Put a non-negated operand first, if possible. */
3864 for (i = 0; i < n_ops && ops[i].neg; i++)
3867 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3876 /* Now make the result by performing the requested operations. */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
3879 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
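/* Worked example (added for exposition): simplify_plus_minus flattens
   (minus (plus A (const_int 3)) (plus A (const_int 1))) into the operand
   list { +A, +3, -A, -1 }, cancels A against -A, folds the constants,
   and rebuilds the result as (const_int 2).  */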
3885 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3887 plus_minus_operand_p (const_rtx x)
3889 return GET_CODE (x) == PLUS
3890 || GET_CODE (x) == MINUS
3891 || (GET_CODE (x) == CONST
3892 && GET_CODE (XEXP (x, 0)) == PLUS
3893 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3894 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3897 /* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands
   must not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
3902 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3903 the operands or, if both are VOIDmode, the operands are compared in
3904 "infinite precision". */
3906 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3907 enum machine_mode cmp_mode, rtx op0, rtx op1)
3909 rtx tem, trueop0, trueop1;
3911 if (cmp_mode == VOIDmode)
3912 cmp_mode = GET_MODE (op0);
3913 if (cmp_mode == VOIDmode)
3914 cmp_mode = GET_MODE (op1);
3916 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3919 if (SCALAR_FLOAT_MODE_P (mode))
3921 if (tem == const0_rtx)
3922 return CONST0_RTX (mode);
3923 #ifdef FLOAT_STORE_FLAG_VALUE
3925 REAL_VALUE_TYPE val;
3926 val = FLOAT_STORE_FLAG_VALUE (mode);
3927 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3933 if (VECTOR_MODE_P (mode))
3935 if (tem == const0_rtx)
3936 return CONST0_RTX (mode);
3937 #ifdef VECTOR_STORE_FLAG_VALUE
3942 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3943 if (val == NULL_RTX)
3945 if (val == const1_rtx)
3946 return CONST1_RTX (mode);
3948 units = GET_MODE_NUNITS (mode);
3949 v = rtvec_alloc (units);
3950 for (i = 0; i < units; i++)
3951 RTVEC_ELT (v, i) = val;
3952 return gen_rtx_raw_CONST_VECTOR (mode, v);
3962 /* For the following tests, ensure const0_rtx is op1. */
3963 if (swap_commutative_operands_p (op0, op1)
3964 || (op0 == const0_rtx && op1 != const0_rtx))
3965 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3967 /* If op0 is a compare, extract the comparison arguments from it. */
3968 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3969 return simplify_gen_relational (code, mode, VOIDmode,
3970 XEXP (op0, 0), XEXP (op0, 1));
3972 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3976 trueop0 = avoid_constant_pool_reference (op0);
3977 trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
3982 /* This part of simplify_relational_operation is only used when CMP_MODE
3983 is not in class MODE_CC (i.e. it is a real comparison).
   MODE is the mode of the result, while CMP_MODE specifies the mode
   in which the comparison is done, so it is the mode of the operands.  */
static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);
3994 if (op1 == const0_rtx && COMPARISON_P (op0))
3996 /* If op0 is a comparison, extract the comparison arguments
4000 if (GET_MODE (op0) == mode)
4001 return simplify_rtx (op0);
4003 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4004 XEXP (op0, 0), XEXP (op0, 1));
4006 else if (code == EQ)
4008 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4009 if (new_code != UNKNOWN)
4010 return simplify_gen_relational (new_code, mode, VOIDmode,
4011 XEXP (op0, 0), XEXP (op0, 1));
4015 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4016 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4017 if ((code == LTU || code == GEU)
4018 && GET_CODE (op0) == PLUS
4019 && CONST_INT_P (XEXP (op0, 1))
4020 && (rtx_equal_p (op1, XEXP (op0, 0))
4021 || rtx_equal_p (op1, XEXP (op0, 1))))
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4025 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }
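  /* Worked example (added for exposition): in SImode,
       (ltu (plus A (const_int 5)) (const_int 5))
     holds exactly when the addition wrapped, i.e. when A >= -5 viewed
     as unsigned, so it becomes (geu A (const_int -5)).  */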
4029 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4030 if ((code == LTU || code == GEU)
4031 && GET_CODE (op0) == PLUS
4032 && rtx_equal_p (op1, XEXP (op0, 1))
4033 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4034 && !rtx_equal_p (op1, XEXP (op0, 0)))
4035 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
4037 if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
4080 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4081 if ((code == EQ || code == NE)
4082 && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
4087 rtx x = XEXP (op0, 0);
4088 rtx c = XEXP (op0, 1);
      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
			       cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }
4095 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4096 the same as (zero_extract:SI FOO (const_int 1) BAR). */
  if (code == NE
      && op1 == const0_rtx
4099 && GET_MODE_CLASS (mode) == MODE_INT
4100 && cmp_mode != VOIDmode
4101 /* ??? Work-around BImode bugs in the ia64 backend. */
      && mode != BImode
      && cmp_mode != BImode
4104 && nonzero_bits (op0, cmp_mode) == 1
4105 && STORE_FLAG_VALUE == 1)
4106 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4107 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4108 : lowpart_subreg (mode, op0, cmp_mode);
4110 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4111 if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
4114 return simplify_gen_relational (code, mode, cmp_mode,
4115 XEXP (op0, 0), XEXP (op0, 1));
4117 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4118 if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
4121 && !side_effects_p (XEXP (op0, 0)))
4122 return simplify_gen_relational (code, mode, cmp_mode,
4123 XEXP (op0, 1), const0_rtx);
4125 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4126 if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
4129 && !side_effects_p (XEXP (op0, 1)))
4130 return simplify_gen_relational (code, mode, cmp_mode,
4131 XEXP (op0, 0), const0_rtx);
4133 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4134 if ((code == EQ || code == NE)
      && op0code == XOR
      && (CONST_INT_P (op1)
4137 || GET_CODE (op1) == CONST_DOUBLE)
4138 && (CONST_INT_P (XEXP (op0, 1))
4139 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4140 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4141 simplify_gen_binary (XOR, cmp_mode,
4142 XEXP (op0, 1), op1));
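  /* Worked example (added for exposition):
     (eq (xor X (const_int 3)) (const_int 5)) becomes
     (eq X (const_int 6)), since 3 ^ 5 == 6.  */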
  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}

enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};
4178 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4181 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4182 For floating-point comparisons, assume that the operands were ordered. */
static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
4229 /* Check if the given comparison (done in the given MODE) is actually a
4230 tautology or a contradiction.
4231 If no simplification is possible, this function returns zero.
4232 Otherwise, it returns either const_true_rtx or const0_rtx. */
4235 simplify_const_relational_operation (enum rtx_code code,
4236 enum machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  gcc_assert (mode != VOIDmode
4244 || (GET_MODE (op0) == VOIDmode
4245 && GET_MODE (op1) == VOIDmode));
4247 /* If op0 is a compare, extract the comparison arguments from it. */
4248 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4250 op1 = XEXP (op0, 1);
4251 op0 = XEXP (op0, 0);
4253 if (GET_MODE (op0) != VOIDmode)
4254 mode = GET_MODE (op0);
4255 else if (GET_MODE (op1) != VOIDmode)
4256 mode = GET_MODE (op1);
4261 /* We can't simplify MODE_CC values since we don't know what the
4262 actual comparison is. */
4263 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4266 /* Make sure the constant is second. */
4267 if (swap_commutative_operands_p (op0, op1))
4269 tem = op0, op0 = op1, op1 = tem;
4270 code = swap_condition (code);
4273 trueop0 = avoid_constant_pool_reference (op0);
4274 trueop1 = avoid_constant_pool_reference (op1);
4276 /* For integer comparisons of A and B maybe we can simplify A - B and can
4277 then simplify a comparison of that with zero. If A and B are both either
4278 a register or a CONST_INT, this can't help; testing for these cases will
4279 prevent infinite recursion here and speed things up.
4281 We can only do this for EQ and NE comparisons as otherwise we may
4282 lose or introduce overflow which we cannot disregard as undefined as
4283 we do not know the signedness of the operation on either the left or
4284 the right hand side of the comparison. */
4286 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4287 && (code == EQ || code == NE)
4288 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4289 && (REG_P (op1) || CONST_INT_P (trueop1)))
4290 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4291 /* We cannot do this if tem is a nonzero address. */
4292 && ! nonzero_address_p (tem))
4293 return simplify_const_relational_operation (signed_condition (code),
4294 mode, tem, const0_rtx);
4296 if (! HONOR_NANS (mode) && code == ORDERED)
4297 return const_true_rtx;
4299 if (! HONOR_NANS (mode) && code == UNORDERED)
4302 /* For modes without NaNs, if the two operands are equal, we know the
4303 result except if they have side-effects. Even with NaNs we know
4304 the result of unordered comparisons and, if signaling NaNs are
4305 irrelevant, also the result of LT/GT/LTGT. */
4306 if ((! HONOR_NANS (GET_MODE (trueop0))
4307 || code == UNEQ || code == UNLE || code == UNGE
4308 || ((code == LT || code == GT || code == LTGT)
4309 && ! HONOR_SNANS (GET_MODE (trueop0))))
4310 && rtx_equal_p (trueop0, trueop1)
4311 && ! side_effects_p (trueop0))
4312 return comparison_result (code, CMP_EQ);
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (GET_CODE (trueop0) == CONST_DOUBLE
4317 && GET_CODE (trueop1) == CONST_DOUBLE
4318 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4320 REAL_VALUE_TYPE d0, d1;
4322 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4323 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4325 /* Comparisons are unordered iff at least one of the values is NaN. */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }
4349 return comparison_result (code,
4350 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4351 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4354 /* Otherwise, see if the operands are both integers. */
4355 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4356 && (GET_CODE (trueop0) == CONST_DOUBLE
4357 || CONST_INT_P (trueop0))
4358 && (GET_CODE (trueop1) == CONST_DOUBLE
4359 || CONST_INT_P (trueop1)))
4361 int width = GET_MODE_BITSIZE (mode);
4362 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4363 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4365 /* Get the two words comprising each integer constant. */
4366 if (GET_CODE (trueop0) == CONST_DOUBLE)
4368 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4369 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4373 l0u = l0s = INTVAL (trueop0);
4374 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4377 if (GET_CODE (trueop1) == CONST_DOUBLE)
4379 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4380 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4384 l1u = l1s = INTVAL (trueop1);
4385 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4388 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4389 we have to sign or zero-extend the values. */
4390 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4392 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4393 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4395 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4396 l0s |= ((HOST_WIDE_INT) (-1) << width);
4398 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4399 l1s |= ((HOST_WIDE_INT) (-1) << width);
4401 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4402 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4404 if (h0u == h1u && l0u == l1u)
4405 return comparison_result (code, CMP_EQ);
      {
	int cr;
	cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
	cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
	return comparison_result (code, cr);
      }
    }
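  /* Worked example (added for exposition): comparing the pairs for -1
     (low == ~0, high == -1) and 1 (low == 1, high == 0) yields CMP_LT
     under the signed interpretation but CMP_GTU under the unsigned one;
     comparison_result then picks whichever bit the requested comparison
     code cares about.  */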
4415 /* Optimize comparisons with upper and lower bounds. */
4416 if (SCALAR_INT_MODE_P (mode)
4417 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4418 && CONST_INT_P (trueop1))
4421 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4422 HOST_WIDE_INT val = INTVAL (trueop1);
4423 HOST_WIDE_INT mmin, mmax;
4433 /* Get a reduced range if the sign bit is zero. */
4434 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4441 rtx mmin_rtx, mmax_rtx;
4442 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4444 mmin = INTVAL (mmin_rtx);
4445 mmax = INTVAL (mmax_rtx);
4448 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4450 mmin >>= (sign_copies - 1);
4451 mmax >>= (sign_copies - 1);
4457 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4459 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4460 return const_true_rtx;
4461 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4466 return const_true_rtx;
4471 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4473 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4474 return const_true_rtx;
4475 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4480 return const_true_rtx;
4486 /* x == y is always false for y out of range. */
4487 if (val < mmin || val > mmax)
4491 /* x > y is always false for y >= mmax, always true for y < mmin. */
4493 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4495 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4496 return const_true_rtx;
4502 return const_true_rtx;
4505 /* x < y is always false for y <= mmin, always true for y > mmax. */
4507 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4509 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4510 return const_true_rtx;
4516 return const_true_rtx;
4520 /* x != y is always true for y out of range. */
4521 if (val < mmin || val > mmax)
4522 return const_true_rtx;
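      /* Worked example (added for exposition): if nonzero_bits proves
	 trueop0 fits in [0, 0x7f] (so mmin == 0 and mmax == 0x7f), then
	 (gtu x (const_int 0x7f)) has val >= mmax and folds to
	 const0_rtx.  */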
4530 /* Optimize integer comparisons with zero. */
4531 if (trueop1 == const0_rtx)
4533 /* Some addresses are known to be nonzero. We don't know
4534 their sign, but equality comparisons are known. */
4535 if (nonzero_address_p (trueop0))
4537 if (code == EQ || code == LEU)
4539 if (code == NE || code == GTU)
4540 return const_true_rtx;
4543 /* See if the first operand is an IOR with a constant. If so, we
4544 may be able to determine the result of this comparison. */
4545 if (GET_CODE (op0) == IOR)
4547 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4548 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4550 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4551 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4552 && (INTVAL (inner_const)
4553 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4562 return const_true_rtx;
4566 return const_true_rtx;
4580 /* Optimize comparison of ABS with zero. */
4581 if (trueop1 == CONST0_RTX (mode)
4582 && (GET_CODE (trueop0) == ABS
4583 || (GET_CODE (trueop0) == FLOAT_EXTEND
4584 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4589 /* Optimize abs(x) < 0.0. */
4590 if (!HONOR_SNANS (mode)
4591 && (!INTEGRAL_MODE_P (mode)
4592 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4594 if (INTEGRAL_MODE_P (mode)
4595 && (issue_strict_overflow_warning
4596 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4597 warning (OPT_Wstrict_overflow,
4598 ("assuming signed overflow does not occur when "
4599 "assuming abs (x) < 0 is false"));
4605 /* Optimize abs(x) >= 0.0. */
4606 if (!HONOR_NANS (mode)
4607 && (!INTEGRAL_MODE_P (mode)
4608 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4610 if (INTEGRAL_MODE_P (mode)
4611 && (issue_strict_overflow_warning
4612 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4613 warning (OPT_Wstrict_overflow,
4614 ("assuming signed overflow does not occur when "
4615 "assuming abs (x) >= 0 is true"));
4616 return const_true_rtx;
4621 /* Optimize ! (abs(x) < 0.0). */
4622 return const_true_rtx;
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
4637 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4638 enum machine_mode op0_mode, rtx op0, rtx op1,
4641 unsigned int width = GET_MODE_BITSIZE (mode);
4643 /* VOIDmode means "infinite" precision. */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
4652 && CONST_INT_P (op1)
4653 && CONST_INT_P (op2)
4654 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4655 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);
4660 if (BITS_BIG_ENDIAN)
4661 val >>= (GET_MODE_BITSIZE (op0_mode)
4662 - INTVAL (op2) - INTVAL (op1));
4664 val >>= INTVAL (op2);
4666 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4668 /* First zero-extend. */
4669 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4670 /* If desired, propagate sign bit. */
4671 if (code == SIGN_EXTRACT
4672 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4673 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4676 /* Clear the bits that don't belong in our mode,
4677 unless they and our sign bit are all one.
4678 So we get either a reasonable negative value or a reasonable
4679 unsigned value for this mode. */
4680 if (width < HOST_BITS_PER_WIDE_INT
4681 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4682 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4683 val &= ((HOST_WIDE_INT) 1 << width) - 1;
          return gen_int_mode (val, mode);
        }
      break;
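      /* Worked example (illustrative, assuming !BITS_BIG_ENDIAN):
         (zero_extract:SI (const_int 0xA5) (const_int 4) (const_int 4))
         shifts 0xA5 right by four bits and masks to four bits, giving
         (const_int 10).  */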
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
4691 return op0 != const0_rtx ? op1 : op2;
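      /* E.g. (if_then_else (const_int 0) A B) simplifies to B
         (illustrative).  */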
4693 /* Convert c ? a : a into "a". */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;
4697 /* Convert a != b ? a : b into "a". */
4698 if (GET_CODE (op0) == NE
4699 && ! side_effects_p (op0)
4700 && ! HONOR_NANS (mode)
4701 && ! HONOR_SIGNED_ZEROS (mode)
4702 && ((rtx_equal_p (XEXP (op0, 0), op1)
4703 && rtx_equal_p (XEXP (op0, 1), op2))
4704 || (rtx_equal_p (XEXP (op0, 0), op2)
              && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;
4708 /* Convert a == b ? a : b into "b". */
4709 if (GET_CODE (op0) == EQ
4710 && ! side_effects_p (op0)
4711 && ! HONOR_NANS (mode)
4712 && ! HONOR_SIGNED_ZEROS (mode)
4713 && ((rtx_equal_p (XEXP (op0, 0), op1)
4714 && rtx_equal_p (XEXP (op0, 1), op2))
4715 || (rtx_equal_p (XEXP (op0, 0), op2)
              && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;
4719 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4721 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4722 ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

4726 /* Look for happy constants in op1 and op2. */
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }
4749 if (cmp_mode == VOIDmode)
4750 cmp_mode = op0_mode;
4751 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (CONST_INT_P (temp))
                return temp == const0_rtx ? op2 : op1;
              else
                return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
4770 op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          /* A selector of all zeros yields op1; all ones yields op0.  */
          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;
4782 op0 = avoid_constant_pool_reference (op0);
4783 op1 = avoid_constant_pool_reference (op1);
4784 if (GET_CODE (op0) == CONST_VECTOR
4785 && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

4790 for (i = 0; i < n_elts; i++)
4791 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4792 ? CONST_VECTOR_ELT (op0, i)
4793 : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
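/* Illustrative usage sketch, not part of the original file: folding a
   vec_merge whose CONST_INT selector picks every element from the first
   operand.  The wrapper is hypothetical and assumes the target defines
   V4SImode; simplify_ternary_operation is the real entry point above.  */

static rtx
fold_full_vec_merge_example (rtx op0, rtx op1)
{
  /* For four elements a selector of 0xf selects op0 throughout,
     so the call returns op0.  */
  return simplify_ternary_operation (VEC_MERGE, V4SImode, V4SImode,
                                     op0, op1, GEN_INT (0xf));
}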
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
4833 rtvec result_v = NULL;
4834 enum mode_class outer_class;
4835 enum machine_mode outer_submode;
4837 /* Some ports misuse CCmode. */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;
4841 /* We have no way to represent a complex constant at the rtl level. */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;
4845 /* Unpack the value. */
  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
4859 /* If this asserts, it is too complicated; reducing value_bit may help. */
4860 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4861 /* I don't know how to handle endianness of sub-units. */
4862 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
4889 /* CONST_INTs are always logically sign-extended. */
4890 for (; i < elem_bitsize; i += value_bit)
4891 *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
4897 /* If this triggers, someone should have generated a
4898 CONST_INT instead. */
4899 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4901 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4902 *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }
              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          else
            {
4916 long tmp[max_bitsize / 32];
4917 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4919 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4920 gcc_assert (bitsize <= elem_bitsize);
4921 gcc_assert (bitsize % value_bit == 0);
              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));
4926 /* real_to_target produces its result in words affected by
4927 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4928 and use WORDS_BIG_ENDIAN instead; see the documentation
4929 of SUBREG in rtl.texi. */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
4950 for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
4955 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4956 *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }
4971 /* Now, pick the right byte to start with. */
4972 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4973 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4974 will already have offset 0. */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }
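  /* Worked example (illustrative): innermode DImode, outermode SImode,
     UNITS_PER_WORD of 4, with WORDS_BIG_ENDIAN and BYTES_BIG_ENDIAN set.
     A memory-order BYTE of 0 names the most significant word, so it is
     renumbered here to little-endian byte 4.  */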
4985 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4986 so if it's become negative it will instead be very large.) */
4987 gcc_assert (byte < GET_MODE_SIZE (innermode));
4989 /* Convert from bytes to chunks of size value_bit. */
4990 value_start = byte * (BITS_PER_UNIT / value_bit);
4992 /* Re-pack the value. */
  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }
5008 outer_class = GET_MODE_CLASS (outer_submode);
5009 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5011 gcc_assert (elem_bitsize % value_bit == 0);
5012 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5042 for (; i < elem_bitsize; i += value_bit)
5043 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5044 << (i - HOST_BITS_PER_WIDE_INT));
            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
            else
              return NULL_RTX;
          }
          break;

        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];
5063 /* real_from_target wants its input in words affected by
5064 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
5065 and use WORDS_BIG_ENDIAN instead; see the documentation
5066 of SUBREG in rtl.texi. */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }
5079 real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
5098 for (; i < elem_bitsize; i += value_bit)
5099 f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
5100 << (i - HOST_BITS_PER_WIDE_INT));
            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }
5110 if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
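/* Illustrative usage sketch, not part of the original file: constant
   subregs normally reach the routine above through simplify_subreg.
   The wrapper is hypothetical; DI_CONST is assumed to be a DImode
   CONST_INT or CONST_DOUBLE.  */

static rtx
lowpart_of_di_constant_example (rtx di_const)
{
  /* Reads the low SImode word; subreg_lowpart_offset picks the right
     byte on either endianness.  */
  return simplify_subreg (SImode, di_const, DImode,
                          subreg_lowpart_offset (SImode, DImode));
}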
5116 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
5122 /* Little bit of sanity checking. */
5123 gcc_assert (innermode != VOIDmode);
5124 gcc_assert (outermode != VOIDmode);
5125 gcc_assert (innermode != BLKmode);
5126 gcc_assert (outermode != BLKmode);
5128 gcc_assert (GET_MODE (op) == innermode
5129 || GET_MODE (op) == VOIDmode);
5131 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5132 gcc_assert (byte < GET_MODE_SIZE (innermode));
  if (outermode == innermode && !byte)
    return op;
5137 if (CONST_INT_P (op)
5138 || GET_CODE (op) == CONST_DOUBLE
5139 || GET_CODE (op) == CONST_FIXED
5140 || GET_CODE (op) == CONST_VECTOR)
5141 return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of op.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;
5151 if (outermode == innermostmode
5152 && byte == 0 && SUBREG_BYTE (op) == 0)
5153 return SUBREG_REG (op);
      /* The SUBREG_BYTE represents the offset, as if the value were stored
         in memory.  An irritating exception is the paradoxical subreg, where
         we define SUBREG_BYTE to be 0; on big endian machines, this value
         should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
5177 /* See whether resulting subreg will be paradoxical. */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));
          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }
5205 /* Recurse for further possible simplifications. */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;

      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
5214 if (SUBREG_PROMOTED_VAR_P (op)
5215 && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
5216 && GET_MODE_CLASS (outermode) == MODE_INT
5217 && IN_RANGE (GET_MODE_SIZE (outermode),
5218 GET_MODE_SIZE (innermode),
5219 GET_MODE_SIZE (innermostmode))
5220 && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }

      return NULL_RTX;
    }
5231 /* Merge implicit and explicit truncations. */
5233 if (GET_CODE (op) == TRUNCATE
5234 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5235 && subreg_lowpart_offset (outermode, innermode) == byte)
5236 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5237 GET_MODE (XEXP (op, 0)));
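  /* Worked example (illustrative, little-endian):
     (subreg:QI (truncate:SI (reg:DI x)) 0) merges into
     (truncate:QI (reg:DI x)).  */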
5239 /* SUBREG of a hard register => just change the register number
5240 and/or mode. If the hard register is not valid in that mode,
5241 suppress this simplification. If the hard register is the stack,
5242 frame, or argument pointer, leave this as a SUBREG. */
  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;
          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }
5267 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
          /* Propagate the original regno.  We don't have any way to specify
             the offset inside the original regno, so do so only for the
             lowpart.  The information is used only by alias analysis, which
             cannot grok a partial register anyway.  */
5274 if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
5280 /* If we have a SUBREG of a register that we are replacing and we are
5281 replacing it with a MEM, make a new MEM and try replacing the
5282 SUBREG with it. Don't do this if the MEM has a mode-dependent address
5283 or if we would be widening it. */
  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
5287 /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
5289 && (! MEM_VOLATILE_P (op)
5290 || ! have_insn_for (SET, innermode))
5291 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
5292 return adjust_address_nv (op, outermode, byte);
5294 /* Handle complex values represented as CONCAT
5295 of real and imaginary part. */
5296 if (GET_CODE (op) == CONCAT)
      unsigned int part_size, final_offset;
      rtx part, res;

5301 part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
5319 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
5324 /* Optimize SUBREG truncations of zero and sign extended values. */
5325 if ((GET_CODE (op) == ZERO_EXTEND
5326 || GET_CODE (op) == SIGN_EXTEND)
5327 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
5329 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
5331 /* If we're requesting the lowpart of a zero or sign extension,
5332 there are three possibilities. If the outermode is the same
5333 as the origmode, we can omit both the extension and the subreg.
5334 If the outermode is not larger than the origmode, we can apply
5335 the truncation without the extension. Finally, if the outermode
5336 is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
5341 if (outermode == origmode)
5342 return XEXP (op, 0);
5343 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
5344 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
5347 if (SCALAR_INT_MODE_P (outermode))
5348 return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }
      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
5354 if (GET_CODE (op) == ZERO_EXTEND
5355 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
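  /* Worked example (illustrative, little-endian): in
     (subreg:SI (zero_extend:DI (reg:SI x)) 4) the subreg reads bits
     32-63, which the zero extension guarantees are zero, so the whole
     expression folds to (const_int 0).  */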
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
5362 if ((GET_CODE (op) == LSHIFTRT
5363 || GET_CODE (op) == ASHIFTRT)
5364 && SCALAR_INT_MODE_P (outermode)
5365 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5366 to avoid the possibility that an outer LSHIFTRT shifts by more
5367 than the sign extension's sign_bit_copies and introduces zeros
5368 into the high bits of the result. */
5369 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5370 && CONST_INT_P (XEXP (op, 1))
5371 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5372 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5373 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5374 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5375 return simplify_gen_binary (ASHIFTRT, outermode,
5376 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
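  /* Worked example (illustrative): with x in QImode,
     (subreg:QI (lshiftrt:SI (sign_extend:SI (reg:QI x)) (const_int 2)) 0)
     becomes (ashiftrt:QI (reg:QI x) (const_int 2)).  */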
  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
5381 if ((GET_CODE (op) == LSHIFTRT
5382 || GET_CODE (op) == ASHIFTRT)
5383 && SCALAR_INT_MODE_P (outermode)
5384 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5385 && CONST_INT_P (XEXP (op, 1))
5386 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5387 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5388 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5389 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5390 return simplify_gen_binary (LSHIFTRT, outermode,
5391 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
5396 if (GET_CODE (op) == ASHIFT
5397 && SCALAR_INT_MODE_P (outermode)
5398 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5399 && CONST_INT_P (XEXP (op, 1))
5400 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5401 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5402 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5403 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5404 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5405 return simplify_gen_binary (ASHIFT, outermode,
5406 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
5408 /* Recognize a word extraction from a multi-word subreg. */
5409 if ((GET_CODE (op) == LSHIFTRT
5410 || GET_CODE (op) == ASHIFTRT)
5411 && SCALAR_INT_MODE_P (outermode)
5412 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5413 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5414 && CONST_INT_P (XEXP (op, 1))
5415 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5416 && INTVAL (XEXP (op, 1)) >= 0
5417 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5418 && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  return NULL_RTX;
}
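/* Worked example (illustrative, 32-bit little-endian words): in
   (subreg:SI (lshiftrt:DI (reg:DI x) (const_int 32)) 0) the low word of
   the shifted value is the high word of x, so the rule above rewrites it
   as (subreg:SI (reg:DI x) 4).  */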
5430 /* Make a SUBREG operation or equivalent if it folds. */
rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
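/* Illustrative usage sketch, not part of the original file: taking the
   lowpart of X in OUTERMODE, in the style of the gen_lowpart helpers.
   The wrapper itself is hypothetical.  */

static rtx
example_lowpart (enum machine_mode outermode, rtx x)
{
  return simplify_gen_subreg (outermode, x, GET_MODE (x),
                              subreg_lowpart_offset (outermode,
                                                     GET_MODE (x)));
}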
5453 /* Simplify X, an rtx expression.
   Return the simplified expression or NULL if no simplifications
   were possible.
5458 This is the preferred entry point into the simplification routines;
5459 however, we still allow passes to call the more specific routines.
5461 Right now GCC has three (yes, three) major bodies of RTL simplification
5462 code that need to be unified.
5464 1. fold_rtx in cse.c. This code uses various CSE specific
5465 information to aid in RTL simplification.
5467 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
5468 it uses combine specific information to aid in RTL
5471 3. The routines in this file.
5474 Long term we want to only have one body of simplification code; to
5475 get to that state I recommend the following steps:
        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which do not depend on pass-specific state into these routines.
5480 2. As code is moved by #1, change fold_rtx & simplify_rtx to
5481 use this routine whenever possible.
        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.
   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
5496 const enum rtx_code code = GET_CODE (x);
5497 const enum machine_mode mode = GET_MODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
5503 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
5504 case RTX_COMM_ARITH:
5505 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5506 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));
    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
5539 /* Convert (lo_sum (high FOO) FOO) to FOO. */
5540 if (GET_CODE (XEXP (x, 0)) == HIGH
5541 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))