/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
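
/* For instance (illustration only, not part of the original sources),
   assuming a 64-bit HOST_WIDE_INT:

     HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5)  ==  -1
     HWI_SIGN_EXTEND ((HOST_WIDE_INT)  7)  ==   0

   so the pair (-5, -1) is the double-width sign extension of -5.  */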
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
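
/* Example (illustrative): in QImode, negating (const_int -128)
   overflows, since +128 is not representable in 8 signed bits;
   gen_int_mode truncates the result back to the mode, so the value
   wraps to -128 again.  */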
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
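
/* Example (illustrative): for 32-bit SImode, (const_int -2147483648)
   has only bit 31 set once masked to the mode, so mode_signbit_p
   returns true for it; any other constant, or a non-MODE_INT mode,
   yields false.  */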
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
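
/* Usage sketch (illustrative): simplify_gen_binary (PLUS, SImode,
   const1_rtx, reg) first tries constant folding and otherwise
   returns the canonicalized (plus:SI reg (const_int 1)), with the
   constant placed second because PLUS is commutative.  */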
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  if (MEM_P (x)
      && MEM_EXPR (x)
      && (!MEM_OFFSET (x)
          || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp = 0, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          if (MEM_OFFSET (x))
            offset += INTVAL (MEM_OFFSET (x));

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with FN (X', DATA), where X'
   is an expression in X that is equal to OLD_RTX.  Canonicalize and
   simplify the result.

   If FN is null, assume FN (X', DATA) == copy_rtx (DATA).  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  /* If X is OLD_RTX, return FN (X, DATA), with a null FN.  Otherwise,
     if this is an expression, try to build a new expression, substituting
     recursively.  If we can't do anything, return our input.  */

  if (rtx_equal_p (x, old_rtx))
    {
      if (fn)
        return fn (x, data);
      else if (data)
        return copy_rtx ((rtx) data);
    }

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
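
/* Usage sketch (illustrative): with X = (plus:SI (reg:SI 100)
   (const_int 1)), OLD_RTX = (reg:SI 100) and NEW_RTX = (const_int 4),
   simplify_replace_rtx substitutes and then folds, yielding
   (const_int 5).  */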
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
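
      /* Example of the De Morgan rewrite above (illustrative):
         (not (and X Y)) becomes (ior (not X) (not Y)), and
         (not (ior X Y)) becomes (and (not X) (not Y)), exposing
         and-not/nand/nor patterns to the backend.  */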
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_INT_P (XEXP (op, 1))
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
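
      /* Worked example for the LT rewrite above (illustrative, assuming
         a target with STORE_FLAG_VALUE == 1 and 32-bit SImode):
         (neg (lt:SI x (const_int 0))) becomes
         (ashiftrt:SI x (const_int 31)), i.e. 0 when x >= 0 and -1
         when x < 0.  */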
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
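
      /* Example for the comparison rewrite above (illustrative): when
         STORE_FLAG_VALUE is 1, a (truncate:QI (eq:SI a b)) only needs
         the low bit, and GET_MODE_MASK (QImode) covers it, so the
         truncation can be expressed as a simple lowpart subreg.  */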
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x) */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned) significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned) significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode)
                        == GET_MODE_INNER (GET_MODE (op)));
        }
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
        case US_NEG:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
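
  /* Worked example for the BSWAP folding above (illustrative): in
     32-bit SImode, arg0 = 0x11223344 is taken apart a byte at a time
     and reassembled in the opposite order, giving 0x44332211.  */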
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
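
/* Example (illustrative): for (plus (plus x (const_int 1))
   (const_int 2)), the "(a op b) op c" -> "a op (b op c)" attempt
   folds the two constants, so the whole tree simplifies to
   (plus x (const_int 3)).  */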
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
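
/* Usage sketch (illustrative): simplify_binary_operation (PLUS,
   SImode, (const_int 2), (const_int 3)) folds to (const_int 5);
   when no constant folding applies, the code-specific rules in
   simplify_binary_operation_1 below get their chance.  */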
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
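
      /* Example of the coefficient merging above (illustrative): in
         SImode, (plus (mult x (const_int 3)) x) has coefficients 3
         and 1 for the same operand, so it becomes
         (mult x (const_int 4)) when that is no more expensive than
         the original.  */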
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_INT_P (op1)
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;
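
      /* Example (illustrative): (minus x (and x y)) clears exactly
         the bits of x that y keeps, and since (and x y) never sets a
         bit x lacks, no borrows occur; the rule above rewrites it to
         (and x (not y)) with no subtraction at all.  */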
    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
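
      /* Example of the power-of-two rule above (illustrative): in
         SImode, (mult x (const_int 8)) becomes
         (ashift x (const_int 3)); exact_log2 returns 3 for 8 and -1
         for any non-power-of-two, which disables the rewrite.  */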
2200 if (trueop1 == const0_rtx)
2202 if (CONST_INT_P (trueop1)
2203 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2204 == GET_MODE_MASK (mode)))
2206 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2208 /* A | (~A) -> -1 */
2209 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2210 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2211 && ! side_effects_p (op0)
2212 && SCALAR_INT_MODE_P (mode))
2215 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2216 if (CONST_INT_P (op1)
2217 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2218 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2221 /* Canonicalize (X & C1) | C2. */
2222 if (GET_CODE (op0) == AND
2223 && CONST_INT_P (trueop1)
2224 && CONST_INT_P (XEXP (op0, 1)))
2226 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2227 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2228 HOST_WIDE_INT c2 = INTVAL (trueop1);
2230 /* If (C1&C2) == C1, then (X&C1)|C2 becomes X. */
2232 && !side_effects_p (XEXP (op0, 0)))
2235 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2236 if (((c1|c2) & mask) == mask)
2237 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2239 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2240 if (((c1 & ~c2) & mask) != (c1 & mask))
2242 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2243 gen_int_mode (c1 & ~c2, mode));
2244 return simplify_gen_binary (IOR, mode, tem, op1);
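/* Worked example (illustrative): with C1 == 0x0f and C2 == 0x05,
(c1 & ~c2) == 0x0a differs from C1, so (X & 0x0f) | 0x05 becomes
(X & 0x0a) | 0x05; bits that C2 forces on anyway are dropped from
the AND mask.  */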
2248 /* Convert (A & B) | A to A. */
2249 if (GET_CODE (op0) == AND
2250 && (rtx_equal_p (XEXP (op0, 0), op1)
2251 || rtx_equal_p (XEXP (op0, 1), op1))
2252 && ! side_effects_p (XEXP (op0, 0))
2253 && ! side_effects_p (XEXP (op0, 1)))
2256 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2257 mode size to (rotate A CX). */
2259 if (GET_CODE (op1) == ASHIFT
2260 || GET_CODE (op1) == SUBREG)
2271 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2272 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2273 && CONST_INT_P (XEXP (opleft, 1))
2274 && CONST_INT_P (XEXP (opright, 1))
2275 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2276 == GET_MODE_BITSIZE (mode)))
2277 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
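/* Illustrative example: in SImode (32 bits),
(ior (ashift x (const_int 24)) (lshiftrt x (const_int 8))) has
24 + 8 == 32 and so becomes (rotate x (const_int 24)).  */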
2279 /* Same, but for ashift that has been "simplified" to a wider mode
2280 by simplify_shift_const. */
2282 if (GET_CODE (opleft) == SUBREG
2283 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2284 && GET_CODE (opright) == LSHIFTRT
2285 && GET_CODE (XEXP (opright, 0)) == SUBREG
2286 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2287 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2288 && (GET_MODE_SIZE (GET_MODE (opleft))
2289 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2290 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2291 SUBREG_REG (XEXP (opright, 0)))
2292 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2293 && CONST_INT_P (XEXP (opright, 1))
2294 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2295 == GET_MODE_BITSIZE (mode)))
2296 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2297 XEXP (SUBREG_REG (opleft), 1));
2299 /* If we have (ior (and X C1) C2), simplify this by making
2300 C1 as small as possible if C1 actually changes.  */
2301 if (CONST_INT_P (op1)
2302 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2303 || INTVAL (op1) > 0)
2304 && GET_CODE (op0) == AND
2305 && CONST_INT_P (XEXP (op0, 1))
2306 && CONST_INT_P (op1)
2307 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2308 return simplify_gen_binary (IOR, mode,
2310 (AND, mode, XEXP (op0, 0),
2311 GEN_INT (INTVAL (XEXP (op0, 1))
2315 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2316 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2317 the PLUS does not affect any of the bits in OP1: then we can do
2318 the IOR as a PLUS and we can associate. This is valid if OP1
2319 can be safely shifted left C bits. */
2320 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2321 && GET_CODE (XEXP (op0, 0)) == PLUS
2322 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2323 && CONST_INT_P (XEXP (op0, 1))
2324 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2326 int count = INTVAL (XEXP (op0, 1));
2327 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2329 if (mask >> count == INTVAL (trueop1)
2330 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2331 return simplify_gen_binary (ASHIFTRT, mode,
2332 plus_constant (XEXP (op0, 0), mask),
2336 tem = simplify_associative_operation (code, mode, op0, op1);
2342 if (trueop1 == const0_rtx)
2344 if (CONST_INT_P (trueop1)
2345 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2346 == GET_MODE_MASK (mode)))
2347 return simplify_gen_unary (NOT, mode, op0, mode);
2348 if (rtx_equal_p (trueop0, trueop1)
2349 && ! side_effects_p (op0)
2350 && GET_MODE_CLASS (mode) != MODE_CC)
2351 return CONST0_RTX (mode);
2353 /* Canonicalize XOR of the most significant bit to PLUS. */
2354 if ((CONST_INT_P (op1)
2355 || GET_CODE (op1) == CONST_DOUBLE)
2356 && mode_signbit_p (mode, op1))
2357 return simplify_gen_binary (PLUS, mode, op0, op1);
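/* Illustrative example: in QImode the sign bit is 0x80, and
x ^ 0x80 == (x + 0x80) mod 256, so (xor:QI x (const_int -128))
is canonicalized to (plus:QI x (const_int -128)).  */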
2358 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2359 if ((CONST_INT_P (op1)
2360 || GET_CODE (op1) == CONST_DOUBLE)
2361 && GET_CODE (op0) == PLUS
2362 && (CONST_INT_P (XEXP (op0, 1))
2363 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2364 && mode_signbit_p (mode, XEXP (op0, 1)))
2365 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2366 simplify_gen_binary (XOR, mode, op1,
2369 /* If we are XORing two things that have no bits in common,
2370 convert them into an IOR. This helps to detect rotation encoded
2371 using those methods and possibly other simplifications. */
2373 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2374 && (nonzero_bits (op0, mode)
2375 & nonzero_bits (op1, mode)) == 0)
2376 return (simplify_gen_binary (IOR, mode, op0, op1));
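/* Illustrative example: in
(xor (and x (const_int 240)) (and y (const_int 15))) the two
operands have disjoint nonzero bits, so the XOR can never see two
set bits in the same position and is equivalent to IOR.  */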
2378 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2379 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2380 (NOT y).  */
2382 int num_negated = 0;
2384 if (GET_CODE (op0) == NOT)
2385 num_negated++, op0 = XEXP (op0, 0);
2386 if (GET_CODE (op1) == NOT)
2387 num_negated++, op1 = XEXP (op1, 0);
2389 if (num_negated == 2)
2390 return simplify_gen_binary (XOR, mode, op0, op1);
2391 else if (num_negated == 1)
2392 return simplify_gen_unary (NOT, mode,
2393 simplify_gen_binary (XOR, mode, op0, op1),
2397 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2398 correspond to a machine insn or result in further simplifications
2399 if B is a constant. */
2401 if (GET_CODE (op0) == AND
2402 && rtx_equal_p (XEXP (op0, 1), op1)
2403 && ! side_effects_p (op1))
2404 return simplify_gen_binary (AND, mode,
2405 simplify_gen_unary (NOT, mode,
2406 XEXP (op0, 0), mode),
2409 else if (GET_CODE (op0) == AND
2410 && rtx_equal_p (XEXP (op0, 0), op1)
2411 && ! side_effects_p (op1))
2412 return simplify_gen_binary (AND, mode,
2413 simplify_gen_unary (NOT, mode,
2414 XEXP (op0, 1), mode),
2417 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2418 comparison if STORE_FLAG_VALUE is 1. */
2419 if (STORE_FLAG_VALUE == 1
2420 && trueop1 == const1_rtx
2421 && COMPARISON_P (op0)
2422 && (reversed = reversed_comparison (op0, mode)))
2425 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2426 is (lt foo (const_int 0)), so we can perform the above
2427 simplification if STORE_FLAG_VALUE is 1. */
2429 if (STORE_FLAG_VALUE == 1
2430 && trueop1 == const1_rtx
2431 && GET_CODE (op0) == LSHIFTRT
2432 && CONST_INT_P (XEXP (op0, 1))
2433 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2434 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
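/* Illustrative example: in SImode, (lshiftrt x (const_int 31))
extracts the sign bit, i.e. (lt x (const_int 0)); XORing it with
(const_int 1) reverses that test, giving (ge x (const_int 0)).  */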
2436 /* (xor (comparison foo bar) (const_int sign-bit))
2437 when STORE_FLAG_VALUE is the sign bit. */
2438 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2439 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2440 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2441 && trueop1 == const_true_rtx
2442 && COMPARISON_P (op0)
2443 && (reversed = reversed_comparison (op0, mode)))
2446 tem = simplify_associative_operation (code, mode, op0, op1);
2452 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2454 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2456 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2457 HOST_WIDE_INT nzop1;
2458 if (CONST_INT_P (trueop1))
2460 HOST_WIDE_INT val1 = INTVAL (trueop1);
2461 /* If we are turning off bits already known off in OP0, we need
2462 not do an AND.  */
2463 if ((nzop0 & ~val1) == 0)
2466 nzop1 = nonzero_bits (trueop1, mode);
2467 /* If we are clearing all the nonzero bits, the result is zero. */
2468 if ((nzop1 & nzop0) == 0
2469 && !side_effects_p (op0) && !side_effects_p (op1))
2470 return CONST0_RTX (mode);
2472 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2473 && GET_MODE_CLASS (mode) != MODE_CC)
2476 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2477 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2478 && ! side_effects_p (op0)
2479 && GET_MODE_CLASS (mode) != MODE_CC)
2480 return CONST0_RTX (mode);
2482 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2483 there are no nonzero bits of C outside of X's mode. */
2484 if ((GET_CODE (op0) == SIGN_EXTEND
2485 || GET_CODE (op0) == ZERO_EXTEND)
2486 && CONST_INT_P (trueop1)
2487 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2488 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2489 & INTVAL (trueop1)) == 0)
2491 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2492 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2493 gen_int_mode (INTVAL (trueop1),
2495 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
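/* Illustrative example: (and:SI (sign_extend:SI x:QI) (const_int 0x7c))
has no bit of 0x7c outside QImode's mask 0xff, so it becomes
(zero_extend:SI (and:QI x (const_int 0x7c))).  */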
2498 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2499 we might be able to further simplify the AND with X and potentially
2500 remove the truncation altogether. */
2501 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2503 rtx x = XEXP (op0, 0);
2504 enum machine_mode xmode = GET_MODE (x);
2505 tem = simplify_gen_binary (AND, xmode, x,
2506 gen_int_mode (INTVAL (trueop1), xmode));
2507 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2510 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2511 if (GET_CODE (op0) == IOR
2512 && CONST_INT_P (trueop1)
2513 && CONST_INT_P (XEXP (op0, 1)))
2515 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2516 return simplify_gen_binary (IOR, mode,
2517 simplify_gen_binary (AND, mode,
2518 XEXP (op0, 0), op1),
2519 gen_int_mode (tmp, mode));
2522 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2523 insn (and may simplify more). */
2524 if (GET_CODE (op0) == XOR
2525 && rtx_equal_p (XEXP (op0, 0), op1)
2526 && ! side_effects_p (op1))
2527 return simplify_gen_binary (AND, mode,
2528 simplify_gen_unary (NOT, mode,
2529 XEXP (op0, 1), mode),
2532 if (GET_CODE (op0) == XOR
2533 && rtx_equal_p (XEXP (op0, 1), op1)
2534 && ! side_effects_p (op1))
2535 return simplify_gen_binary (AND, mode,
2536 simplify_gen_unary (NOT, mode,
2537 XEXP (op0, 0), mode),
2540 /* Similarly for (~(A ^ B)) & A. */
2541 if (GET_CODE (op0) == NOT
2542 && GET_CODE (XEXP (op0, 0)) == XOR
2543 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2544 && ! side_effects_p (op1))
2545 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2547 if (GET_CODE (op0) == NOT
2548 && GET_CODE (XEXP (op0, 0)) == XOR
2549 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2550 && ! side_effects_p (op1))
2551 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2553 /* Convert (A | B) & A to A. */
2554 if (GET_CODE (op0) == IOR
2555 && (rtx_equal_p (XEXP (op0, 0), op1)
2556 || rtx_equal_p (XEXP (op0, 1), op1))
2557 && ! side_effects_p (XEXP (op0, 0))
2558 && ! side_effects_p (XEXP (op0, 1)))
2561 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2562 ((A & N) + B) & M -> (A + B) & M
2563 Similarly if (N & M) == 0,
2564 ((A | N) + B) & M -> (A + B) & M
2565 and for - instead of + and/or ^ instead of |.
2566 Also, if (N & M) == 0, then
2567 (A +- N) & M -> A & M. */
2568 if (CONST_INT_P (trueop1)
2569 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2570 && ~INTVAL (trueop1)
2571 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2572 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2577 pmop[0] = XEXP (op0, 0);
2578 pmop[1] = XEXP (op0, 1);
2580 if (CONST_INT_P (pmop[1])
2581 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2582 return simplify_gen_binary (AND, mode, pmop[0], op1);
2584 for (which = 0; which < 2; which++)
2587 switch (GET_CODE (tem))
2590 if (CONST_INT_P (XEXP (tem, 1))
2591 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2592 == INTVAL (trueop1))
2593 pmop[which] = XEXP (tem, 0);
2597 if (CONST_INT_P (XEXP (tem, 1))
2598 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2599 pmop[which] = XEXP (tem, 0);
2606 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2608 tem = simplify_gen_binary (GET_CODE (op0), mode,
2610 return simplify_gen_binary (code, mode, tem, op1);
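/* Worked example (illustrative): with M == 0xff and N == 0xff00,
(N & M) == 0, so ((A | 0xff00) + B) & 0xff becomes (A + B) & 0xff;
the IOR only changes bits the final mask discards, and carries
propagate only upward, away from the masked bits.  */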
2614 /* (and X (ior (not X) Y)) -> (and X Y) */
2615 if (GET_CODE (op1) == IOR
2616 && GET_CODE (XEXP (op1, 0)) == NOT
2617 && op0 == XEXP (XEXP (op1, 0), 0))
2618 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2620 /* (and (ior (not X) Y) X) -> (and X Y) */
2621 if (GET_CODE (op0) == IOR
2622 && GET_CODE (XEXP (op0, 0)) == NOT
2623 && op1 == XEXP (XEXP (op0, 0), 0))
2624 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2626 tem = simplify_associative_operation (code, mode, op0, op1);
2632 /* 0/x is 0 (or x&0 if x has side-effects). */
2633 if (trueop0 == CONST0_RTX (mode))
2635 if (side_effects_p (op1))
2636 return simplify_gen_binary (AND, mode, op1, trueop0);
2640 if (trueop1 == CONST1_RTX (mode))
2641 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2642 /* Convert divide by power of two into shift. */
2643 if (CONST_INT_P (trueop1)
2644 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2645 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
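/* Illustrative example: (udiv:SI x (const_int 8)) becomes
(lshiftrt:SI x (const_int 3)).  exact_log2 must be > 0 here, since
the x/1 case was already handled above.  */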
2649 /* Handle floating point and integers separately. */
2650 if (SCALAR_FLOAT_MODE_P (mode))
2652 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2653 safe for modes with NaNs, since 0.0 / 0.0 will then be
2654 NaN rather than 0.0. Nor is it safe for modes with signed
2655 zeros, since dividing 0 by a negative number gives -0.0 */
2656 if (trueop0 == CONST0_RTX (mode)
2657 && !HONOR_NANS (mode)
2658 && !HONOR_SIGNED_ZEROS (mode)
2659 && ! side_effects_p (op1))
2662 if (trueop1 == CONST1_RTX (mode)
2663 && !HONOR_SNANS (mode))
2666 if (GET_CODE (trueop1) == CONST_DOUBLE
2667 && trueop1 != CONST0_RTX (mode))
2670 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2673 if (REAL_VALUES_EQUAL (d, dconstm1)
2674 && !HONOR_SNANS (mode))
2675 return simplify_gen_unary (NEG, mode, op0, mode);
2677 /* Change FP division by a constant into multiplication.
2678 Only do this with -freciprocal-math. */
2679 if (flag_reciprocal_math
2680 && !REAL_VALUES_EQUAL (d, dconst0))
2682 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2683 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2684 return simplify_gen_binary (MULT, mode, op0, tem);
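/* Illustrative example: under -freciprocal-math,
(div:DF x (const_double 4.0)) is rewritten as
(mult:DF x (const_double 0.25)).  */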
2690 /* 0/x is 0 (or x&0 if x has side-effects). */
2691 if (trueop0 == CONST0_RTX (mode))
2693 if (side_effects_p (op1))
2694 return simplify_gen_binary (AND, mode, op1, trueop0);
2698 if (trueop1 == CONST1_RTX (mode))
2699 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2701 if (trueop1 == constm1_rtx)
2703 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2704 return simplify_gen_unary (NEG, mode, x, mode);
2710 /* 0%x is 0 (or x&0 if x has side-effects). */
2711 if (trueop0 == CONST0_RTX (mode))
2713 if (side_effects_p (op1))
2714 return simplify_gen_binary (AND, mode, op1, trueop0);
2717 /* x%1 is 0 (or x&0 if x has side-effects).  */
2718 if (trueop1 == CONST1_RTX (mode))
2720 if (side_effects_p (op0))
2721 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2722 return CONST0_RTX (mode);
2724 /* Implement modulus by power of two as AND. */
2725 if (CONST_INT_P (trueop1)
2726 && exact_log2 (INTVAL (trueop1)) > 0)
2727 return simplify_gen_binary (AND, mode, op0,
2728 GEN_INT (INTVAL (op1) - 1));
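/* Illustrative example: (umod:SI x (const_int 8)) becomes
(and:SI x (const_int 7)); the remainder modulo a power of two is
just the low-order bits.  */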
2732 /* 0%x is 0 (or x&0 if x has side-effects). */
2733 if (trueop0 == CONST0_RTX (mode))
2735 if (side_effects_p (op1))
2736 return simplify_gen_binary (AND, mode, op1, trueop0);
2739 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
2740 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2742 if (side_effects_p (op0))
2743 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2744 return CONST0_RTX (mode);
2751 if (trueop1 == CONST0_RTX (mode))
2753 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2755 /* Rotating ~0 always results in ~0. */
2756 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2757 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2758 && ! side_effects_p (op1))
2761 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2763 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2764 if (val != INTVAL (op1))
2765 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2772 if (trueop1 == CONST0_RTX (mode))
2774 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2776 goto canonicalize_shift;
2779 if (trueop1 == CONST0_RTX (mode))
2781 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2783 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2784 if (GET_CODE (op0) == CLZ
2785 && CONST_INT_P (trueop1)
2786 && STORE_FLAG_VALUE == 1
2787 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2789 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2790 unsigned HOST_WIDE_INT zero_val = 0;
2792 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2793 && zero_val == GET_MODE_BITSIZE (imode)
2794 && INTVAL (trueop1) == exact_log2 (zero_val))
2795 return simplify_gen_relational (EQ, mode, imode,
2796 XEXP (op0, 0), const0_rtx);
2798 goto canonicalize_shift;
2801 if (width <= HOST_BITS_PER_WIDE_INT
2802 && CONST_INT_P (trueop1)
2803 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2804 && ! side_effects_p (op0))
2806 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2808 tem = simplify_associative_operation (code, mode, op0, op1);
2814 if (width <= HOST_BITS_PER_WIDE_INT
2815 && CONST_INT_P (trueop1)
2816 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2817 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2818 && ! side_effects_p (op0))
2820 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2822 tem = simplify_associative_operation (code, mode, op0, op1);
2828 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2830 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2832 tem = simplify_associative_operation (code, mode, op0, op1);
2838 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2840 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2842 tem = simplify_associative_operation (code, mode, op0, op1);
2855 /* ??? There are simplifications that can be done. */
2859 if (!VECTOR_MODE_P (mode))
2861 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2862 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2863 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2864 gcc_assert (XVECLEN (trueop1, 0) == 1);
2865 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2867 if (GET_CODE (trueop0) == CONST_VECTOR)
2868 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2871 /* Extract a scalar element from a nested VEC_SELECT expression
2872 (with optional nested VEC_CONCAT expression). Some targets
2873 (i386) extract a scalar element from a vector using a chain of
2874 nested VEC_SELECT expressions.  When the input operand is a memory
2875 operand, this operation can be simplified to a simple scalar
2876 load from an offset memory address.  */
2877 if (GET_CODE (trueop0) == VEC_SELECT)
2879 rtx op0 = XEXP (trueop0, 0);
2880 rtx op1 = XEXP (trueop0, 1);
2882 enum machine_mode opmode = GET_MODE (op0);
2883 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2884 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2886 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2892 gcc_assert (GET_CODE (op1) == PARALLEL);
2893 gcc_assert (i < n_elts);
2895 /* Select the element pointed to by the nested selector.  */
2896 elem = INTVAL (XVECEXP (op1, 0, i));
2898 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2899 if (GET_CODE (op0) == VEC_CONCAT)
2901 rtx op00 = XEXP (op0, 0);
2902 rtx op01 = XEXP (op0, 1);
2904 enum machine_mode mode00, mode01;
2905 int n_elts00, n_elts01;
2907 mode00 = GET_MODE (op00);
2908 mode01 = GET_MODE (op01);
2910 /* Find out the number of elements in each operand.  */
2911 if (VECTOR_MODE_P (mode00))
2913 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2914 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2919 if (VECTOR_MODE_P (mode01))
2921 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2922 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2927 gcc_assert (n_elts == n_elts00 + n_elts01);
2929 /* Select the correct operand of the VEC_CONCAT
2930 and adjust the selector.  */
2931 if (elem < n_elts01)
2942 vec = rtvec_alloc (1);
2943 RTVEC_ELT (vec, 0) = GEN_INT (elem);
2945 tmp = gen_rtx_fmt_ee (code, mode,
2946 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
2952 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2953 gcc_assert (GET_MODE_INNER (mode)
2954 == GET_MODE_INNER (GET_MODE (trueop0)));
2955 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2957 if (GET_CODE (trueop0) == CONST_VECTOR)
2959 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2960 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2961 rtvec v = rtvec_alloc (n_elts);
2964 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2965 for (i = 0; i < n_elts; i++)
2967 rtx x = XVECEXP (trueop1, 0, i);
2969 gcc_assert (CONST_INT_P (x));
2970 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2974 return gen_rtx_CONST_VECTOR (mode, v);
2978 if (XVECLEN (trueop1, 0) == 1
2979 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
2980 && GET_CODE (trueop0) == VEC_CONCAT)
2983 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2985 /* Try to find the element in the VEC_CONCAT. */
2986 while (GET_MODE (vec) != mode
2987 && GET_CODE (vec) == VEC_CONCAT)
2989 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2990 if (offset < vec_size)
2991 vec = XEXP (vec, 0);
2995 vec = XEXP (vec, 1);
2997 vec = avoid_constant_pool_reference (vec);
3000 if (GET_MODE (vec) == mode)
3007 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3008 ? GET_MODE (trueop0)
3009 : GET_MODE_INNER (mode));
3010 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3011 ? GET_MODE (trueop1)
3012 : GET_MODE_INNER (mode));
3014 gcc_assert (VECTOR_MODE_P (mode));
3015 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3016 == GET_MODE_SIZE (mode));
3018 if (VECTOR_MODE_P (op0_mode))
3019 gcc_assert (GET_MODE_INNER (mode)
3020 == GET_MODE_INNER (op0_mode));
3022 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3024 if (VECTOR_MODE_P (op1_mode))
3025 gcc_assert (GET_MODE_INNER (mode)
3026 == GET_MODE_INNER (op1_mode));
3028 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3030 if ((GET_CODE (trueop0) == CONST_VECTOR
3031 || CONST_INT_P (trueop0)
3032 || GET_CODE (trueop0) == CONST_DOUBLE)
3033 && (GET_CODE (trueop1) == CONST_VECTOR
3034 || CONST_INT_P (trueop1)
3035 || GET_CODE (trueop1) == CONST_DOUBLE))
3037 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3038 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3039 rtvec v = rtvec_alloc (n_elts);
3041 unsigned in_n_elts = 1;
3043 if (VECTOR_MODE_P (op0_mode))
3044 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3045 for (i = 0; i < n_elts; i++)
3049 if (!VECTOR_MODE_P (op0_mode))
3050 RTVEC_ELT (v, i) = trueop0;
3052 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3056 if (!VECTOR_MODE_P (op1_mode))
3057 RTVEC_ELT (v, i) = trueop1;
3059 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3064 return gen_rtx_CONST_VECTOR (mode, v);
3077 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3080 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3082 unsigned int width = GET_MODE_BITSIZE (mode);
3084 if (VECTOR_MODE_P (mode)
3085 && code != VEC_CONCAT
3086 && GET_CODE (op0) == CONST_VECTOR
3087 && GET_CODE (op1) == CONST_VECTOR)
3089 unsigned n_elts = GET_MODE_NUNITS (mode);
3090 enum machine_mode op0mode = GET_MODE (op0);
3091 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3092 enum machine_mode op1mode = GET_MODE (op1);
3093 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3094 rtvec v = rtvec_alloc (n_elts);
3097 gcc_assert (op0_n_elts == n_elts);
3098 gcc_assert (op1_n_elts == n_elts);
3099 for (i = 0; i < n_elts; i++)
3101 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3102 CONST_VECTOR_ELT (op0, i),
3103 CONST_VECTOR_ELT (op1, i));
3106 RTVEC_ELT (v, i) = x;
3109 return gen_rtx_CONST_VECTOR (mode, v);
3112 if (VECTOR_MODE_P (mode)
3113 && code == VEC_CONCAT
3114 && (CONST_INT_P (op0)
3115 || GET_CODE (op0) == CONST_DOUBLE
3116 || GET_CODE (op0) == CONST_FIXED)
3117 && (CONST_INT_P (op1)
3118 || GET_CODE (op1) == CONST_DOUBLE
3119 || GET_CODE (op1) == CONST_FIXED))
3121 unsigned n_elts = GET_MODE_NUNITS (mode);
3122 rtvec v = rtvec_alloc (n_elts);
3124 gcc_assert (n_elts >= 2);
3127 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3128 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3130 RTVEC_ELT (v, 0) = op0;
3131 RTVEC_ELT (v, 1) = op1;
3135 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3136 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3139 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3140 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3141 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3143 for (i = 0; i < op0_n_elts; ++i)
3144 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3145 for (i = 0; i < op1_n_elts; ++i)
3146 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3149 return gen_rtx_CONST_VECTOR (mode, v);
3152 if (SCALAR_FLOAT_MODE_P (mode)
3153 && GET_CODE (op0) == CONST_DOUBLE
3154 && GET_CODE (op1) == CONST_DOUBLE
3155 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3166 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3168 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3170 for (i = 0; i < 4; i++)
3187 real_from_target (&r, tmp0, mode);
3188 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3192 REAL_VALUE_TYPE f0, f1, value, result;
3195 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3196 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3197 real_convert (&f0, mode, &f0);
3198 real_convert (&f1, mode, &f1);
3200 if (HONOR_SNANS (mode)
3201 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3205 && REAL_VALUES_EQUAL (f1, dconst0)
3206 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3209 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3210 && flag_trapping_math
3211 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3213 int s0 = REAL_VALUE_NEGATIVE (f0);
3214 int s1 = REAL_VALUE_NEGATIVE (f1);
3219 /* Inf + -Inf = NaN plus exception. */
3224 /* Inf - Inf = NaN plus exception. */
3229 /* Inf / Inf = NaN plus exception. */
3236 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3237 && flag_trapping_math
3238 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3239 || (REAL_VALUE_ISINF (f1)
3240 && REAL_VALUES_EQUAL (f0, dconst0))))
3241 /* Inf * 0 = NaN plus exception. */
3244 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3246 real_convert (&result, mode, &value);
3248 /* Don't constant fold this floating point operation if
3249 the result has overflowed and flag_trapping_math is set.  */
3251 if (flag_trapping_math
3252 && MODE_HAS_INFINITIES (mode)
3253 && REAL_VALUE_ISINF (result)
3254 && !REAL_VALUE_ISINF (f0)
3255 && !REAL_VALUE_ISINF (f1))
3256 /* Overflow plus exception. */
3259 /* Don't constant fold this floating point operation if the
3260 result may depend upon the run-time rounding mode and
3261 flag_rounding_math is set, or if GCC's software emulation
3262 is unable to accurately represent the result. */
3264 if ((flag_rounding_math
3265 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3266 && (inexact || !real_identical (&result, &value)))
3269 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3273 /* We can fold some multi-word operations. */
3274 if (GET_MODE_CLASS (mode) == MODE_INT
3275 && width == HOST_BITS_PER_WIDE_INT * 2
3276 && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
3277 && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
3279 unsigned HOST_WIDE_INT l1, l2, lv, lt;
3280 HOST_WIDE_INT h1, h2, hv, ht;
3282 if (GET_CODE (op0) == CONST_DOUBLE)
3283 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
3285 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
3287 if (GET_CODE (op1) == CONST_DOUBLE)
3288 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
3290 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
3295 /* A - B == A + (-B). */
3296 neg_double (l2, h2, &lv, &hv);
3299 /* Fall through.... */
3302 add_double (l1, h1, l2, h2, &lv, &hv);
3306 mul_double (l1, h1, l2, h2, &lv, &hv);
3310 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3311 &lv, &hv, <, &ht))
3316 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
3317 <, &ht, &lv, &hv))
3322 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3323 &lv, &hv, <, &ht))
3328 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
3329 <, &ht, &lv, &hv))
3334 lv = l1 & l2, hv = h1 & h2;
3338 lv = l1 | l2, hv = h1 | h2;
3342 lv = l1 ^ l2, hv = h1 ^ h2;
3348 && ((unsigned HOST_WIDE_INT) l1
3349 < (unsigned HOST_WIDE_INT) l2)))
3358 && ((unsigned HOST_WIDE_INT) l1
3359 > (unsigned HOST_WIDE_INT) l2)))
3366 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
3368 && ((unsigned HOST_WIDE_INT) l1
3369 < (unsigned HOST_WIDE_INT) l2)))
3376 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
3378 && ((unsigned HOST_WIDE_INT) l1
3379 > (unsigned HOST_WIDE_INT) l2)))
3385 case LSHIFTRT: case ASHIFTRT:
3387 case ROTATE: case ROTATERT:
3388 if (SHIFT_COUNT_TRUNCATED)
3389 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
3391 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
3394 if (code == LSHIFTRT || code == ASHIFTRT)
3395 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
3397 else if (code == ASHIFT)
3398 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
3399 else if (code == ROTATE)
3400 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3401 else /* code == ROTATERT */
3402 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3409 return immed_double_const (lv, hv, mode);
3412 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3413 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3415 /* Get the integer argument values in two forms:
3416 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3418 arg0 = INTVAL (op0);
3419 arg1 = INTVAL (op1);
3421 if (width < HOST_BITS_PER_WIDE_INT)
3423 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3424 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3427 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3428 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3431 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3432 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3440 /* Compute the value of the arithmetic. */
3445 val = arg0s + arg1s;
3449 val = arg0s - arg1s;
3453 val = arg0s * arg1s;
3458 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3461 val = arg0s / arg1s;
3466 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3469 val = arg0s % arg1s;
3474 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3477 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3482 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3485 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3503 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3504 the value is in range. We can't return any old value for
3505 out-of-range arguments because either the middle-end (via
3506 shift_truncation_mask) or the back-end might be relying on
3507 target-specific knowledge. Nor can we rely on
3508 shift_truncation_mask, since the shift might not be part of an
3509 ashlM3, lshrM3 or ashrM3 instruction. */
3510 if (SHIFT_COUNT_TRUNCATED)
3511 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3512 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3515 val = (code == ASHIFT
3516 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3517 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3519 /* Sign-extend the result for arithmetic right shifts. */
3520 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3521 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
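/* Worked example (illustrative): in QImode (width 8), arg0 == 0xf8
(arg0s == -8) shifted right by arg1 == 1 gives val == 0x7c from the
unsigned shift; the OR above then restores the sign bits, yielding
0xfc == -4, the correct arithmetic-shift result.  */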
3529 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3530 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3538 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3539 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3543 /* Do nothing here. */
3547 val = arg0s <= arg1s ? arg0s : arg1s;
3551 val = ((unsigned HOST_WIDE_INT) arg0
3552 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3556 val = arg0s > arg1s ? arg0s : arg1s;
3560 val = ((unsigned HOST_WIDE_INT) arg0
3561 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3574 /* ??? There are simplifications that can be done. */
3581 return gen_int_mode (val, mode);
3589 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3590 PLUS or MINUS.
3592 Rather than test for specific cases, we do this by a brute-force method
3593 and do all possible simplifications until no more changes occur. Then
3594 we rebuild the operation. */
3596 struct simplify_plus_minus_op_data
3603 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3607 result = (commutative_operand_precedence (y)
3608 - commutative_operand_precedence (x));
3612 /* Group together equal REGs to do more simplification. */
3613 if (REG_P (x) && REG_P (y))
3614 return REGNO (x) > REGNO (y);
3620 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3623 struct simplify_plus_minus_op_data ops[8];
3625 int n_ops = 2, input_ops = 2;
3626 int changed, n_constants = 0, canonicalized = 0;
3629 memset (ops, 0, sizeof ops);
3631 /* Set up the two operands and then expand them until nothing has been
3632 changed. If we run out of room in our array, give up; this should
3633 almost never happen. */
3638 ops[1].neg = (code == MINUS);
3644 for (i = 0; i < n_ops; i++)
3646 rtx this_op = ops[i].op;
3647 int this_neg = ops[i].neg;
3648 enum rtx_code this_code = GET_CODE (this_op);
3657 ops[n_ops].op = XEXP (this_op, 1);
3658 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3661 ops[i].op = XEXP (this_op, 0);
3664 canonicalized |= this_neg;
3668 ops[i].op = XEXP (this_op, 0);
3669 ops[i].neg = ! this_neg;
3676 && GET_CODE (XEXP (this_op, 0)) == PLUS
3677 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3678 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3680 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3681 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3682 ops[n_ops].neg = this_neg;
3690 /* ~a -> (-a - 1) */
3693 ops[n_ops].op = constm1_rtx;
3694 ops[n_ops++].neg = this_neg;
3695 ops[i].op = XEXP (this_op, 0);
3696 ops[i].neg = !this_neg;
3706 ops[i].op = neg_const_int (mode, this_op);
3720 if (n_constants > 1)
3723 gcc_assert (n_ops >= 2);
3725 /* If we only have two operands, we can avoid the loops. */
3728 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3731 /* Get the two operands. Be careful with the order, especially for
3732 the cases where code == MINUS. */
3733 if (ops[0].neg && ops[1].neg)
3735 lhs = gen_rtx_NEG (mode, ops[0].op);
3738 else if (ops[0].neg)
3749 return simplify_const_binary_operation (code, mode, lhs, rhs);
3752 /* Now simplify each pair of operands until nothing changes. */
3755 /* Insertion sort is good enough for an eight-element array. */
3756 for (i = 1; i < n_ops; i++)
3758 struct simplify_plus_minus_op_data save;
3760 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3766 ops[j + 1] = ops[j];
3767 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3772 for (i = n_ops - 1; i > 0; i--)
3773 for (j = i - 1; j >= 0; j--)
3775 rtx lhs = ops[j].op, rhs = ops[i].op;
3776 int lneg = ops[j].neg, rneg = ops[i].neg;
3778 if (lhs != 0 && rhs != 0)
3780 enum rtx_code ncode = PLUS;
3786 tem = lhs, lhs = rhs, rhs = tem;
3788 else if (swap_commutative_operands_p (lhs, rhs))
3789 tem = lhs, lhs = rhs, rhs = tem;
3791 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3792 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3794 rtx tem_lhs, tem_rhs;
3796 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3797 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3798 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3800 if (tem && !CONSTANT_P (tem))
3801 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3804 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3806 /* Reject "simplifications" that just wrap the two
3807 arguments in a CONST. Failure to do so can result
3808 in infinite recursion with simplify_binary_operation
3809 when it calls us to simplify CONST operations. */
3811 && ! (GET_CODE (tem) == CONST
3812 && GET_CODE (XEXP (tem, 0)) == ncode
3813 && XEXP (XEXP (tem, 0), 0) == lhs
3814 && XEXP (XEXP (tem, 0), 1) == rhs))
3817 if (GET_CODE (tem) == NEG)
3818 tem = XEXP (tem, 0), lneg = !lneg;
3819 if (CONST_INT_P (tem) && lneg)
3820 tem = neg_const_int (mode, tem), lneg = 0;
3824 ops[j].op = NULL_RTX;
3831 /* If nothing changed, fail. */
3835 /* Pack all the operands to the lower-numbered entries. */
3836 for (i = 0, j = 0; j < n_ops; j++)
3846 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3848 && CONST_INT_P (ops[1].op)
3849 && CONSTANT_P (ops[0].op)
3851 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3853 /* We suppressed creation of trivial CONST expressions in the
3854 combination loop to avoid recursion. Create one manually now.
3855 The combination loop should have ensured that there is exactly
3856 one CONST_INT, and the sort will have ensured that it is last
3857 in the array and that any other constant will be next-to-last. */
3860 && CONST_INT_P (ops[n_ops - 1].op)
3861 && CONSTANT_P (ops[n_ops - 2].op))
3863 rtx value = ops[n_ops - 1].op;
3864 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3865 value = neg_const_int (mode, value);
3866 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3870 /* Put a non-negated operand first, if possible. */
3872 for (i = 0; i < n_ops && ops[i].neg; i++)
3875 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3884 /* Now make the result by performing the requested operations. */
3886 for (i = 1; i < n_ops; i++)
3887 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3888 mode, result, ops[i].op);
3893 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3895 plus_minus_operand_p (const_rtx x)
3897 return GET_CODE (x) == PLUS
3898 || GET_CODE (x) == MINUS
3899 || (GET_CODE (x) == CONST
3900 && GET_CODE (XEXP (x, 0)) == PLUS
3901 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3902 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3905 /* Like simplify_binary_operation except used for relational operators.
3906 MODE is the mode of the result.  If MODE is VOIDmode, the operands
3907 must not both be VOIDmode as well.
3909 CMP_MODE specifies the mode in which the comparison is done, so it is
3910 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3911 the operands or, if both are VOIDmode, the operands are compared in
3912 "infinite precision". */
3914 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3915 enum machine_mode cmp_mode, rtx op0, rtx op1)
3917 rtx tem, trueop0, trueop1;
3919 if (cmp_mode == VOIDmode)
3920 cmp_mode = GET_MODE (op0);
3921 if (cmp_mode == VOIDmode)
3922 cmp_mode = GET_MODE (op1);
3924 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3927 if (SCALAR_FLOAT_MODE_P (mode))
3929 if (tem == const0_rtx)
3930 return CONST0_RTX (mode);
3931 #ifdef FLOAT_STORE_FLAG_VALUE
3933 REAL_VALUE_TYPE val;
3934 val = FLOAT_STORE_FLAG_VALUE (mode);
3935 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3941 if (VECTOR_MODE_P (mode))
3943 if (tem == const0_rtx)
3944 return CONST0_RTX (mode);
3945 #ifdef VECTOR_STORE_FLAG_VALUE
3950 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3951 if (val == NULL_RTX)
3953 if (val == const1_rtx)
3954 return CONST1_RTX (mode);
3956 units = GET_MODE_NUNITS (mode);
3957 v = rtvec_alloc (units);
3958 for (i = 0; i < units; i++)
3959 RTVEC_ELT (v, i) = val;
3960 return gen_rtx_raw_CONST_VECTOR (mode, v);
3970 /* For the following tests, ensure const0_rtx is op1. */
3971 if (swap_commutative_operands_p (op0, op1)
3972 || (op0 == const0_rtx && op1 != const0_rtx))
3973 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3975 /* If op0 is a compare, extract the comparison arguments from it. */
3976 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3977 return simplify_gen_relational (code, mode, VOIDmode,
3978 XEXP (op0, 0), XEXP (op0, 1));
3980 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3984 trueop0 = avoid_constant_pool_reference (op0);
3985 trueop1 = avoid_constant_pool_reference (op1);
3986 return simplify_relational_operation_1 (code, mode, cmp_mode,
3990 /* This part of simplify_relational_operation is only used when CMP_MODE
3991 is not in class MODE_CC (i.e. it is a real comparison).
3993 MODE is the mode of the result, while CMP_MODE specifies the mode
3994 in which the comparison is done, so it is the mode of the operands.  */
3997 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3998 enum machine_mode cmp_mode, rtx op0, rtx op1)
4000 enum rtx_code op0code = GET_CODE (op0);
4002 if (op1 == const0_rtx && COMPARISON_P (op0))
4004 /* If op0 is a comparison, extract the comparison arguments
4005 from it.  */
4008 if (GET_MODE (op0) == mode)
4009 return simplify_rtx (op0);
4011 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4012 XEXP (op0, 0), XEXP (op0, 1));
4014 else if (code == EQ)
4016 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4017 if (new_code != UNKNOWN)
4018 return simplify_gen_relational (new_code, mode, VOIDmode,
4019 XEXP (op0, 0), XEXP (op0, 1));
4023 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4024 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4025 if ((code == LTU || code == GEU)
4026 && GET_CODE (op0) == PLUS
4027 && CONST_INT_P (XEXP (op0, 1))
4028 && (rtx_equal_p (op1, XEXP (op0, 0))
4029 || rtx_equal_p (op1, XEXP (op0, 1))))
4032 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4033 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4034 cmp_mode, XEXP (op0, 0), new_cmp);
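/* Illustrative example: in SImode,
(ltu (plus a (const_int 4)) (const_int 4)) tests whether a + 4
wrapped around, which is exactly (geu a (const_int -4)), i.e.
a >= 0xfffffffc unsigned.  */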
4037 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4038 if ((code == LTU || code == GEU)
4039 && GET_CODE (op0) == PLUS
4040 && rtx_equal_p (op1, XEXP (op0, 1))
4041 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4042 && !rtx_equal_p (op1, XEXP (op0, 0)))
4043 return simplify_gen_relational (code, mode, cmp_mode, op0, XEXP (op0, 0));
4045 if (op1 == const0_rtx)
4047 /* Canonicalize (GTU x 0) as (NE x 0). */
4049 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4050 /* Canonicalize (LEU x 0) as (EQ x 0). */
4052 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4054 else if (op1 == const1_rtx)
4059 /* Canonicalize (GE x 1) as (GT x 0). */
4060 return simplify_gen_relational (GT, mode, cmp_mode,
4063 /* Canonicalize (GEU x 1) as (NE x 0). */
4064 return simplify_gen_relational (NE, mode, cmp_mode,
4067 /* Canonicalize (LT x 1) as (LE x 0). */
4068 return simplify_gen_relational (LE, mode, cmp_mode,
4071 /* Canonicalize (LTU x 1) as (EQ x 0). */
4072 return simplify_gen_relational (EQ, mode, cmp_mode,
4078 else if (op1 == constm1_rtx)
4080 /* Canonicalize (LE x -1) as (LT x 0). */
4082 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4083 /* Canonicalize (GT x -1) as (GE x 0). */
4085 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4088 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4089 if ((code == EQ || code == NE)
4090 && (op0code == PLUS || op0code == MINUS)
4092 && CONSTANT_P (XEXP (op0, 1))
4093 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4095 rtx x = XEXP (op0, 0);
4096 rtx c = XEXP (op0, 1);
4098 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4100 return simplify_gen_relational (code, mode, cmp_mode, x, c);
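/* Illustrative example: (eq (plus x (const_int 3)) (const_int 7))
becomes (eq x (const_int 4)), moving the constant to the other side
as cst2 - cst1.  */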
4103 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
4104 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4106 && op1 == const0_rtx
4107 && GET_MODE_CLASS (mode) == MODE_INT
4108 && cmp_mode != VOIDmode
4109 /* ??? Work-around BImode bugs in the ia64 backend. */
4111 && cmp_mode != BImode
4112 && nonzero_bits (op0, cmp_mode) == 1
4113 && STORE_FLAG_VALUE == 1)
4114 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4115 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4116 : lowpart_subreg (mode, op0, cmp_mode);
4118 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4119 if ((code == EQ || code == NE)
4120 && op1 == const0_rtx
4122 return simplify_gen_relational (code, mode, cmp_mode,
4123 XEXP (op0, 0), XEXP (op0, 1));
4125 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4126 if ((code == EQ || code == NE)
4128 && rtx_equal_p (XEXP (op0, 0), op1)
4129 && !side_effects_p (XEXP (op0, 0)))
4130 return simplify_gen_relational (code, mode, cmp_mode,
4131 XEXP (op0, 1), const0_rtx);
4133 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4134 if ((code == EQ || code == NE)
4136 && rtx_equal_p (XEXP (op0, 1), op1)
4137 && !side_effects_p (XEXP (op0, 1)))
4138 return simplify_gen_relational (code, mode, cmp_mode,
4139 XEXP (op0, 0), const0_rtx);
4141 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4142 if ((code == EQ || code == NE)
4144 && (CONST_INT_P (op1)
4145 || GET_CODE (op1) == CONST_DOUBLE)
4146 && (CONST_INT_P (XEXP (op0, 1))
4147 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4148 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4149 simplify_gen_binary (XOR, cmp_mode,
4150 XEXP (op0, 1), op1));
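/* Illustrative example: (eq (xor x (const_int 5)) (const_int 3))
becomes (eq x (const_int 6)), since 5 ^ 3 == 6.  */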
4152 if (op0code == POPCOUNT && op1 == const0_rtx)
4158 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4159 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4160 XEXP (op0, 0), const0_rtx);
4165 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4166 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4167 XEXP (op0, 0), const0_rtx);
4186 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4187 KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
4188 For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
4189 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4190 For floating-point comparisons, assume that the operands were ordered. */
4193 comparison_result (enum rtx_code code, int known_results)
4199 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4202 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4206 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4209 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4213 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4216 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4219 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4221 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4224 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4226 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4229 return const_true_rtx;
4237 /* Check if the given comparison (done in the given MODE) is actually a
4238 tautology or a contradiction.
4239 If no simplification is possible, this function returns zero.
4240 Otherwise, it returns either const_true_rtx or const0_rtx. */
4243 simplify_const_relational_operation (enum rtx_code code,
4244 enum machine_mode mode,
4251 gcc_assert (mode != VOIDmode
4252 || (GET_MODE (op0) == VOIDmode
4253 && GET_MODE (op1) == VOIDmode));
4255 /* If op0 is a compare, extract the comparison arguments from it. */
4256 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4258 op1 = XEXP (op0, 1);
4259 op0 = XEXP (op0, 0);
4261 if (GET_MODE (op0) != VOIDmode)
4262 mode = GET_MODE (op0);
4263 else if (GET_MODE (op1) != VOIDmode)
4264 mode = GET_MODE (op1);
4269 /* We can't simplify MODE_CC values since we don't know what the
4270 actual comparison is. */
4271 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4274 /* Make sure the constant is second. */
4275 if (swap_commutative_operands_p (op0, op1))
4277 tem = op0, op0 = op1, op1 = tem;
4278 code = swap_condition (code);
4281 trueop0 = avoid_constant_pool_reference (op0);
4282 trueop1 = avoid_constant_pool_reference (op1);
4284 /* For integer comparisons of A and B maybe we can simplify A - B and can
4285 then simplify a comparison of that with zero. If A and B are both either
4286 a register or a CONST_INT, this can't help; testing for these cases will
4287 prevent infinite recursion here and speed things up.
4289 We can only do this for EQ and NE comparisons as otherwise we may
4290 lose or introduce overflow, which we cannot disregard as undefined,
4291 because we do not know the signedness of the operation on either the
4292 left or the right hand side of the comparison.  */
4294 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4295 && (code == EQ || code == NE)
4296 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4297 && (REG_P (op1) || CONST_INT_P (trueop1)))
4298 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4299 /* We cannot do this if tem is a nonzero address. */
4300 && ! nonzero_address_p (tem))
4301 return simplify_const_relational_operation (signed_condition (code),
4302 mode, tem, const0_rtx);
4304 if (! HONOR_NANS (mode) && code == ORDERED)
4305 return const_true_rtx;
4307 if (! HONOR_NANS (mode) && code == UNORDERED)
4310 /* For modes without NaNs, if the two operands are equal, we know the
4311 result except if they have side-effects. Even with NaNs we know
4312 the result of unordered comparisons and, if signaling NaNs are
4313 irrelevant, also the result of LT/GT/LTGT. */
4314 if ((! HONOR_NANS (GET_MODE (trueop0))
4315 || code == UNEQ || code == UNLE || code == UNGE
4316 || ((code == LT || code == GT || code == LTGT)
4317 && ! HONOR_SNANS (GET_MODE (trueop0))))
4318 && rtx_equal_p (trueop0, trueop1)
4319 && ! side_effects_p (trueop0))
4320 return comparison_result (code, CMP_EQ);
4322 /* If the operands are floating-point constants, see if we can fold
4324 if (GET_CODE (trueop0) == CONST_DOUBLE
4325 && GET_CODE (trueop1) == CONST_DOUBLE
4326 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4328 REAL_VALUE_TYPE d0, d1;
4330 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4331 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4333 /* Comparisons are unordered iff at least one of the values is NaN. */
4334 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4344 return const_true_rtx;
4357 return comparison_result (code,
4358 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4359 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4362 /* Otherwise, see if the operands are both integers. */
4363 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4364 && (GET_CODE (trueop0) == CONST_DOUBLE
4365 || CONST_INT_P (trueop0))
4366 && (GET_CODE (trueop1) == CONST_DOUBLE
4367 || CONST_INT_P (trueop1)))
4369 int width = GET_MODE_BITSIZE (mode);
4370 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4371 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4373 /* Get the two words comprising each integer constant. */
4374 if (GET_CODE (trueop0) == CONST_DOUBLE)
4376 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4377 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4381 l0u = l0s = INTVAL (trueop0);
4382 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4385 if (GET_CODE (trueop1) == CONST_DOUBLE)
4387 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4388 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4392 l1u = l1s = INTVAL (trueop1);
4393 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4396 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4397 we have to sign or zero-extend the values. */
4398 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4400 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4401 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4403 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4404 l0s |= ((HOST_WIDE_INT) (-1) << width);
4406 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4407 l1s |= ((HOST_WIDE_INT) (-1) << width);
4409 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4410 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4412 if (h0u == h1u && l0u == l1u)
4413 return comparison_result (code, CMP_EQ);
4417 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4418 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4419 return comparison_result (code, cr);
4423 /* Optimize comparisons with upper and lower bounds. */
4424 if (SCALAR_INT_MODE_P (mode)
4425 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4426 && CONST_INT_P (trueop1))
4429 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4430 HOST_WIDE_INT val = INTVAL (trueop1);
4431 HOST_WIDE_INT mmin, mmax;
4441 /* Get a reduced range if the sign bit is zero. */
4442 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4449 rtx mmin_rtx, mmax_rtx;
4450 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4452 mmin = INTVAL (mmin_rtx);
4453 mmax = INTVAL (mmax_rtx);
4456 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4458 mmin >>= (sign_copies - 1);
4459 mmax >>= (sign_copies - 1);
4465 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4467 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4468 return const_true_rtx;
4469 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4474 return const_true_rtx;
4479 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4481 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4482 return const_true_rtx;
4483 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4488 return const_true_rtx;
4494 /* x == y is always false for y out of range. */
4495 if (val < mmin || val > mmax)
4499 /* x > y is always false for y >= mmax, always true for y < mmin. */
4501 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4503 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4504 return const_true_rtx;
4510 return const_true_rtx;
4513 /* x < y is always false for y <= mmin, always true for y > mmax. */
4515 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4517 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4518 return const_true_rtx;
4524 return const_true_rtx;
4528 /* x != y is always true for y out of range. */
4529 if (val < mmin || val > mmax)
4530 return const_true_rtx;
4538 /* Optimize integer comparisons with zero. */
4539 if (trueop1 == const0_rtx)
4541 /* Some addresses are known to be nonzero. We don't know
4542 their sign, but equality comparisons are known. */
4543 if (nonzero_address_p (trueop0))
4545 if (code == EQ || code == LEU)
4547 if (code == NE || code == GTU)
4548 return const_true_rtx;
4551 /* See if the first operand is an IOR with a constant. If so, we
4552 may be able to determine the result of this comparison. */
4553 if (GET_CODE (op0) == IOR)
4555 rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
4556 if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
4558 int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
4559 int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
4560 && (INTVAL (inner_const)
4561 & ((HOST_WIDE_INT) 1 << sign_bitnum)));
4570 return const_true_rtx;
4574 return const_true_rtx;
4588 /* Optimize comparison of ABS with zero. */
4589 if (trueop1 == CONST0_RTX (mode)
4590 && (GET_CODE (trueop0) == ABS
4591 || (GET_CODE (trueop0) == FLOAT_EXTEND
4592 && GET_CODE (XEXP (trueop0, 0)) == ABS)))
4597 /* Optimize abs(x) < 0.0. */
4598 if (!HONOR_SNANS (mode)
4599 && (!INTEGRAL_MODE_P (mode)
4600 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4602 if (INTEGRAL_MODE_P (mode)
4603 && (issue_strict_overflow_warning
4604 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4605 warning (OPT_Wstrict_overflow,
4606 ("assuming signed overflow does not occur when "
4607 "assuming abs (x) < 0 is false"));
4613 /* Optimize abs(x) >= 0.0. */
4614 if (!HONOR_NANS (mode)
4615 && (!INTEGRAL_MODE_P (mode)
4616 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
4618 if (INTEGRAL_MODE_P (mode)
4619 && (issue_strict_overflow_warning
4620 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
4621 warning (OPT_Wstrict_overflow,
4622 ("assuming signed overflow does not occur when "
4623 "assuming abs (x) >= 0 is true"));
4624 return const_true_rtx;
4629 /* Optimize ! (abs(x) < 0.0). */
4630 return const_true_rtx;
4640 /* Simplify CODE, an operation with result mode MODE and three operands,
4641 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4642 a constant.  Return 0 if no simplification is possible.  */
4645 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4646 enum machine_mode op0_mode, rtx op0, rtx op1,
4649 unsigned int width = GET_MODE_BITSIZE (mode);
4651 /* VOIDmode means "infinite" precision. */
4653 width = HOST_BITS_PER_WIDE_INT;
4659 if (CONST_INT_P (op0)
4660 && CONST_INT_P (op1)
4661 && CONST_INT_P (op2)
4662 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4663 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }
	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return gen_int_mode (val, mode);
	}
      break;
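      /* Worked example (illustrative, not from the original sources):
	 (zero_extract:SI (const_int 0x2d) (const_int 3) (const_int 2))
	 with !BITS_BIG_ENDIAN shifts 0x2d right by 2 giving 0xb, then
	 masks with (1 << 3) - 1 = 7, yielding (const_int 3).  A
	 SIGN_EXTRACT of the same field tests bit 2 of 3, which is clear,
	 so it also yields (const_int 3).  */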
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;
      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;
      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;
	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }
	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;
	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
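/* Illustrative example (not from the original sources): with mode
   V4SImode, OP2 == (const_int 5) has bits 0 and 2 set, so
   (vec_merge:V4SI (const_vector [1 2 3 4]) (const_vector [5 6 7 8])
		   (const_int 5))
   folds to (const_vector [1 6 3 8]).  */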
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum { max_bitsize = 512, value_bit = 8,
	 value_mask = (1 << value_bit) - 1 };
  unsigned char value[max_bitsize / value_bit];
  int value_start, i, elem, num_elem, elem_bitsize;
  rtx *elems;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;
  /* Unpack the value.  */
  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }
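      /* Illustrative note (not from the original sources): for a
	 V4HImode value on a big-endian target with UNITS_PER_WORD == 4,
	 element 1 has byte == 2 and ibyte == 4, so bytele == 4 and VP
	 points at bit 32 of the little-endian value array; on a
	 little-endian target bytele is simply 2.  */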
      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;
	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;
	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }
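  /* Illustrative note (not from the original sources): reading the
     SImode lowpart of a DImode constant has BYTE == 4 on a fully
     big-endian target; ibyte == 8 - 4 - 4 == 0, so BYTE is renumbered
     to 0, the least-significant byte of the little-endian value array.
     On a little-endian target the lowpart already has BYTE == 0.  */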
  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
  /* Re-pack the value.  */
  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;
	case MODE_FRACT: case MODE_UFRACT:
	case MODE_ACCUM: case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
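/* A minimal standalone sketch of the unpack/select/repack idea above,
   assuming a 64-bit little-endian host; it is illustrative only and is
   not part of GCC.  Taking the SImode subreg at byte 4 of the DImode
   constant 0x1122334455667788 amounts to slicing the byte array.  */
#if 0
#include <stdint.h>
#include <string.h>

static uint32_t
subreg_bytes_example (uint64_t inner, unsigned int byte)
{
  unsigned char bytes[8];	/* little-endian value array */
  uint32_t outer;
  unsigned int i;

  /* Unpack INNER into 8-bit chunks, least-significant byte first.  */
  for (i = 0; i < 8; i++)
    bytes[i] = (unsigned char) (inner >> (8 * i));

  /* Select by BYTE and re-pack for the outer mode; byte 4 of
     0x1122334455667788 yields 0x11223344.  */
  memcpy (&outer, bytes + byte, sizeof outer);
  return outer;
}
#endif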
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));
  if (outermode == innermode && !byte)
    return op;
  if (CONST_INT_P (op)
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to OP's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);
      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  The irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0; on big-endian machines this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
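      /* Illustrative note (not from the original sources): for
	 (subreg:DI (subreg:SI (reg:DI r) 0) 0) on a 32-bit big-endian
	 target, difference is 4 - 8 == -4, so final_offset becomes -4,
	 the "true" offset of the paradoxical subreg before the
	 SUBREG_BYTE == 0 convention is reapplied below.  */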
      /* Likewise, undo the exception if OP itself is a paradoxical
	 subreg of a yet narrower mode.  */
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see whether we are still looking at
	     the lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* Merge implicit and explicit truncations.  */
  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */
	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
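  /* Illustrative example (not from the original sources): for a complex
     (concat:DC (reg:DF re) (reg:DF im)), part_size is 8, so
     (subreg:DF (concat:DC ...) 0) simplifies to RE and
     (subreg:DF (concat:DC ...) 8) simplifies to IM.  */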
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}
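      /* Illustrative examples (not from the original sources), taking
	 the lowpart (BYTE == 0 on a little-endian target) of
	 OP == (zero_extend:DI (reg:HI x)):
	   (subreg:HI ... 0)  =>  (reg:HI x)
	   (subreg:QI ... 0)  =>  the lowpart subreg of (reg:HI x)
	   (subreg:SI ... 0)  =>  (zero_extend:SI (reg:HI x)).  */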
      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as OUTERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
      && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  return NULL_RTX;
}
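/* Illustrative example (not from the original sources): on a 32-bit
   little-endian target,
   (subreg:SI (lshiftrt:DI (reg:DI r) (const_int 32)) 0)
   extracts the upper word of R, so it simplifies to
   (subreg:SI (reg:DI r) 4).  */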
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));

    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through.  */
    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }
  return NULL;
}