/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
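/* For example, with a 64-bit HOST_WIDE_INT, a low word whose high bit
   is set, such as 0x8000000000000000, sign-extends to a high word of
   -1, while a low word of 0x42 extends to a high word of 0.  */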
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
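/* For example, for 32-bit SImode the only value accepted is
   (const_int 0x80000000): after masking to the mode width, exactly
   the sign bit (bit 31) must be set.  */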
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
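/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   folds to X rather than building (plus:SI X (const_int 0)), and a
   commutative operation given a constant first operand has its
   operands swapped before the raw rtx is built.  */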
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
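/* For example, a MEM whose address is a SYMBOL_REF into the constant
   pool, where the pool entry holds the SFmode constant 1.0, is
   replaced here by the CONST_DOUBLE for 1.0, letting later folding
   consume the value directly.  */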
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp = 0, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += INTVAL (MEM_OFFSET (x));

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies the mode the comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
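/* For example, replacing B with const0_rtx in (plus:SI A B) returns
   just A rather than (plus:SI A (const_int 0)), because each rebuilt
   subexpression is re-simplified on the way back up.  */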
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */

static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));
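      /* For example, (not (eq X Y)) becomes (ne X Y) here: in BImode,
         or when STORE_FLAG_VALUE is -1 (all-ones true), bitwise NOT of
         a comparison result equals the reversed comparison.  */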
      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
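      /* For example, (not (and X Y)) becomes (ior (not X) (not Y)),
         and (not (ior X Y)) becomes (and (not X) (not Y)), which map
         directly onto and-not/nor style instructions.  */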
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_INT_P (XEXP (op, 1))
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
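      /* For example, with STORE_FLAG_VALUE == 1 and an SImode operand,
         (neg (lt X 0)) becomes (ashiftrt X 31), replicating the sign
         bit of X across the word: -1 when X < 0, else 0.  */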
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
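      /* For example, truncating an SImode EQ result to QImode is safe
         when STORE_FLAG_VALUE is 1: the value is always 0 or 1 and so
         already fits in the narrower mode.  */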
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode, XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned) significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned) significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                         ? SIGN_EXTEND : ZERO_EXTEND,
                                         mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */

rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          arg0 &= GET_MODE_MASK (mode);
          val = ffs_hwi (arg0);
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = ctz_hwi (arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
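  /* For example, (bswap:SI (const_int 0x12345678)) folds here to
     (const_int 0x78563412), and (neg:SI (const_int 1)) folds to
     (const_int -1).  */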
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 != 0)
            lv = ffs_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
          else
            lv = 0;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = ctz_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
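/* For example, (fix:SI (const_double:DF 1e10)) is folded above by
   saturating to (const_int 2147483647), matching the middle-end's
   constant-folding semantics even though RTL leaves the overflow
   behavior of FIX unspecified.  */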
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
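/* For example, (plus (plus X (const_int 1)) (const_int 2)) is
   reassociated here: the two constants fold to (const_int 3),
   yielding (plus X (const_int 3)).  */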
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, coeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          coeff1 = double_int_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = double_int_minus_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = double_int_add (coeff0, coeff1);
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }

      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, negcoeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          negcoeff1 = double_int_minus_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = double_int_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1 = double_int_setbit (double_int_zero,
                                             INTVAL (XEXP (rhs, 1)));
              negcoeff1 = double_int_neg (negcoeff1);
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = double_int_add (coeff0, negcoeff1);
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }

      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_INT_P (op1)
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
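      /* For example, (minus X (and X Y)) becomes (and X (not Y)):
         each bit of X survives exactly when the corresponding bit of
         Y is clear, and no borrows can occur.  */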
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
          if (temp)
            return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
        }
      if (GET_CODE (op1) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
          if (temp)
            return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
        }

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;

      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case IOR:
2263 if (trueop1 == CONST0_RTX (mode))
2265 if (CONST_INT_P (trueop1)
2266 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2267 == GET_MODE_MASK (mode)))
2269 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2271 /* A | (~A) -> -1 */
2272 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2273 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2274 && ! side_effects_p (op0)
2275 && SCALAR_INT_MODE_P (mode))
2278 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2279 if (CONST_INT_P (op1)
2280 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2281 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
2284 /* Canonicalize (X & C1) | C2. */
2285 if (GET_CODE (op0) == AND
2286 && CONST_INT_P (trueop1)
2287 && CONST_INT_P (XEXP (op0, 1)))
2289 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2290 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2291 HOST_WIDE_INT c2 = INTVAL (trueop1);
2293 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2294 if ((c1 & c2) == c1
2295 && !side_effects_p (XEXP (op0, 0)))
2296 return trueop1;
2298 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2299 if (((c1|c2) & mask) == mask)
2300 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2302 /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2. */
2303 if (((c1 & ~c2) & mask) != (c1 & mask))
2305 tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
2306 gen_int_mode (c1 & ~c2, mode));
2307 return simplify_gen_binary (IOR, mode, tem, op1);
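/* Worked example with illustrative constants, in a 32-bit mode:
   for (X & 0xff00) | 0x0ff0 the last rule computes

     C1 & ~C2 == 0xff00 & ~0x0ff0 == 0xf000

   so the expression becomes (X & 0xf000) | 0x0ff0; the bits dropped
   from C1 are forced to 1 by C2 anyway, so the value is unchanged.  */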
2311 /* Convert (A & B) | A to A. */
2312 if (GET_CODE (op0) == AND
2313 && (rtx_equal_p (XEXP (op0, 0), op1)
2314 || rtx_equal_p (XEXP (op0, 1), op1))
2315 && ! side_effects_p (XEXP (op0, 0))
2316 && ! side_effects_p (XEXP (op0, 1)))
2319 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2320 mode size to (rotate A CX). */
2322 if (GET_CODE (op1) == ASHIFT
2323 || GET_CODE (op1) == SUBREG)
2334 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2335 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2336 && CONST_INT_P (XEXP (opleft, 1))
2337 && CONST_INT_P (XEXP (opright, 1))
2338 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2339 == GET_MODE_BITSIZE (mode)))
2340 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
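/* This is the classic C rotate idiom.  A 32-bit sketch:

     uint32_t rotl3 (uint32_t x) { return (x << 3) | (x >> 29); }

   Because 3 + 29 == 32, the IOR of the two shifts is exactly
   (rotate x 3).  */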
2342 /* Same, but for ashift that has been "simplified" to a wider mode
2343 by simplify_shift_const. */
2345 if (GET_CODE (opleft) == SUBREG
2346 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2347 && GET_CODE (opright) == LSHIFTRT
2348 && GET_CODE (XEXP (opright, 0)) == SUBREG
2349 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2350 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2351 && (GET_MODE_SIZE (GET_MODE (opleft))
2352 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2353 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2354 SUBREG_REG (XEXP (opright, 0)))
2355 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2356 && CONST_INT_P (XEXP (opright, 1))
2357 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2358 == GET_MODE_BITSIZE (mode)))
2359 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2360 XEXP (SUBREG_REG (opleft), 1));
2362 /* If we have (ior (and X C1) C2), simplify this by making
2363 C1 as small as possible if C1 actually changes. */
2364 if (CONST_INT_P (op1)
2365 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2366 || INTVAL (op1) > 0)
2367 && GET_CODE (op0) == AND
2368 && CONST_INT_P (XEXP (op0, 1))
2369 && CONST_INT_P (op1)
2370 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2371 return simplify_gen_binary (IOR, mode,
2373 (AND, mode, XEXP (op0, 0),
2374 GEN_INT (INTVAL (XEXP (op0, 1))
2378 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2379 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2380 the PLUS does not affect any of the bits in OP1: then we can do
2381 the IOR as a PLUS and we can associate. This is valid if OP1
2382 can be safely shifted left C bits. */
2383 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2384 && GET_CODE (XEXP (op0, 0)) == PLUS
2385 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2386 && CONST_INT_P (XEXP (op0, 1))
2387 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2389 int count = INTVAL (XEXP (op0, 1));
2390 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2392 if (mask >> count == INTVAL (trueop1)
2393 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2394 return simplify_gen_binary (ASHIFTRT, mode,
2395 plus_constant (XEXP (op0, 0), mask),
2399 tem = simplify_associative_operation (code, mode, op0, op1);
2405 if (trueop1 == CONST0_RTX (mode))
2407 if (CONST_INT_P (trueop1)
2408 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2409 == GET_MODE_MASK (mode)))
2410 return simplify_gen_unary (NOT, mode, op0, mode);
2411 if (rtx_equal_p (trueop0, trueop1)
2412 && ! side_effects_p (op0)
2413 && GET_MODE_CLASS (mode) != MODE_CC)
2414 return CONST0_RTX (mode);
2416 /* Canonicalize XOR of the most significant bit to PLUS. */
2417 if ((CONST_INT_P (op1)
2418 || GET_CODE (op1) == CONST_DOUBLE)
2419 && mode_signbit_p (mode, op1))
2420 return simplify_gen_binary (PLUS, mode, op0, op1);
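/* The sign bit is the one position where XOR and PLUS coincide,
   since a carry out of the top bit is simply discarded.  E.g. for
   every 32-bit x:

     (x ^ 0x80000000u) == (x + 0x80000000u)

   which is why the XOR is canonicalized to PLUS here.  */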
2421 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2422 if ((CONST_INT_P (op1)
2423 || GET_CODE (op1) == CONST_DOUBLE)
2424 && GET_CODE (op0) == PLUS
2425 && (CONST_INT_P (XEXP (op0, 1))
2426 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2427 && mode_signbit_p (mode, XEXP (op0, 1)))
2428 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2429 simplify_gen_binary (XOR, mode, op1,
2432 /* If we are XORing two things that have no bits in common,
2433 convert them into an IOR. This helps to detect rotation encoded
2434 using those methods and possibly other simplifications. */
2436 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2437 && (nonzero_bits (op0, mode)
2438 & nonzero_bits (op1, mode)) == 0)
2439 return (simplify_gen_binary (IOR, mode, op0, op1));
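/* With disjoint operands there is never a position where 1^1 could
   differ from 1|1, so XOR and IOR compute the same value; e.g.

     (x & 0x0f) ^ (y & 0xf0)  ==  (x & 0x0f) | (y & 0xf0)

   for all x and y.  */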
2441 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2442 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2445 int num_negated = 0;
2447 if (GET_CODE (op0) == NOT)
2448 num_negated++, op0 = XEXP (op0, 0);
2449 if (GET_CODE (op1) == NOT)
2450 num_negated++, op1 = XEXP (op1, 0);
2452 if (num_negated == 2)
2453 return simplify_gen_binary (XOR, mode, op0, op1);
2454 else if (num_negated == 1)
2455 return simplify_gen_unary (NOT, mode,
2456 simplify_gen_binary (XOR, mode, op0, op1),
2460 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2461 correspond to a machine insn or result in further simplifications
2462 if B is a constant. */
2464 if (GET_CODE (op0) == AND
2465 && rtx_equal_p (XEXP (op0, 1), op1)
2466 && ! side_effects_p (op1))
2467 return simplify_gen_binary (AND, mode,
2468 simplify_gen_unary (NOT, mode,
2469 XEXP (op0, 0), mode),
2472 else if (GET_CODE (op0) == AND
2473 && rtx_equal_p (XEXP (op0, 0), op1)
2474 && ! side_effects_p (op1))
2475 return simplify_gen_binary (AND, mode,
2476 simplify_gen_unary (NOT, mode,
2477 XEXP (op0, 1), mode),
2480 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2481 comparison if STORE_FLAG_VALUE is 1. */
2482 if (STORE_FLAG_VALUE == 1
2483 && trueop1 == const1_rtx
2484 && COMPARISON_P (op0)
2485 && (reversed = reversed_comparison (op0, mode)))
2486 return reversed;
2488 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2489 is (lt foo (const_int 0)), so we can perform the above
2490 simplification if STORE_FLAG_VALUE is 1. */
2492 if (STORE_FLAG_VALUE == 1
2493 && trueop1 == const1_rtx
2494 && GET_CODE (op0) == LSHIFTRT
2495 && CONST_INT_P (XEXP (op0, 1))
2496 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2497 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
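/* For a 32-bit value, the logical shift x >> 31 is 1 exactly when x
   is negative, i.e. it computes (lt x 0) under STORE_FLAG_VALUE == 1,
   so XORing it with 1 yields (ge x 0):

     (((uint32_t) x >> 31) ^ 1) == (x >= 0 ? 1 : 0)

   for every int32_t x.  */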
2499 /* (xor (comparison foo bar) (const_int sign-bit))
2500 when STORE_FLAG_VALUE is the sign bit. */
2501 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2502 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2503 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2504 && trueop1 == const_true_rtx
2505 && COMPARISON_P (op0)
2506 && (reversed = reversed_comparison (op0, mode)))
2507 return reversed;
2509 tem = simplify_associative_operation (code, mode, op0, op1);
2515 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2517 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
2519 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
2520 HOST_WIDE_INT nzop1;
2521 if (CONST_INT_P (trueop1))
2523 HOST_WIDE_INT val1 = INTVAL (trueop1);
2524 /* If we are turning off bits already known off in OP0, we need
2525 not do an AND. */
2526 if ((nzop0 & ~val1) == 0)
2527 return op0;
2529 nzop1 = nonzero_bits (trueop1, mode);
2530 /* If we are clearing all the nonzero bits, the result is zero. */
2531 if ((nzop1 & nzop0) == 0
2532 && !side_effects_p (op0) && !side_effects_p (op1))
2533 return CONST0_RTX (mode);
2535 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2536 && GET_MODE_CLASS (mode) != MODE_CC)
2539 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2540 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2541 && ! side_effects_p (op0)
2542 && GET_MODE_CLASS (mode) != MODE_CC)
2543 return CONST0_RTX (mode);
2545 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2546 there are no nonzero bits of C outside of X's mode. */
2547 if ((GET_CODE (op0) == SIGN_EXTEND
2548 || GET_CODE (op0) == ZERO_EXTEND)
2549 && CONST_INT_P (trueop1)
2550 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2551 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2552 & INTVAL (trueop1)) == 0)
2554 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2555 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2556 gen_int_mode (INTVAL (trueop1),
2558 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2561 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
2562 we might be able to further simplify the AND with X and potentially
2563 remove the truncation altogether. */
2564 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
2566 rtx x = XEXP (op0, 0);
2567 enum machine_mode xmode = GET_MODE (x);
2568 tem = simplify_gen_binary (AND, xmode, x,
2569 gen_int_mode (INTVAL (trueop1), xmode));
2570 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
2573 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
2574 if (GET_CODE (op0) == IOR
2575 && CONST_INT_P (trueop1)
2576 && CONST_INT_P (XEXP (op0, 1)))
2578 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
2579 return simplify_gen_binary (IOR, mode,
2580 simplify_gen_binary (AND, mode,
2581 XEXP (op0, 0), op1),
2582 gen_int_mode (tmp, mode));
2585 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2586 insn (and may simplify more). */
2587 if (GET_CODE (op0) == XOR
2588 && rtx_equal_p (XEXP (op0, 0), op1)
2589 && ! side_effects_p (op1))
2590 return simplify_gen_binary (AND, mode,
2591 simplify_gen_unary (NOT, mode,
2592 XEXP (op0, 1), mode),
2595 if (GET_CODE (op0) == XOR
2596 && rtx_equal_p (XEXP (op0, 1), op1)
2597 && ! side_effects_p (op1))
2598 return simplify_gen_binary (AND, mode,
2599 simplify_gen_unary (NOT, mode,
2600 XEXP (op0, 0), mode),
2603 /* Similarly for (~(A ^ B)) & A. */
2604 if (GET_CODE (op0) == NOT
2605 && GET_CODE (XEXP (op0, 0)) == XOR
2606 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2607 && ! side_effects_p (op1))
2608 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2610 if (GET_CODE (op0) == NOT
2611 && GET_CODE (XEXP (op0, 0)) == XOR
2612 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2613 && ! side_effects_p (op1))
2614 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2616 /* Convert (A | B) & A to A. */
2617 if (GET_CODE (op0) == IOR
2618 && (rtx_equal_p (XEXP (op0, 0), op1)
2619 || rtx_equal_p (XEXP (op0, 1), op1))
2620 && ! side_effects_p (XEXP (op0, 0))
2621 && ! side_effects_p (XEXP (op0, 1)))
2624 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2625 ((A & N) + B) & M -> (A + B) & M
2626 Similarly if (N & M) == 0,
2627 ((A | N) + B) & M -> (A + B) & M
2628 and for - instead of + and/or ^ instead of |.
2629 Also, if (N & M) == 0, then
2630 (A +- N) & M -> A & M. */
2631 if (CONST_INT_P (trueop1)
2632 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2633 && ~INTVAL (trueop1)
2634 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2635 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2640 pmop[0] = XEXP (op0, 0);
2641 pmop[1] = XEXP (op0, 1);
2643 if (CONST_INT_P (pmop[1])
2644 && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
2645 return simplify_gen_binary (AND, mode, pmop[0], op1);
2647 for (which = 0; which < 2; which++)
2650 switch (GET_CODE (tem))
2653 if (CONST_INT_P (XEXP (tem, 1))
2654 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2655 == INTVAL (trueop1))
2656 pmop[which] = XEXP (tem, 0);
2660 if (CONST_INT_P (XEXP (tem, 1))
2661 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2662 pmop[which] = XEXP (tem, 0);
2669 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2671 tem = simplify_gen_binary (GET_CODE (op0), mode,
2673 return simplify_gen_binary (code, mode, tem, op1);
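/* Worked example with illustrative constants: take M == 0x0f (so
   M + 1 is a power of two) and N == 0xf0, hence (N & M) == 0.  Then

     ((a | 0xf0) + b) & 0x0f  ==  (a + b) & 0x0f

   because the low four bits of a | 0xf0 equal those of a, and carries
   in the addition only propagate upward, never down into the mask.  */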
2677 /* (and X (ior (not X) Y)) -> (and X Y) */
2678 if (GET_CODE (op1) == IOR
2679 && GET_CODE (XEXP (op1, 0)) == NOT
2680 && op0 == XEXP (XEXP (op1, 0), 0))
2681 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
2683 /* (and (ior (not X) Y) X) -> (and X Y) */
2684 if (GET_CODE (op0) == IOR
2685 && GET_CODE (XEXP (op0, 0)) == NOT
2686 && op1 == XEXP (XEXP (op0, 0), 0))
2687 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
2689 tem = simplify_associative_operation (code, mode, op0, op1);
2695 /* 0/x is 0 (or x&0 if x has side-effects). */
2696 if (trueop0 == CONST0_RTX (mode))
2698 if (side_effects_p (op1))
2699 return simplify_gen_binary (AND, mode, op1, trueop0);
2703 if (trueop1 == CONST1_RTX (mode))
2704 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2705 /* Convert divide by power of two into shift. */
2706 if (CONST_INT_P (trueop1)
2707 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2708 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
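/* In C terms, for the unsigned case (a 32-bit sketch with
   illustrative names):

     uint32_t by_div   (uint32_t x) { return x / 8u; }
     uint32_t by_shift (uint32_t x) { return x >> 3; }

   agree for every X.  The same is not true of signed division, which
   rounds towards zero while an arithmetic right shift rounds towards
   negative infinity.  */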
2712 /* Handle floating point and integers separately. */
2713 if (SCALAR_FLOAT_MODE_P (mode))
2715 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2716 safe for modes with NaNs, since 0.0 / 0.0 will then be
2717 NaN rather than 0.0. Nor is it safe for modes with signed
2718 zeros, since dividing 0 by a negative number gives -0.0 */
2719 if (trueop0 == CONST0_RTX (mode)
2720 && !HONOR_NANS (mode)
2721 && !HONOR_SIGNED_ZEROS (mode)
2722 && ! side_effects_p (op1))
2725 if (trueop1 == CONST1_RTX (mode)
2726 && !HONOR_SNANS (mode))
2729 if (GET_CODE (trueop1) == CONST_DOUBLE
2730 && trueop1 != CONST0_RTX (mode))
2733 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2736 if (REAL_VALUES_EQUAL (d, dconstm1)
2737 && !HONOR_SNANS (mode))
2738 return simplify_gen_unary (NEG, mode, op0, mode);
2740 /* Change FP division by a constant into multiplication.
2741 Only do this with -freciprocal-math. */
2742 if (flag_reciprocal_math
2743 && !REAL_VALUES_EQUAL (d, dconst0))
2745 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2746 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2747 return simplify_gen_binary (MULT, mode, op0, tem);
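/* E.g. with -freciprocal-math, x / 4.0 becomes x * 0.25.  For powers
   of two the reciprocal is exact and the result is bit-identical; for
   other divisors it may differ in the last ulp, which is why the
   transformation is gated on the flag.  */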
2753 /* 0/x is 0 (or x&0 if x has side-effects). */
2754 if (trueop0 == CONST0_RTX (mode))
2756 if (side_effects_p (op1))
2757 return simplify_gen_binary (AND, mode, op1, trueop0);
2761 if (trueop1 == CONST1_RTX (mode))
2762 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2764 if (trueop1 == constm1_rtx)
2766 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2767 return simplify_gen_unary (NEG, mode, x, mode);
2773 /* 0%x is 0 (or x&0 if x has side-effects). */
2774 if (trueop0 == CONST0_RTX (mode))
2776 if (side_effects_p (op1))
2777 return simplify_gen_binary (AND, mode, op1, trueop0);
2780 /* x%1 is 0 (or x&0 if x has side-effects). */
2781 if (trueop1 == CONST1_RTX (mode))
2783 if (side_effects_p (op0))
2784 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2785 return CONST0_RTX (mode);
2787 /* Implement modulus by power of two as AND. */
2788 if (CONST_INT_P (trueop1)
2789 && exact_log2 (INTVAL (trueop1)) > 0)
2790 return simplify_gen_binary (AND, mode, op0,
2791 GEN_INT (INTVAL (op1) - 1));
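/* The unsigned analogue of the division-by-power-of-two rule: the
   remainder modulo a power of two is just the low bits, e.g.

     x % 8u == (x & 7u)

   for every unsigned x.  */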
2795 /* 0%x is 0 (or x&0 if x has side-effects). */
2796 if (trueop0 == CONST0_RTX (mode))
2798 if (side_effects_p (op1))
2799 return simplify_gen_binary (AND, mode, op1, trueop0);
2802 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
2803 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2805 if (side_effects_p (op0))
2806 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2807 return CONST0_RTX (mode);
2814 if (trueop1 == CONST0_RTX (mode))
2816 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2818 /* Rotating ~0 always results in ~0. */
2819 if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
2820 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2821 && ! side_effects_p (op1))
2824 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
2826 val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
2827 if (val != INTVAL (op1))
2828 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
2835 if (trueop1 == CONST0_RTX (mode))
2837 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2839 goto canonicalize_shift;
2842 if (trueop1 == CONST0_RTX (mode))
2844 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2846 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2847 if (GET_CODE (op0) == CLZ
2848 && CONST_INT_P (trueop1)
2849 && STORE_FLAG_VALUE == 1
2850 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2852 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2853 unsigned HOST_WIDE_INT zero_val = 0;
2855 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2856 && zero_val == GET_MODE_BITSIZE (imode)
2857 && INTVAL (trueop1) == exact_log2 (zero_val))
2858 return simplify_gen_relational (EQ, mode, imode,
2859 XEXP (op0, 0), const0_rtx);
2861 goto canonicalize_shift;
2864 if (width <= HOST_BITS_PER_WIDE_INT
2865 && CONST_INT_P (trueop1)
2866 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2867 && ! side_effects_p (op0))
2869 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2871 tem = simplify_associative_operation (code, mode, op0, op1);
2877 if (width <= HOST_BITS_PER_WIDE_INT
2878 && CONST_INT_P (trueop1)
2879 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2880 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2881 && ! side_effects_p (op0))
2883 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2885 tem = simplify_associative_operation (code, mode, op0, op1);
2891 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2893 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2895 tem = simplify_associative_operation (code, mode, op0, op1);
2901 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2903 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2905 tem = simplify_associative_operation (code, mode, op0, op1);
2918 /* ??? There are simplifications that can be done. */
2922 if (!VECTOR_MODE_P (mode))
2924 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2925 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2926 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2927 gcc_assert (XVECLEN (trueop1, 0) == 1);
2928 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
2930 if (GET_CODE (trueop0) == CONST_VECTOR)
2931 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2934 /* Extract a scalar element from a nested VEC_SELECT expression
2935 (with optional nested VEC_CONCAT expression). Some targets
2936 (i386) extract a scalar element from a vector using a chain of
2937 nested VEC_SELECT expressions. When the input operand is a
2938 memory operand, this operation can be simplified to a simple
2939 scalar load from an offset memory address. */
2940 if (GET_CODE (trueop0) == VEC_SELECT)
2942 rtx op0 = XEXP (trueop0, 0);
2943 rtx op1 = XEXP (trueop0, 1);
2945 enum machine_mode opmode = GET_MODE (op0);
2946 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
2947 int n_elts = GET_MODE_SIZE (opmode) / elt_size;
2949 int i = INTVAL (XVECEXP (trueop1, 0, 0));
2955 gcc_assert (GET_CODE (op1) == PARALLEL);
2956 gcc_assert (i < n_elts);
2958 /* Select the element pointed to by the nested selector. */
2959 elem = INTVAL (XVECEXP (op1, 0, i));
2961 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
2962 if (GET_CODE (op0) == VEC_CONCAT)
2964 rtx op00 = XEXP (op0, 0);
2965 rtx op01 = XEXP (op0, 1);
2967 enum machine_mode mode00, mode01;
2968 int n_elts00, n_elts01;
2970 mode00 = GET_MODE (op00);
2971 mode01 = GET_MODE (op01);
2973 /* Find the number of elements in each operand. */
2974 if (VECTOR_MODE_P (mode00))
2976 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
2977 n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
2982 if (VECTOR_MODE_P (mode01))
2984 elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
2985 n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
2990 gcc_assert (n_elts == n_elts00 + n_elts01);
2992 /* Select the correct operand of VEC_CONCAT
2993 and adjust the selector. */
2994 if (elem < n_elts01)
3005 vec = rtvec_alloc (1);
3006 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3008 tmp = gen_rtx_fmt_ee (code, mode,
3009 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3012 if (GET_CODE (trueop0) == VEC_DUPLICATE
3013 && GET_MODE (XEXP (trueop0, 0)) == mode)
3014 return XEXP (trueop0, 0);
3018 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3019 gcc_assert (GET_MODE_INNER (mode)
3020 == GET_MODE_INNER (GET_MODE (trueop0)));
3021 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3023 if (GET_CODE (trueop0) == CONST_VECTOR)
3025 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3026 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3027 rtvec v = rtvec_alloc (n_elts);
3030 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3031 for (i = 0; i < n_elts; i++)
3033 rtx x = XVECEXP (trueop1, 0, i);
3035 gcc_assert (CONST_INT_P (x));
3036 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3040 return gen_rtx_CONST_VECTOR (mode, v);
3044 if (XVECLEN (trueop1, 0) == 1
3045 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3046 && GET_CODE (trueop0) == VEC_CONCAT)
3049 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
3051 /* Try to find the element in the VEC_CONCAT. */
3052 while (GET_MODE (vec) != mode
3053 && GET_CODE (vec) == VEC_CONCAT)
3055 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
3056 if (offset < vec_size)
3057 vec = XEXP (vec, 0);
3061 vec = XEXP (vec, 1);
3063 vec = avoid_constant_pool_reference (vec);
3066 if (GET_MODE (vec) == mode)
3073 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
3074 ? GET_MODE (trueop0)
3075 : GET_MODE_INNER (mode));
3076 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
3077 ? GET_MODE (trueop1)
3078 : GET_MODE_INNER (mode));
3080 gcc_assert (VECTOR_MODE_P (mode));
3081 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
3082 == GET_MODE_SIZE (mode));
3084 if (VECTOR_MODE_P (op0_mode))
3085 gcc_assert (GET_MODE_INNER (mode)
3086 == GET_MODE_INNER (op0_mode));
3088 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
3090 if (VECTOR_MODE_P (op1_mode))
3091 gcc_assert (GET_MODE_INNER (mode)
3092 == GET_MODE_INNER (op1_mode));
3094 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
3096 if ((GET_CODE (trueop0) == CONST_VECTOR
3097 || CONST_INT_P (trueop0)
3098 || GET_CODE (trueop0) == CONST_DOUBLE)
3099 && (GET_CODE (trueop1) == CONST_VECTOR
3100 || CONST_INT_P (trueop1)
3101 || GET_CODE (trueop1) == CONST_DOUBLE))
3103 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
3104 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3105 rtvec v = rtvec_alloc (n_elts);
3107 unsigned in_n_elts = 1;
3109 if (VECTOR_MODE_P (op0_mode))
3110 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
3111 for (i = 0; i < n_elts; i++)
3115 if (!VECTOR_MODE_P (op0_mode))
3116 RTVEC_ELT (v, i) = trueop0;
3118 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
3122 if (!VECTOR_MODE_P (op1_mode))
3123 RTVEC_ELT (v, i) = trueop1;
3125 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
3130 return gen_rtx_CONST_VECTOR (mode, v);
3143 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
3146 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
3148 unsigned int width = GET_MODE_BITSIZE (mode);
3150 if (VECTOR_MODE_P (mode)
3151 && code != VEC_CONCAT
3152 && GET_CODE (op0) == CONST_VECTOR
3153 && GET_CODE (op1) == CONST_VECTOR)
3155 unsigned n_elts = GET_MODE_NUNITS (mode);
3156 enum machine_mode op0mode = GET_MODE (op0);
3157 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
3158 enum machine_mode op1mode = GET_MODE (op1);
3159 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
3160 rtvec v = rtvec_alloc (n_elts);
3163 gcc_assert (op0_n_elts == n_elts);
3164 gcc_assert (op1_n_elts == n_elts);
3165 for (i = 0; i < n_elts; i++)
3167 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
3168 CONST_VECTOR_ELT (op0, i),
3169 CONST_VECTOR_ELT (op1, i));
3172 RTVEC_ELT (v, i) = x;
3175 return gen_rtx_CONST_VECTOR (mode, v);
3178 if (VECTOR_MODE_P (mode)
3179 && code == VEC_CONCAT
3180 && (CONST_INT_P (op0)
3181 || GET_CODE (op0) == CONST_DOUBLE
3182 || GET_CODE (op0) == CONST_FIXED)
3183 && (CONST_INT_P (op1)
3184 || GET_CODE (op1) == CONST_DOUBLE
3185 || GET_CODE (op1) == CONST_FIXED))
3187 unsigned n_elts = GET_MODE_NUNITS (mode);
3188 rtvec v = rtvec_alloc (n_elts);
3190 gcc_assert (n_elts >= 2);
3193 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
3194 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
3196 RTVEC_ELT (v, 0) = op0;
3197 RTVEC_ELT (v, 1) = op1;
3201 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3202 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
3205 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
3206 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
3207 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
3209 for (i = 0; i < op0_n_elts; ++i)
3210 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
3211 for (i = 0; i < op1_n_elts; ++i)
3212 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
3215 return gen_rtx_CONST_VECTOR (mode, v);
3218 if (SCALAR_FLOAT_MODE_P (mode)
3219 && GET_CODE (op0) == CONST_DOUBLE
3220 && GET_CODE (op1) == CONST_DOUBLE
3221 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
3232 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
3234 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
3236 for (i = 0; i < 4; i++)
3253 real_from_target (&r, tmp0, mode);
3254 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
3258 REAL_VALUE_TYPE f0, f1, value, result;
3261 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
3262 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
3263 real_convert (&f0, mode, &f0);
3264 real_convert (&f1, mode, &f1);
3266 if (HONOR_SNANS (mode)
3267 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
3271 && REAL_VALUES_EQUAL (f1, dconst0)
3272 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
3275 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3276 && flag_trapping_math
3277 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
3279 int s0 = REAL_VALUE_NEGATIVE (f0);
3280 int s1 = REAL_VALUE_NEGATIVE (f1);
3285 /* Inf + -Inf = NaN plus exception. */
3290 /* Inf - Inf = NaN plus exception. */
3295 /* Inf / Inf = NaN plus exception. */
3302 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
3303 && flag_trapping_math
3304 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
3305 || (REAL_VALUE_ISINF (f1)
3306 && REAL_VALUES_EQUAL (f0, dconst0))))
3307 /* Inf * 0 = NaN plus exception. */
3310 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
3312 real_convert (&result, mode, &value);
3314 /* Don't constant fold this floating point operation if
3315 the result has overflowed and flag_trapping_math. */
3317 if (flag_trapping_math
3318 && MODE_HAS_INFINITIES (mode)
3319 && REAL_VALUE_ISINF (result)
3320 && !REAL_VALUE_ISINF (f0)
3321 && !REAL_VALUE_ISINF (f1))
3322 /* Overflow plus exception. */
3325 /* Don't constant fold this floating point operation if the
3326 result may depend upon the run-time rounding mode and
3327 flag_rounding_math is set, or if GCC's software emulation
3328 is unable to accurately represent the result. */
3330 if ((flag_rounding_math
3331 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
3332 && (inexact || !real_identical (&result, &value)))
3335 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
3339 /* We can fold some multi-word operations. */
3340 if (GET_MODE_CLASS (mode) == MODE_INT
3341 && width == HOST_BITS_PER_DOUBLE_INT
3342 && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
3343 && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
3345 double_int o0, o1, res, tmp;
3347 o0 = rtx_to_double_int (op0);
3348 o1 = rtx_to_double_int (op1);
3353 /* A - B == A + (-B). */
3354 o1 = double_int_neg (o1);
3356 /* Fall through. */
3359 res = double_int_add (o0, o1);
3363 res = double_int_mul (o0, o1);
3367 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3368 o0.low, o0.high, o1.low, o1.high,
3369 &res.low, &res.high,
3370 &tmp.low, &tmp.high))
3375 if (div_and_round_double (TRUNC_DIV_EXPR, 0,
3376 o0.low, o0.high, o1.low, o1.high,
3377 &tmp.low, &tmp.high,
3378 &res.low, &res.high))
3383 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3384 o0.low, o0.high, o1.low, o1.high,
3385 &res.low, &res.high,
3386 &tmp.low, &tmp.high))
3391 if (div_and_round_double (TRUNC_DIV_EXPR, 1,
3392 o0.low, o0.high, o1.low, o1.high,
3393 &tmp.low, &tmp.high,
3394 &res.low, &res.high))
3399 res = double_int_and (o0, o1);
3403 res = double_int_ior (o0, o1);
3407 res = double_int_xor (o0, o1);
3411 res = double_int_smin (o0, o1);
3415 res = double_int_smax (o0, o1);
3419 res = double_int_umin (o0, o1);
3423 res = double_int_umax (o0, o1);
3426 case LSHIFTRT: case ASHIFTRT:
3428 case ROTATE: case ROTATERT:
3430 unsigned HOST_WIDE_INT cnt;
3432 if (SHIFT_COUNT_TRUNCATED)
3433 o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
3435 if (!double_int_fits_in_uhwi_p (o1)
3436 || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
3439 cnt = double_int_to_uhwi (o1);
3441 if (code == LSHIFTRT || code == ASHIFTRT)
3442 res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
3444 else if (code == ASHIFT)
3445 res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
3447 else if (code == ROTATE)
3448 res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3449 else /* code == ROTATERT */
3450 res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
3458 return immed_double_int_const (res, mode);
3461 if (CONST_INT_P (op0) && CONST_INT_P (op1)
3462 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3464 /* Get the integer argument values in two forms:
3465 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3467 arg0 = INTVAL (op0);
3468 arg1 = INTVAL (op1);
3470 if (width < HOST_BITS_PER_WIDE_INT)
3472 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3473 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3476 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3477 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3480 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3481 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3489 /* Compute the value of the arithmetic. */
3494 val = arg0s + arg1s;
3498 val = arg0s - arg1s;
3502 val = arg0s * arg1s;
3507 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3510 val = arg0s / arg1s;
3515 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3518 val = arg0s % arg1s;
3523 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3526 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3531 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3534 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3552 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3553 the value is in range. We can't return any old value for
3554 out-of-range arguments because either the middle-end (via
3555 shift_truncation_mask) or the back-end might be relying on
3556 target-specific knowledge. Nor can we rely on
3557 shift_truncation_mask, since the shift might not be part of an
3558 ashlM3, lshrM3 or ashrM3 instruction. */
3559 if (SHIFT_COUNT_TRUNCATED)
3560 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3561 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3564 val = (code == ASHIFT
3565 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3566 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3568 /* Sign-extend the result for arithmetic right shifts. */
3569 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3570 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3578 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3579 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3587 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3588 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3592 /* Do nothing here. */
3596 val = arg0s <= arg1s ? arg0s : arg1s;
3600 val = ((unsigned HOST_WIDE_INT) arg0
3601 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3605 val = arg0s > arg1s ? arg0s : arg1s;
3609 val = ((unsigned HOST_WIDE_INT) arg0
3610 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3623 /* ??? There are simplifications that can be done. */
3630 return gen_int_mode (val, mode);
3638 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3639 PLUS or MINUS.
3641 Rather than test for specific cases, we do this by a brute-force method
3642 and do all possible simplifications until no more changes occur. Then
3643 we rebuild the operation. */
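/* For example, (minus (plus A B) (minus C D)) is flattened into the
   signed multiset {+A, +B, -C, +D}; the entries are simplified
   pairwise and sorted, and a canonical PLUS/MINUS chain is rebuilt
   from whatever remains.  */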
3645 struct simplify_plus_minus_op_data
3652 simplify_plus_minus_op_data_cmp (rtx x, rtx y)
3656 result = (commutative_operand_precedence (y)
3657 - commutative_operand_precedence (x));
3661 /* Group together equal REGs to do more simplification. */
3662 if (REG_P (x) && REG_P (y))
3663 return REGNO (x) > REGNO (y);
3669 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3672 struct simplify_plus_minus_op_data ops[8];
3674 int n_ops = 2, input_ops = 2;
3675 int changed, n_constants = 0, canonicalized = 0;
3678 memset (ops, 0, sizeof ops);
3680 /* Set up the two operands and then expand them until nothing has been
3681 changed. If we run out of room in our array, give up; this should
3682 almost never happen. */
3687 ops[1].neg = (code == MINUS);
3693 for (i = 0; i < n_ops; i++)
3695 rtx this_op = ops[i].op;
3696 int this_neg = ops[i].neg;
3697 enum rtx_code this_code = GET_CODE (this_op);
3706 ops[n_ops].op = XEXP (this_op, 1);
3707 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3710 ops[i].op = XEXP (this_op, 0);
3713 canonicalized |= this_neg;
3717 ops[i].op = XEXP (this_op, 0);
3718 ops[i].neg = ! this_neg;
3725 && GET_CODE (XEXP (this_op, 0)) == PLUS
3726 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3727 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3729 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3730 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3731 ops[n_ops].neg = this_neg;
3739 /* ~a -> (-a - 1) */
3742 ops[n_ops].op = constm1_rtx;
3743 ops[n_ops++].neg = this_neg;
3744 ops[i].op = XEXP (this_op, 0);
3745 ops[i].neg = !this_neg;
3755 ops[i].op = neg_const_int (mode, this_op);
3769 if (n_constants > 1)
3772 gcc_assert (n_ops >= 2);
3774 /* If we only have two operands, we can avoid the loops. */
3777 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3780 /* Get the two operands. Be careful with the order, especially for
3781 the cases where code == MINUS. */
3782 if (ops[0].neg && ops[1].neg)
3784 lhs = gen_rtx_NEG (mode, ops[0].op);
3787 else if (ops[0].neg)
3798 return simplify_const_binary_operation (code, mode, lhs, rhs);
3801 /* Now simplify each pair of operands until nothing changes. */
3804 /* Insertion sort is good enough for an eight-element array. */
3805 for (i = 1; i < n_ops; i++)
3807 struct simplify_plus_minus_op_data save;
3809 if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
3815 ops[j + 1] = ops[j];
3816 while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
3821 for (i = n_ops - 1; i > 0; i--)
3822 for (j = i - 1; j >= 0; j--)
3824 rtx lhs = ops[j].op, rhs = ops[i].op;
3825 int lneg = ops[j].neg, rneg = ops[i].neg;
3827 if (lhs != 0 && rhs != 0)
3829 enum rtx_code ncode = PLUS;
3835 tem = lhs, lhs = rhs, rhs = tem;
3837 else if (swap_commutative_operands_p (lhs, rhs))
3838 tem = lhs, lhs = rhs, rhs = tem;
3840 if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
3841 && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
3843 rtx tem_lhs, tem_rhs;
3845 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3846 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3847 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3849 if (tem && !CONSTANT_P (tem))
3850 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3853 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3855 /* Reject "simplifications" that just wrap the two
3856 arguments in a CONST. Failure to do so can result
3857 in infinite recursion with simplify_binary_operation
3858 when it calls us to simplify CONST operations. */
3860 && ! (GET_CODE (tem) == CONST
3861 && GET_CODE (XEXP (tem, 0)) == ncode
3862 && XEXP (XEXP (tem, 0), 0) == lhs
3863 && XEXP (XEXP (tem, 0), 1) == rhs))
3866 if (GET_CODE (tem) == NEG)
3867 tem = XEXP (tem, 0), lneg = !lneg;
3868 if (CONST_INT_P (tem) && lneg)
3869 tem = neg_const_int (mode, tem), lneg = 0;
3873 ops[j].op = NULL_RTX;
3880 /* If nothing changed, fail. */
3884 /* Pack all the operands to the lower-numbered entries. */
3885 for (i = 0, j = 0; j < n_ops; j++)
3895 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3897 && CONST_INT_P (ops[1].op)
3898 && CONSTANT_P (ops[0].op)
3900 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3902 /* We suppressed creation of trivial CONST expressions in the
3903 combination loop to avoid recursion. Create one manually now.
3904 The combination loop should have ensured that there is exactly
3905 one CONST_INT, and the sort will have ensured that it is last
3906 in the array and that any other constant will be next-to-last. */
3909 && CONST_INT_P (ops[n_ops - 1].op)
3910 && CONSTANT_P (ops[n_ops - 2].op))
3912 rtx value = ops[n_ops - 1].op;
3913 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3914 value = neg_const_int (mode, value);
3915 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3919 /* Put a non-negated operand first, if possible. */
3921 for (i = 0; i < n_ops && ops[i].neg; i++)
3924 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3933 /* Now make the result by performing the requested operations. */
3935 for (i = 1; i < n_ops; i++)
3936 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3937 mode, result, ops[i].op);
3942 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3944 plus_minus_operand_p (const_rtx x)
3946 return GET_CODE (x) == PLUS
3947 || GET_CODE (x) == MINUS
3948 || (GET_CODE (x) == CONST
3949 && GET_CODE (XEXP (x, 0)) == PLUS
3950 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3951 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3954 /* Like simplify_binary_operation except used for relational operators.
3955 MODE is the mode of the result. If MODE is VOIDmode, both operands must
3956 not also be VOIDmode.
3958 CMP_MODE specifies the mode in which the comparison is done, so it is
3959 the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
3960 the operands or, if both are VOIDmode, the operands are compared in
3961 "infinite precision". */
3963 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3964 enum machine_mode cmp_mode, rtx op0, rtx op1)
3966 rtx tem, trueop0, trueop1;
3968 if (cmp_mode == VOIDmode)
3969 cmp_mode = GET_MODE (op0);
3970 if (cmp_mode == VOIDmode)
3971 cmp_mode = GET_MODE (op1);
3973 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3976 if (SCALAR_FLOAT_MODE_P (mode))
3978 if (tem == const0_rtx)
3979 return CONST0_RTX (mode);
3980 #ifdef FLOAT_STORE_FLAG_VALUE
3982 REAL_VALUE_TYPE val;
3983 val = FLOAT_STORE_FLAG_VALUE (mode);
3984 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3990 if (VECTOR_MODE_P (mode))
3992 if (tem == const0_rtx)
3993 return CONST0_RTX (mode);
3994 #ifdef VECTOR_STORE_FLAG_VALUE
3999 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
4000 if (val == NULL_RTX)
4002 if (val == const1_rtx)
4003 return CONST1_RTX (mode);
4005 units = GET_MODE_NUNITS (mode);
4006 v = rtvec_alloc (units);
4007 for (i = 0; i < units; i++)
4008 RTVEC_ELT (v, i) = val;
4009 return gen_rtx_raw_CONST_VECTOR (mode, v);
4019 /* For the following tests, ensure const0_rtx is op1. */
4020 if (swap_commutative_operands_p (op0, op1)
4021 || (op0 == const0_rtx && op1 != const0_rtx))
4022 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
4024 /* If op0 is a compare, extract the comparison arguments from it. */
4025 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4026 return simplify_gen_relational (code, mode, VOIDmode,
4027 XEXP (op0, 0), XEXP (op0, 1));
4029 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
4033 trueop0 = avoid_constant_pool_reference (op0);
4034 trueop1 = avoid_constant_pool_reference (op1);
4035 return simplify_relational_operation_1 (code, mode, cmp_mode,
4039 /* This part of simplify_relational_operation is only used when CMP_MODE
4040 is not in class MODE_CC (i.e. it is a real comparison).
4042 MODE is the mode of the result, while CMP_MODE specifies the mode
4043 in which the comparison is done, so it is the mode of the operands. */
4046 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
4047 enum machine_mode cmp_mode, rtx op0, rtx op1)
4049 enum rtx_code op0code = GET_CODE (op0);
4051 if (op1 == const0_rtx && COMPARISON_P (op0))
4053 /* If op0 is a comparison, extract the comparison arguments
4057 if (GET_MODE (op0) == mode)
4058 return simplify_rtx (op0);
4060 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
4061 XEXP (op0, 0), XEXP (op0, 1));
4063 else if (code == EQ)
4065 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
4066 if (new_code != UNKNOWN)
4067 return simplify_gen_relational (new_code, mode, VOIDmode,
4068 XEXP (op0, 0), XEXP (op0, 1));
4072 /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
4073 (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
4074 if ((code == LTU || code == GEU)
4075 && GET_CODE (op0) == PLUS
4076 && CONST_INT_P (XEXP (op0, 1))
4077 && (rtx_equal_p (op1, XEXP (op0, 0))
4078 || rtx_equal_p (op1, XEXP (op0, 1))))
4081 = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
4082 return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
4083 cmp_mode, XEXP (op0, 0), new_cmp);
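/* This matches the usual unsigned overflow-check idiom: a + C wraps
   exactly when a >= -C (computed in the same unsigned mode), e.g. in
   a 32-bit mode

     (a + 4) <u 4   iff   a >=u 0xfffffffc

   so the PLUS can be dropped from the comparison.  */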
4086 /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
4087 if ((code == LTU || code == GEU)
4088 && GET_CODE (op0) == PLUS
4089 && rtx_equal_p (op1, XEXP (op0, 1))
4090 /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
4091 && !rtx_equal_p (op1, XEXP (op0, 0)))
4092 return simplify_gen_relational (code, mode, cmp_mode, op0,
4093 copy_rtx (XEXP (op0, 0)));
4095 if (op1 == const0_rtx)
4097 /* Canonicalize (GTU x 0) as (NE x 0). */
4099 return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
4100 /* Canonicalize (LEU x 0) as (EQ x 0). */
4102 return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
4104 else if (op1 == const1_rtx)
4109 /* Canonicalize (GE x 1) as (GT x 0). */
4110 return simplify_gen_relational (GT, mode, cmp_mode,
4113 /* Canonicalize (GEU x 1) as (NE x 0). */
4114 return simplify_gen_relational (NE, mode, cmp_mode,
4117 /* Canonicalize (LT x 1) as (LE x 0). */
4118 return simplify_gen_relational (LE, mode, cmp_mode,
4121 /* Canonicalize (LTU x 1) as (EQ x 0). */
4122 return simplify_gen_relational (EQ, mode, cmp_mode,
4128 else if (op1 == constm1_rtx)
4130 /* Canonicalize (LE x -1) as (LT x 0). */
4132 return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
4133 /* Canonicalize (GT x -1) as (GE x 0). */
4135 return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
4138 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
4139 if ((code == EQ || code == NE)
4140 && (op0code == PLUS || op0code == MINUS)
4142 && CONSTANT_P (XEXP (op0, 1))
4143 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
4145 rtx x = XEXP (op0, 0);
4146 rtx c = XEXP (op0, 1);
4148 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
4150 return simplify_gen_relational (code, mode, cmp_mode, x, c);
4153 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4154 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4156 && op1 == const0_rtx
4157 && GET_MODE_CLASS (mode) == MODE_INT
4158 && cmp_mode != VOIDmode
4159 /* ??? Work-around BImode bugs in the ia64 backend. */
4161 && cmp_mode != BImode
4162 && nonzero_bits (op0, cmp_mode) == 1
4163 && STORE_FLAG_VALUE == 1)
4164 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
4165 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
4166 : lowpart_subreg (mode, op0, cmp_mode);
4168 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
4169 if ((code == EQ || code == NE)
4170 && op1 == const0_rtx
4172 return simplify_gen_relational (code, mode, cmp_mode,
4173 XEXP (op0, 0), XEXP (op0, 1));
4175 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
4176 if ((code == EQ || code == NE)
4178 && rtx_equal_p (XEXP (op0, 0), op1)
4179 && !side_effects_p (XEXP (op0, 0)))
4180 return simplify_gen_relational (code, mode, cmp_mode,
4181 XEXP (op0, 1), const0_rtx);
4183 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
4184 if ((code == EQ || code == NE)
4186 && rtx_equal_p (XEXP (op0, 1), op1)
4187 && !side_effects_p (XEXP (op0, 1)))
4188 return simplify_gen_relational (code, mode, cmp_mode,
4189 XEXP (op0, 0), const0_rtx);
4191 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
4192 if ((code == EQ || code == NE)
4194 && (CONST_INT_P (op1)
4195 || GET_CODE (op1) == CONST_DOUBLE)
4196 && (CONST_INT_P (XEXP (op0, 1))
4197 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
4198 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
4199 simplify_gen_binary (XOR, cmp_mode,
4200 XEXP (op0, 1), op1));
4202 if (op0code == POPCOUNT && op1 == const0_rtx)
4208 /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
4209 return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
4210 XEXP (op0, 0), const0_rtx);
4215 /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
4216 return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
4217 XEXP (op0, 0), const0_rtx);
4236 /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
4237 KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE.
4238 For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
4239 logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
4240 For floating-point comparisons, assume that the operands were ordered. */
4243 comparison_result (enum rtx_code code, int known_results)
4249 return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
4252 return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
4256 return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
4259 return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
4263 return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
4266 return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
4269 return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
4271 return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
4274 return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
4276 return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
4279 return const_true_rtx;
4287 /* Check if the given comparison (done in the given MODE) is actually a
4288 tautology or a contradiction.
4289 If no simplification is possible, this function returns zero.
4290 Otherwise, it returns either const_true_rtx or const0_rtx. */
4293 simplify_const_relational_operation (enum rtx_code code,
4294 enum machine_mode mode,
4301 gcc_assert (mode != VOIDmode
4302 || (GET_MODE (op0) == VOIDmode
4303 && GET_MODE (op1) == VOIDmode));
4305 /* If op0 is a compare, extract the comparison arguments from it. */
4306 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
4308 op1 = XEXP (op0, 1);
4309 op0 = XEXP (op0, 0);
4311 if (GET_MODE (op0) != VOIDmode)
4312 mode = GET_MODE (op0);
4313 else if (GET_MODE (op1) != VOIDmode)
4314 mode = GET_MODE (op1);
4319 /* We can't simplify MODE_CC values since we don't know what the
4320 actual comparison is. */
4321 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
4324 /* Make sure the constant is second. */
4325 if (swap_commutative_operands_p (op0, op1))
4327 tem = op0, op0 = op1, op1 = tem;
4328 code = swap_condition (code);
4331 trueop0 = avoid_constant_pool_reference (op0);
4332 trueop1 = avoid_constant_pool_reference (op1);
4334 /* For integer comparisons of A and B maybe we can simplify A - B and can
4335 then simplify a comparison of that with zero. If A and B are both either
4336 a register or a CONST_INT, this can't help; testing for these cases will
4337 prevent infinite recursion here and speed things up.
4339 We can only do this for EQ and NE comparisons as otherwise we may
4340 lose or introduce overflow which we cannot disregard as undefined as
4341 we do not know the signedness of the operation on either the left or
4342 the right hand side of the comparison. */
4344 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
4345 && (code == EQ || code == NE)
4346 && ! ((REG_P (op0) || CONST_INT_P (trueop0))
4347 && (REG_P (op1) || CONST_INT_P (trueop1)))
4348 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
4349 /* We cannot do this if tem is a nonzero address. */
4350 && ! nonzero_address_p (tem))
4351 return simplify_const_relational_operation (signed_condition (code),
4352 mode, tem, const0_rtx);
4354 if (! HONOR_NANS (mode) && code == ORDERED)
4355 return const_true_rtx;
4357 if (! HONOR_NANS (mode) && code == UNORDERED)
4360 /* For modes without NaNs, if the two operands are equal, we know the
4361 result except if they have side-effects. Even with NaNs we know
4362 the result of unordered comparisons and, if signaling NaNs are
4363 irrelevant, also the result of LT/GT/LTGT. */
4364 if ((! HONOR_NANS (GET_MODE (trueop0))
4365 || code == UNEQ || code == UNLE || code == UNGE
4366 || ((code == LT || code == GT || code == LTGT)
4367 && ! HONOR_SNANS (GET_MODE (trueop0))))
4368 && rtx_equal_p (trueop0, trueop1)
4369 && ! side_effects_p (trueop0))
4370 return comparison_result (code, CMP_EQ);
4372 /* If the operands are floating-point constants, see if we can fold
4374 if (GET_CODE (trueop0) == CONST_DOUBLE
4375 && GET_CODE (trueop1) == CONST_DOUBLE
4376 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
4378 REAL_VALUE_TYPE d0, d1;
4380 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
4381 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
4383 /* Comparisons are unordered iff at least one of the values is NaN. */
4384 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
4394 return const_true_rtx;
4407 return comparison_result (code,
4408 (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
4409 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
4412 /* Otherwise, see if the operands are both integers. */
4413 if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
4414 && (GET_CODE (trueop0) == CONST_DOUBLE
4415 || CONST_INT_P (trueop0))
4416 && (GET_CODE (trueop1) == CONST_DOUBLE
4417 || CONST_INT_P (trueop1)))
4419 int width = GET_MODE_BITSIZE (mode);
4420 HOST_WIDE_INT l0s, h0s, l1s, h1s;
4421 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
4423 /* Get the two words comprising each integer constant. */
4424 if (GET_CODE (trueop0) == CONST_DOUBLE)
4426 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
4427 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
4431 l0u = l0s = INTVAL (trueop0);
4432 h0u = h0s = HWI_SIGN_EXTEND (l0s);
4435 if (GET_CODE (trueop1) == CONST_DOUBLE)
4437 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
4438 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
4442 l1u = l1s = INTVAL (trueop1);
4443 h1u = h1s = HWI_SIGN_EXTEND (l1s);
4446 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
4447 we have to sign or zero-extend the values. */
4448 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
4450 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
4451 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
4453 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
4454 l0s |= ((HOST_WIDE_INT) (-1) << width);
4456 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
4457 l1s |= ((HOST_WIDE_INT) (-1) << width);
4459 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
4460 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
4462 if (h0u == h1u && l0u == l1u)
4463 return comparison_result (code, CMP_EQ);
4467 cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
4468 cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
4469 return comparison_result (code, cr);
4473 /* Optimize comparisons with upper and lower bounds. */
4474 if (SCALAR_INT_MODE_P (mode)
4475 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
4476 && CONST_INT_P (trueop1))
4479 unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
4480 HOST_WIDE_INT val = INTVAL (trueop1);
4481 HOST_WIDE_INT mmin, mmax;
4491 /* Get a reduced range if the sign bit is zero. */
4492 if (nonzero <= (GET_MODE_MASK (mode) >> 1))
4499 rtx mmin_rtx, mmax_rtx;
4500 get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);
4502 mmin = INTVAL (mmin_rtx);
4503 mmax = INTVAL (mmax_rtx);
4506 unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);
4508 mmin >>= (sign_copies - 1);
4509 mmax >>= (sign_copies - 1);
4515 /* x >= y is always true for y <= mmin, always false for y > mmax. */
4517 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4518 return const_true_rtx;
4519 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4524 return const_true_rtx;
4529 /* x <= y is always true for y >= mmax, always false for y < mmin. */
4531 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4532 return const_true_rtx;
4533 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4538 return const_true_rtx;
4544 /* x == y is always false for y out of range. */
4545 if (val < mmin || val > mmax)
4549 /* x > y is always false for y >= mmax, always true for y < mmin. */
4551 if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
4553 if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
4554 return const_true_rtx;
4560 return const_true_rtx;
4563 /* x < y is always false for y <= mmin, always true for y > mmax. */
4565 if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
4567 if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
4568 return const_true_rtx;
4574 return const_true_rtx;
4578 /* x != y is always true for y out of range. */
4579 if (val < mmin || val > mmax)
4580 return const_true_rtx;
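/* E.g. if nonzero_bits proves that only the low four bits of X can
   be set, the derived range is [0, 15], so (gtu x 15) folds to
   const0_rtx and (leu x 15) folds to const_true_rtx without knowing
   X itself.  */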
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (INTVAL (inner_const)
				  & ((HOST_WIDE_INT) 1 << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GE:
		case GT:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }
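  /* Sketch: for (ior:SI (reg:SI r) (const_int 0x80000000)), the sign bit
     of op0 is known to be set, so comparing against zero,
     (lt op0 (const_int 0)) folds to const_true_rtx and
     (ge op0 (const_int 0)) folds to const0_rtx.  */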
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
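/* Sketch of the ABS folding above: when NaNs are not honored (e.g. under
   -ffinite-math-only), (ge:SF (abs:SF (reg:SF x)) (const_double:SF 0.0))
   folds to const_true_rtx, since |x| >= 0.0 holds for every non-NaN x.  */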
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
4715 /* Extracting a bit-field from a constant */
4716 HOST_WIDE_INT val = INTVAL (op0);
	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);
	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }
4734 /* Clear the bits that don't belong in our mode,
4735 unless they and our sign bit are all one.
4736 So we get either a reasonable negative value or a reasonable
4737 unsigned value for this mode. */
4738 if (width < HOST_BITS_PER_WIDE_INT
4739 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4740 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4741 val &= ((HOST_WIDE_INT) 1 << width) - 1;
	  return gen_int_mode (val, mode);
	}
      break;
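      /* Worked example (sketch): with !BITS_BIG_ENDIAN,
	 (zero_extract:SI (const_int 0x1234) (const_int 4) (const_int 4))
	 extracts four bits starting at bit 4: 0x1234 >> 4 == 0x123,
	 masked with 0xf, yielding (const_int 3).  */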
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;
      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;
      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;
      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
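      /* Sketch (STORE_FLAG_VALUE is 1 on most targets): here
	 (if_then_else (lt:SI a b) (const_int 1) (const_int 0)) becomes
	 (lt:SI a b) directly, and with the arms swapped the reversed
	 comparison (ge:SI a b) is generated instead.  */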
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
4826 gcc_assert (GET_MODE (op1) == mode);
4827 gcc_assert (VECTOR_MODE_P (mode));
4828 op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;
4840 op0 = avoid_constant_pool_reference (op0);
4841 op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
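/* Sketch of the VEC_MERGE folding above: (vec_merge:V4SI op0 op1
   (const_int 5)) selects elements 0 and 2 from op0 (mask bits 0b0101)
   and elements 1 and 3 from op1.  A mask of 0 yields op1 unchanged;
   a mask of 0xf yields op0.  */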
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR, returning another CONST_INT or CONST_DOUBLE or
   CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
4917 /* If this asserts, it is too complicated; reducing value_bit may help. */
4918 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4919 /* I don't know how to handle endianness of sub-units. */
4920 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
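  /* At this point VALUE[] holds OP as a little-endian array of 8-bit
     chunks.  Sketch: the SImode constant 0x12345678 unpacks to
     value[0..3] == {0x78, 0x56, 0x34, 0x12}; target byte order is
     applied separately, when BYTE is renumbered below.  */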
5029 /* Now, pick the right byte to start with. */
5030 /* Renumber BYTE so that the least-significant byte is byte 0. A special
5031 case is paradoxical SUBREGs, which shouldn't be adjusted since they
5032 will already have offset 0. */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }
5043 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
5044 so if it's become negative it will instead be very large.) */
5045 gcc_assert (byte < GET_MODE_SIZE (innermode));
5047 /* Convert from bytes to chunks of size value_bit. */
5048 value_start = byte * (BITS_PER_UNIT / value_bit);
5050 /* Re-pack the value. */
  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }
5066 outer_class = GET_MODE_CLASS (outer_submode);
5067 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
5069 gcc_assert (elem_bitsize % value_bit == 0);
5070 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;
	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
5150 f.mode = outer_submode;
	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
5174 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
5175 Return 0 if no simplifications are possible. */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
5180 /* Little bit of sanity checking. */
5181 gcc_assert (innermode != VOIDmode);
5182 gcc_assert (outermode != VOIDmode);
5183 gcc_assert (innermode != BLKmode);
5184 gcc_assert (outermode != BLKmode);
5186 gcc_assert (GET_MODE (op) == innermode
5187 || GET_MODE (op) == VOIDmode);
5189 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
5190 gcc_assert (byte < GET_MODE_SIZE (innermode));
  if (outermode == innermode && !byte)
    return op;
5195 if (CONST_INT_P (op)
5196 || GET_CODE (op) == CONST_DOUBLE
5197 || GET_CODE (op) == CONST_FIXED
5198 || GET_CODE (op) == CONST_VECTOR)
5199 return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);
      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  The irritating exception is a paradoxical subreg,
	 where we define SUBREG_BYTE to be 0.  On big endian machines,
	 this value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
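  /* Sketch: (subreg:QI (subreg:HI (reg:SI r) 2) 1) on a little-endian
     target folds to (subreg:QI (reg:SI r) 3): the byte offsets simply
     add, provided the resulting subreg is representable.  */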
5289 /* Merge implicit and explicit truncations. */
5291 if (GET_CODE (op) == TRUNCATE
5292 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
5293 && subreg_lowpart_offset (outermode, innermode) == byte)
5294 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
5295 GET_MODE (XEXP (op, 0)));
5297 /* SUBREG of a hard register => just change the register number
5298 and/or mode. If the hard register is not valid in that mode,
5299 suppress this simplification. If the hard register is the stack,
5300 frame, or argument pointer, leave this as a SUBREG. */
  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot handle partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
5352 /* Handle complex values represented as CONCAT
5353 of real and imaginary part. */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
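  /* Sketch: for a complex value (concat:DC x:DF y:DF), asking for the
     DFmode subreg at byte 8 selects the imaginary part Y directly,
     with no SUBREG left over.  */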
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts bits higher than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
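  /* Sketch: on a little-endian target,
     (subreg:HI (zero_extend:SI (reg:HI r)) 0) has bitpos == 0 and
     outermode == origmode, so it folds to (reg:HI r) with both the
     extension and the subreg gone.  */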
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) to
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
5420 if ((GET_CODE (op) == LSHIFTRT
5421 || GET_CODE (op) == ASHIFTRT)
5422 && SCALAR_INT_MODE_P (outermode)
5423 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
5424 to avoid the possibility that an outer LSHIFTRT shifts by more
5425 than the sign extension's sign_bit_copies and introduces zeros
5426 into the high bits of the result. */
5427 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
5428 && CONST_INT_P (XEXP (op, 1))
5429 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
5430 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5431 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5432 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5433 return simplify_gen_binary (ASHIFTRT, outermode,
5434 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
5439 if ((GET_CODE (op) == LSHIFTRT
5440 || GET_CODE (op) == ASHIFTRT)
5441 && SCALAR_INT_MODE_P (outermode)
5442 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5443 && CONST_INT_P (XEXP (op, 1))
5444 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5445 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5446 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5447 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5448 return simplify_gen_binary (LSHIFTRT, outermode,
5449 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise, simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
5454 if (GET_CODE (op) == ASHIFT
5455 && SCALAR_INT_MODE_P (outermode)
5456 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
5457 && CONST_INT_P (XEXP (op, 1))
5458 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
5459 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
5460 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
5461 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
5462 && subreg_lsb_1 (outermode, innermode, byte) == 0)
5463 return simplify_gen_binary (ASHIFT, outermode,
5464 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
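  /* Sketch of the three cases above:
     (subreg:QI (lshiftrt:SI (zero_extend:SI (reg:QI r)) (const_int 3)) 0)
     becomes (lshiftrt:QI (reg:QI r) (const_int 3)); the subreg merely
     truncates back to QImode, so the shift can be done there directly.  */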
5466 /* Recognize a word extraction from a multi-word subreg. */
5467 if ((GET_CODE (op) == LSHIFTRT
5468 || GET_CODE (op) == ASHIFTRT)
5469 && SCALAR_INT_MODE_P (outermode)
5470 && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
5471 && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
5472 && CONST_INT_P (XEXP (op, 1))
5473 && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
5474 && INTVAL (XEXP (op, 1)) >= 0
5475 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
5476 && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
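/* Usage sketch (hypothetical caller): to take the low SImode half of a
   DImode value X, a pass would write

     rtx lo = simplify_gen_subreg (SImode, x, DImode,
				   subreg_lowpart_offset (SImode, DImode));

   and receive either a folded rtx (e.g. a CONST_INT when X is constant)
   or a fresh (subreg:SI x ...).  */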
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));

    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))